k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/kubectl/kubectl.go

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  // OWNER = sig/cli
    18  
    19  package kubectl
    20  
    21  import (
    22  	"bytes"
    23  	"context"
    24  	"encoding/json"
    25  	"fmt"
    26  	"io"
    27  	"net"
    28  	"net/http"
    29  	"net/http/httptest"
    30  	"os"
    31  	"os/exec"
    32  	"path"
    33  	"path/filepath"
    34  	"regexp"
    35  	"sort"
    36  	"strconv"
    37  	"strings"
    38  	"time"
    39  
    40  	openapi_v2 "github.com/google/gnostic-models/openapiv2"
    41  	"github.com/google/go-cmp/cmp"
    42  
    43  	"sigs.k8s.io/yaml"
    44  
    45  	v1 "k8s.io/api/core/v1"
    46  	rbacv1 "k8s.io/api/rbac/v1"
    47  	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    48  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    49  	"k8s.io/apimachinery/pkg/api/meta"
    50  	"k8s.io/apimachinery/pkg/api/resource"
    51  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    52  	"k8s.io/apimachinery/pkg/labels"
    53  	"k8s.io/apimachinery/pkg/runtime"
    54  	"k8s.io/apimachinery/pkg/runtime/schema"
    55  	utilnet "k8s.io/apimachinery/pkg/util/net"
    56  	utilnettesting "k8s.io/apimachinery/pkg/util/net/testing"
    57  	"k8s.io/apimachinery/pkg/util/uuid"
    58  	"k8s.io/apimachinery/pkg/util/wait"
    59  	"k8s.io/apiserver/pkg/authentication/serviceaccount"
    60  	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
    61  	clientset "k8s.io/client-go/kubernetes"
    62  	"k8s.io/client-go/tools/clientcmd"
    63  	"k8s.io/kubectl/pkg/polymorphichelpers"
    64  	"k8s.io/kubernetes/pkg/controller"
    65  	commonutils "k8s.io/kubernetes/test/e2e/common"
    66  	"k8s.io/kubernetes/test/e2e/framework"
    67  	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
    68  	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
    69  	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
    70  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    71  	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
    72  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    73  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    74  	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    75  	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
    76  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    77  	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
    78  	"k8s.io/kubernetes/test/e2e/scheduling"
    79  	testutils "k8s.io/kubernetes/test/utils"
    80  	"k8s.io/kubernetes/test/utils/crd"
    81  	imageutils "k8s.io/kubernetes/test/utils/image"
    82  	admissionapi "k8s.io/pod-security-admission/api"
    83  	uexec "k8s.io/utils/exec"
    84  	"k8s.io/utils/pointer"
    85  
    86  	"github.com/onsi/ginkgo/v2"
    87  	"github.com/onsi/gomega"
    88  )
    89  
    90  const (
    91  	updateDemoSelector        = "name=update-demo"
    92  	guestbookStartupTimeout   = 10 * time.Minute
    93  	guestbookResponseTimeout  = 3 * time.Minute
    94  	simplePodSelector         = "name=httpd"
    95  	simplePodName             = "httpd"
    96  	simplePodResourceName     = "pod/httpd"
    97  	httpdDefaultOutput        = "It works!"
    98  	simplePodPort             = 80
    99  	pausePodSelector          = "name=pause"
   100  	pausePodName              = "pause"
   101  	busyboxPodSelector        = "app=busybox1"
   102  	busyboxPodName            = "busybox1"
   103  	kubeCtlManifestPath       = "test/e2e/testing-manifests/kubectl"
   104  	agnhostControllerFilename = "agnhost-primary-controller.json.in"
   105  	agnhostServiceFilename    = "agnhost-primary-service.json"
   106  	httpdDeployment1Filename  = "httpd-deployment1.yaml.in"
   107  	httpdDeployment2Filename  = "httpd-deployment2.yaml.in"
   108  	httpdDeployment3Filename  = "httpd-deployment3.yaml.in"
   109  	metaPattern               = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
   110  )
   111  
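         // unknownFieldMetadataJSON returns a metadata JSON fragment like metaPattern above, but with an
         // extra field the API server does not know about, e.g. (for illustration, using a hypothetical
         // CRD GVK example.com/v1 Foo and the name "test-cr"):
         //   "kind":"Foo","apiVersion":"example.com/v1","metadata":{"unknownMeta": "foo", "name":"test-cr"}
         // It is intended for tests that exercise how kubectl and the server handle unknown metadata fields.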
   112  func unknownFieldMetadataJSON(gvk schema.GroupVersionKind, name string) string {
   113  	return fmt.Sprintf(`"kind":"%s","apiVersion":"%s/%s","metadata":{"unknownMeta": "foo", "name":"%s"}`, gvk.Kind, gvk.Group, gvk.Version, name)
   114  }
   115  
   116  var (
   117  	// If this suite still flakes due to timeouts we should change this to framework.PodStartTimeout
   118  	podRunningTimeoutArg = fmt.Sprintf("--pod-running-timeout=%s", framework.PodStartShortTimeout.String())
   119  )
   120  
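         // proxyRegexp extracts the local port from the line `kubectl proxy` prints on startup,
         // e.g. "Starting to serve on 127.0.0.1:46161" yields submatch 1 == "46161".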
   121  var proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")
   122  
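         // schemaFoo and schemaFooEmbedded are OpenAPI v3 validation schemas, written as YAML, used when
         // tests below install "Foo" test CRDs (typically by unmarshalling them into an
         // apiextensionsv1.JSONSchemaProps with sigs.k8s.io/yaml before creating the CRD).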
   123  var schemaFoo = []byte(`description: Foo CRD for Testing
   124  type: object
   125  properties:
   126    spec:
   127      type: object
   128      description: Specification of Foo
   129      properties:
   130        bars:
   131          description: List of Bars and their specs.
   132          type: array
   133          items:
   134            type: object
   135            required:
   136            - name
   137            properties:
   138              name:
   139                description: Name of Bar.
   140                type: string
   141              age:
   142                description: Age of Bar.
   143                type: string
   144              bazs:
   145                description: List of Bazs.
   146                items:
   147                  type: string
   148                type: array
   149    status:
   150      description: Status of Foo
   151      type: object
   152      properties:
   153        bars:
   154          description: List of Bars and their statuses.
   155          type: array
   156          items:
   157            type: object
   158            properties:
   159              name:
   160                description: Name of Bar.
   161                type: string
   162              available:
   163                description: Whether the Bar is installed.
   164                type: boolean
   165              quxType:
   166                description: Indicates to external qux type.
   167                pattern: in-tree|out-of-tree
   168                type: string`)
   169  
   170  var schemaFooEmbedded = []byte(`description: Foo CRD with an embedded resource
   171  type: object
   172  properties:
   173    spec:
   174      type: object
   175      properties:
   176        template:
   177          type: object
   178          x-kubernetes-embedded-resource: true
   179          properties:
   180            metadata:
   181              type: object
   182              properties:
   183                name:
   184                  type: string
   185            spec:
   186              type: object
   187    metadata:
   188      type: object
   189      properties:
   190        name:
   191          type: string`)
   192  
    193  // cleanupKubectlInputs deletes everything described by fileContents from namespace ns and checks
    194  // that everything matching the given selectors in that namespace has been correctly stopped.
   195  func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
   196  	ginkgo.By("using delete to clean up resources")
    197  	// For backward compatibility this accepts raw JSON or YAML content rather than file paths,
    198  	// since file path dependencies are being removed from this test.
   199  	e2ekubectl.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
   200  	assertCleanup(ns, selectors...)
   201  }
   202  
    203  // assertCleanup asserts that cleanup of the namespace with respect to the given selectors occurred.
   204  func assertCleanup(ns string, selectors ...string) {
   205  	var e error
   206  	verifyCleanupFunc := func() (bool, error) {
   207  		e = nil
   208  		for _, selector := range selectors {
   209  			resources := e2ekubectl.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
   210  			if resources != "" {
   211  				e = fmt.Errorf("Resources left running after stop:\n%s", resources)
   212  				return false, nil
   213  			}
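         			// The go-template below lists only pods that do not yet have a deletionTimestamp set,
         			// i.e. pods that have not even begun terminating; pods already being deleted count as cleaned up.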
   214  			pods := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
   215  			if pods != "" {
   216  				e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
   217  				return false, nil
   218  			}
   219  		}
   220  		return true, nil
   221  	}
   222  	err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
   223  	if err != nil {
    224  		framework.Failf("%v", e)
   225  	}
   226  }
   227  
   228  func readTestFileOrDie(file string) []byte {
   229  	data, err := e2etestfiles.Read(path.Join(kubeCtlManifestPath, file))
   230  	if err != nil {
   231  		framework.Fail(err.Error(), 1)
   232  	}
   233  	return data
   234  }
   235  
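         // runKubectlRetryOrDie runs kubectl up to 5 times, one second apart, retrying only while the
         // failure looks like an optimistic-concurrency conflict (genericregistry.OptimisticLockErrorMsg or
         // "Operation cannot be fulfilled"), which mutating commands can hit when the object is being
         // updated concurrently. Any other error, or exhausting the retries, fails the test.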
   236  func runKubectlRetryOrDie(ns string, args ...string) string {
   237  	var err error
   238  	var output string
   239  	for i := 0; i < 5; i++ {
   240  		output, err = e2ekubectl.RunKubectl(ns, args...)
   241  		if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) {
   242  			break
   243  		}
   244  		time.Sleep(time.Second)
   245  	}
   246  	// Expect no errors to be present after retries are finished
   247  	// Copied from framework #ExecOrDie
   248  	framework.Logf("stdout: %q", output)
   249  	framework.ExpectNoError(err)
   250  	return output
   251  }
   252  
   253  var _ = SIGDescribe("Kubectl client", func() {
   254  	defer ginkgo.GinkgoRecover()
   255  	f := framework.NewDefaultFramework("kubectl")
   256  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
   257  
   258  	// Reusable cluster state function.  This won't be adversely affected by lazy initialization of framework.
   259  	clusterState := func() *framework.ClusterVerification {
   260  		return f.NewClusterVerification(
   261  			f.Namespace,
   262  			framework.PodStateVerification{
   263  				Selectors:   map[string]string{"app": "agnhost"},
   264  				ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
   265  			})
   266  	}
   267  	forEachPod := func(ctx context.Context, podFunc func(p v1.Pod)) {
   268  		_ = clusterState().ForEach(ctx, podFunc)
   269  	}
   270  	var c clientset.Interface
   271  	var ns string
   272  	ginkgo.BeforeEach(func() {
   273  		c = f.ClientSet
   274  		ns = f.Namespace.Name
   275  	})
   276  
    277  	// Customized Wait / ForEach wrappers for this test. These demonstrate the
    278  	// idiomatic way to wrap the ClusterVerification structs for syntactic sugar in large
    279  	// test files.
    280  	// Print debug info if fewer than atLeast pods are found before the timeout.
   281  	waitForOrFailWithDebug := func(ctx context.Context, atLeast int) {
   282  		pods, err := clusterState().WaitFor(ctx, atLeast, framework.PodStartTimeout)
   283  		if err != nil || len(pods) < atLeast {
   284  			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
   285  			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, ns)
    286  			framework.Failf("Verified %d of %d pods, error: %v", len(pods), atLeast, err)
   287  		}
   288  	}
   289  
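         	// debugDiscovery dumps the kubectl discovery cache (~/.kube/cache/discovery) so that flakes in
         	// discovery-dependent commands, such as `kubectl scale`, can be correlated with stale cached
         	// serverresources.json files.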
   290  	debugDiscovery := func() {
   291  		home := os.Getenv("HOME")
   292  		if len(home) == 0 {
   293  			framework.Logf("no $HOME envvar set")
   294  			return
   295  		}
   296  
   297  		cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
   298  		err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
   299  			if err != nil {
   300  				return err
   301  			}
   302  
   303  			// only pay attention to $host_$port/v1/serverresources.json files
   304  			subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
    305  			parts := strings.Split(subpath, string(filepath.Separator))
   306  			if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
   307  				return nil
   308  			}
   309  			framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
   310  
   311  			data, readError := os.ReadFile(path)
   312  			if readError != nil {
   313  				framework.Logf("%s error: %v", path, readError)
   314  			} else {
   315  				framework.Logf("%s content: %s", path, string(data))
   316  			}
   317  			return nil
   318  		})
   319  		framework.Logf("scanned %s for discovery docs: %v", home, err)
   320  	}
   321  
   322  	ginkgo.Describe("Update Demo", func() {
   323  		var nautilus string
   324  		ginkgo.BeforeEach(func() {
   325  			updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
   326  			data, err := e2etestfiles.Read(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))
   327  			if err != nil {
   328  				framework.Fail(err.Error())
   329  			}
   330  			nautilus = commonutils.SubstituteImageName(string(data))
   331  		})
   332  		/*
   333  			Release: v1.9
   334  			Testname: Kubectl, replication controller
   335  			Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2.
   336  		*/
   337  		framework.ConformanceIt("should create and stop a replication controller", func(ctx context.Context) {
   338  			defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
   339  
   340  			ginkgo.By("creating a replication controller")
   341  			e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
   342  			validateController(ctx, c, imageutils.GetE2EImage(imageutils.Nautilus), 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
   343  		})
   344  
   345  		/*
   346  			Release: v1.9
   347  			Testname: Kubectl, scale replication controller
   348  			Description: Create a Pod and a container with a given image. Configure replication controller to run 2 replicas. The number of running instances of the Pod MUST equal the number of replicas set on the replication controller which is 2. Update the replicaset to 1. Number of running instances of the Pod MUST be 1. Update the replicaset to 2. Number of running instances of the Pod MUST be 2.
   349  		*/
   350  		framework.ConformanceIt("should scale a replication controller", func(ctx context.Context) {
   351  			defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)
   352  			nautilusImage := imageutils.GetE2EImage(imageutils.Nautilus)
   353  
   354  			ginkgo.By("creating a replication controller")
   355  			e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
   356  			validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
   357  			ginkgo.By("scaling down the replication controller")
   358  			debugDiscovery()
   359  			e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
   360  			validateController(ctx, c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
   361  			ginkgo.By("scaling up the replication controller")
   362  			debugDiscovery()
   363  			e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
   364  			validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
   365  		})
   366  	})
   367  
   368  	ginkgo.Describe("Guestbook application", func() {
   369  		forEachGBFile := func(run func(s string)) {
   370  			guestbookRoot := "test/e2e/testing-manifests/guestbook"
   371  			for _, gbAppFile := range []string{
   372  				"agnhost-replica-service.yaml",
   373  				"agnhost-primary-service.yaml",
   374  				"frontend-service.yaml",
   375  				"frontend-deployment.yaml.in",
   376  				"agnhost-primary-deployment.yaml.in",
   377  				"agnhost-replica-deployment.yaml.in",
   378  			} {
   379  				data, err := e2etestfiles.Read(filepath.Join(guestbookRoot, gbAppFile))
   380  				if err != nil {
   381  					framework.Fail(err.Error())
   382  				}
   383  				contents := commonutils.SubstituteImageName(string(data))
   384  				run(contents)
   385  			}
   386  		}
   387  
   388  		/*
   389  			Release: v1.9
   390  			Testname: Kubectl, guestbook application
   391  			Description: Create Guestbook application that contains an agnhost primary server, 2 agnhost replicas, frontend application, frontend service and agnhost primary service and agnhost replica service. Using frontend service, the test will write an entry into the guestbook application which will store the entry into the backend agnhost store. Application flow MUST work as expected and the data written MUST be available to read.
   392  		*/
   393  		framework.ConformanceIt("should create and stop a working application", func(ctx context.Context) {
   394  			defer forEachGBFile(func(contents string) {
   395  				cleanupKubectlInputs(contents, ns)
   396  			})
   397  			ginkgo.By("creating all guestbook components")
   398  			forEachGBFile(func(contents string) {
    399  				framework.Logf("%s", contents)
   400  				e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
   401  			})
   402  
   403  			ginkgo.By("validating guestbook app")
   404  			validateGuestbookApp(ctx, c, ns)
   405  		})
   406  	})
   407  
   408  	ginkgo.Describe("Simple pod", func() {
   409  		var podYaml string
   410  		ginkgo.BeforeEach(func(ctx context.Context) {
    411  			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
    412  			ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
   413  			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
   414  			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, simplePodName, ns, framework.PodStartTimeout))
   415  		})
   416  		ginkgo.AfterEach(func() {
   417  			cleanupKubectlInputs(podYaml, ns, simplePodSelector)
   418  		})
   419  
   420  		ginkgo.It("should support exec", func(ctx context.Context) {
   421  			ginkgo.By("executing a command in the container")
   422  			execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
   423  			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
   424  				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
   425  			}
   426  
   427  			ginkgo.By("executing a very long command in the container")
   428  			veryLongData := make([]rune, 20000)
   429  			for i := 0; i < len(veryLongData); i++ {
   430  				veryLongData[i] = 'a'
   431  			}
   432  			execOutput = e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
   433  			gomega.Expect(string(veryLongData)).To(gomega.Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output")
   434  
   435  			ginkgo.By("executing a command in the container with noninteractive stdin")
   436  			execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
   437  				WithStdinData("abcd1234").
   438  				ExecOrDie(ns)
   439  			if e, a := "abcd1234", execOutput; e != a {
   440  				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
   441  			}
   442  
   443  			// pretend that we're a user in an interactive shell
   444  			r, closer, err := newBlockingReader("echo hi\nexit\n")
   445  			if err != nil {
   446  				framework.Failf("Error creating blocking reader: %v", err)
   447  			}
   448  			// NOTE this is solely for test cleanup!
   449  			defer closer.Close()
   450  
   451  			ginkgo.By("executing a command in the container with pseudo-interactive stdin")
   452  			execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
   453  				WithStdinReader(r).
   454  				ExecOrDie(ns)
   455  			if e, a := "hi", strings.TrimSpace(execOutput); e != a {
   456  				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
   457  			}
   458  		})
   459  
   460  		ginkgo.It("should support exec using resource/name", func(ctx context.Context) {
   461  			ginkgo.By("executing a command in the container")
   462  			execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
   463  			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
   464  				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
   465  			}
   466  		})
   467  
   468  		ginkgo.It("should support exec through an HTTP proxy", func(ctx context.Context) {
   469  			testContextHost := getTestContextHost()
   470  
   471  			ginkgo.By("Starting http_proxy")
   472  			var proxyLogs bytes.Buffer
   473  			testSrv := httptest.NewServer(utilnettesting.NewHTTPProxyHandler(ginkgo.GinkgoT(), func(req *http.Request) bool {
   474  				fmt.Fprintf(&proxyLogs, "Accepting %s to %s\n", req.Method, req.Host)
   475  				return true
   476  			}))
   477  			defer testSrv.Close()
   478  			proxyAddr := testSrv.URL
   479  
   480  			for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
   481  				proxyLogs.Reset()
   482  				ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
   483  				output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container").
   484  					AppendEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
   485  					ExecOrDie(ns)
   486  
   487  				// Verify we got the normal output captured by the exec server
   488  				expectedExecOutput := "running in container\n"
   489  				if output != expectedExecOutput {
    490  					framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
   491  				}
   492  
   493  				// Verify the proxy server logs saw the connection
   494  				expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(testContextHost, "https://"), "/api"))
   495  
   496  				proxyLog := proxyLogs.String()
   497  				if !strings.Contains(proxyLog, expectedProxyLog) {
   498  					framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
   499  				}
   500  			}
   501  		})
   502  
   503  		ginkgo.It("should support exec through kubectl proxy", func(ctx context.Context) {
   504  			_ = getTestContextHost()
   505  
   506  			ginkgo.By("Starting kubectl proxy")
   507  			port, proxyCmd, err := startProxyServer(ns)
   508  			framework.ExpectNoError(err)
   509  			defer framework.TryKill(proxyCmd)
   510  
   512  			host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
   513  			ginkgo.By("Running kubectl via kubectl proxy using " + host)
   514  			output := e2ekubectl.NewKubectlCommand(
   515  				ns, host,
   516  				"exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container",
   517  			).ExecOrDie(ns)
   518  
   519  			// Verify we got the normal output captured by the exec server
   520  			expectedExecOutput := "running in container\n"
   521  			if output != expectedExecOutput {
    522  				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
   523  			}
   524  		})
   525  
   526  		ginkgo.Context("should return command exit codes", func() {
   527  			ginkgo.It("execing into a container with a successful command", func(ctx context.Context) {
   528  				_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
   529  				framework.ExpectNoError(err)
   530  			})
   531  
   532  			ginkgo.It("execing into a container with a failing command", func(ctx context.Context) {
   533  				_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
   534  				ee, ok := err.(uexec.ExitError)
   535  				if !ok {
   536  					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
   537  				}
   538  				gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42))
   539  			})
   540  
   541  			ginkgo.It("should support port-forward", func(ctx context.Context) {
   542  				ginkgo.By("forwarding the container port to a local port")
   543  				cmd := runPortForward(ns, simplePodName, simplePodPort)
   544  				defer cmd.Stop()
   545  
   546  				ginkgo.By("curling local port output")
   547  				localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
   548  				body, err := curl(localAddr)
   549  				framework.Logf("got: %s", body)
   550  				if err != nil {
   551  					framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
   552  				}
   553  				if !strings.Contains(body, httpdDefaultOutput) {
   554  					framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
   555  				}
   556  			})
   557  
   558  			ginkgo.It("should handle in-cluster config", func(ctx context.Context) {
   559  				// This test does not work for dynamically linked kubectl binaries; only statically linked ones. The
   560  				// problem happens when the kubectl binary is copied to a pod in the cluster. For dynamically linked
    561  				// binaries, the necessary libraries are not also copied. For this reason, the test cannot be
    562  				// guaranteed to work with GKE, which sometimes runs tests using a dynamically linked kubectl.
   563  				e2eskipper.SkipIfProviderIs("gke")
   564  				// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
   565  				// for now this only works on amd64
   566  				e2eskipper.SkipUnlessNodeOSArchIs("amd64")
   567  
   568  				ginkgo.By("adding rbac permissions")
   569  				// grant the view permission widely to allow inspection of the `invalid` namespace and the default namespace
   570  				err := e2eauth.BindClusterRole(ctx, f.ClientSet.RbacV1(), "view", f.Namespace.Name,
   571  					rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
   572  				framework.ExpectNoError(err)
   573  
   574  				err = e2eauth.WaitForAuthorizationUpdate(ctx, f.ClientSet.AuthorizationV1(),
   575  					serviceaccount.MakeUsername(f.Namespace.Name, "default"),
   576  					f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
   577  				framework.ExpectNoError(err)
   578  
   579  				ginkgo.By("overriding icc with values provided by flags")
   580  				kubectlPath := framework.TestContext.KubectlPath
   581  				// we need the actual kubectl binary, not the script wrapper
   582  				kubectlPathNormalizer := exec.Command("which", kubectlPath)
   583  				if strings.HasSuffix(kubectlPath, "kubectl.sh") {
   584  					kubectlPathNormalizer = exec.Command(kubectlPath, "path")
   585  				}
   586  				kubectlPathNormalized, err := kubectlPathNormalizer.Output()
   587  				framework.ExpectNoError(err)
   588  				kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))
   589  
   590  				inClusterHost := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
   591  				inClusterPort := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
   592  				inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
   593  				framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
   594  				e2ekubectl.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
   595  
   596  				// Build a kubeconfig file that will make use of the injected ca and token,
   597  				// but point at the DNS host and the default namespace
   598  				tmpDir, err := os.MkdirTemp("", "icc-override")
   599  				overrideKubeconfigName := "icc-override.kubeconfig"
   600  				framework.ExpectNoError(err)
    601  				defer func() { os.RemoveAll(tmpDir) }()
   602  				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
   603  kind: Config
   604  apiVersion: v1
   605  clusters:
   606  - cluster:
   607      api-version: v1
   608      server: https://kubernetes.default.svc:443
   609      certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
   610    name: kubeconfig-cluster
   611  contexts:
   612  - context:
   613      cluster: kubeconfig-cluster
   614      namespace: default
   615      user: kubeconfig-user
   616    name: kubeconfig-context
   617  current-context: kubeconfig-context
   618  users:
   619  - name: kubeconfig-user
   620    user:
   621      tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
   622  `), os.FileMode(0755)))
   623  				framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
   624  				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
   625  
   626  				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
   627  kind: ConfigMap
   628  apiVersion: v1
   629  metadata:
   630    name: "configmap with namespace and invalid name"
   631    namespace: configmap-namespace
   632  `), os.FileMode(0755)))
   633  				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
   634  kind: ConfigMap
   635  apiVersion: v1
   636  metadata:
   637    name: "configmap without namespace and invalid name"
   638  `), os.FileMode(0755)))
   639  				framework.Logf("copying configmap manifests to the %s pod", simplePodName)
   640  				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
   641  				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
   642  
   643  				ginkgo.By("getting pods with in-cluster configs")
   644  				execOutput := e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
   645  				gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
   646  				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
   647  				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
   648  
   649  				ginkgo.By("creating an object containing a namespace with in-cluster config")
   650  				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
   651  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
   652  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
   653  
   654  				gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))
   655  
   656  				ginkgo.By("creating an object not containing a namespace with in-cluster config")
   657  				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
   658  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
   659  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
   660  				gomega.Expect(err).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))
   661  
   662  				ginkgo.By("trying to use kubectl with invalid token")
   663  				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
   664  				framework.Logf("got err %v", err)
   665  				gomega.Expect(err).To(gomega.HaveOccurred())
   666  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
   667  				gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
   668  				gomega.Expect(err).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))
   669  
   670  				ginkgo.By("trying to use kubectl with invalid server")
   671  				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
   672  				framework.Logf("got err %v", err)
   673  				gomega.Expect(err).To(gomega.HaveOccurred())
   674  				gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
   675  				gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))
   676  
   677  				ginkgo.By("trying to use kubectl with invalid namespace")
   678  				execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
   679  				gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
   680  				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
   681  				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
   682  				gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))
   683  
   684  				ginkgo.By("trying to use kubectl with kubeconfig")
   685  				execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
   686  				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
   687  				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
   688  				gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
   689  			})
   690  		})
   691  
   692  		ginkgo.Describe("Kubectl run", func() {
   693  			ginkgo.It("running a successful command", func(ctx context.Context) {
   694  				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
   695  				framework.ExpectNoError(err)
   696  			})
   697  
   698  			ginkgo.It("running a failing command", func(ctx context.Context) {
   699  				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
   700  				ee, ok := err.(uexec.ExitError)
   701  				if !ok {
   702  					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
   703  				}
   704  				gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42))
   705  			})
   706  
   707  			f.It(f.WithSlow(), "running a failing command without --restart=Never", func(ctx context.Context) {
   708  				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
   709  					WithStdinData("abcd1234").
   710  					Exec()
   711  				ee, ok := err.(uexec.ExitError)
   712  				if !ok {
   713  					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
   714  				}
   715  				if !strings.Contains(ee.String(), "timed out") {
   716  					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
   717  				}
   718  			})
   719  
   720  			f.It(f.WithSlow(), "running a failing command without --restart=Never, but with --rm", func(ctx context.Context) {
   721  				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
   722  					WithStdinData("abcd1234").
   723  					Exec()
   724  				ee, ok := err.(uexec.ExitError)
   725  				if !ok {
   726  					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
   727  				}
   728  				if !strings.Contains(ee.String(), "timed out") {
   729  					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
   730  				}
   731  				framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "failure-3", ns, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
   732  			})
   733  
   734  			f.It(f.WithSlow(), "running a failing command with --leave-stdin-open", func(ctx context.Context) {
   735  				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
   736  					WithStdinData("abcd1234").
   737  					Exec()
   738  				framework.ExpectNoError(err)
   739  			})
   740  		})
   741  
   742  		ginkgo.It("should support inline execution and attach", func(ctx context.Context) {
   743  			waitForStdinContent := func(pod, content string) string {
   744  				var logOutput string
   745  				err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
   746  					logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
   747  					return strings.Contains(logOutput, content), nil
   748  				})
   749  
   750  				framework.ExpectNoError(err, "waiting for '%v' output", content)
   751  				return logOutput
   752  			}
   753  
   754  			ginkgo.By("executing a command with run and attach with stdin")
   755  			// We wait for a non-empty line so we know kubectl has attached
   756  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
   757  				WithStdinData("value\nabcd1234").
   758  				ExecOrDie(ns)
   759  
   760  			runOutput := waitForStdinContent("run-test", "stdin closed")
   761  			gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
   762  			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
   763  			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
   764  
   765  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{}))
   766  
   767  			ginkgo.By("executing a command with run and attach without stdin")
   768  			// There is a race on this scenario described in #73099
   769  			// It fails if we are not able to attach before the container prints
   770  			// "stdin closed", but hasn't exited yet.
   771  			// We wait 10 seconds before printing to give time to kubectl to attach
   772  			// to the container, this does not solve the race though.
   773  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
   774  				WithStdinData("abcd1234").
   775  				ExecOrDie(ns)
   776  
   777  			runOutput = waitForStdinContent("run-test-2", "stdin closed")
   778  			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
   779  			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
   780  
   781  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))
   782  
   783  			ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
   784  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
   785  				WithStdinData("abcd1234\n").
   786  				ExecOrDie(ns)
   787  
   788  			runOutput = waitForStdinContent("run-test-3", "abcd1234")
   789  			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
   790  			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
   791  
   792  			g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
   793  			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
   794  			framework.ExpectNoError(err)
   795  			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, runTestPod.Name, ns, time.Minute))
   796  
   797  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{}))
   798  		})
   799  
   800  		ginkgo.It("should support inline execution and attach with websockets or fallback to spdy", func(ctx context.Context) {
   801  			waitForStdinContent := func(pod, content string) string {
   802  				var logOutput string
   803  				err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
   804  					logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
   805  					return strings.Contains(logOutput, content), nil
   806  				})
   807  				framework.ExpectNoError(err, "waiting for '%v' output", content)
   808  				return logOutput
   809  			}
   810  
   811  			ginkgo.By("executing a command with run and attach with stdin")
   812  			// We wait for a non-empty line so we know kubectl has attached
   813  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
   814  				WithStdinData("value\nabcd1234").
   815  				ExecOrDie(ns)
   816  
   817  			runOutput := waitForStdinContent("run-test", "stdin closed")
   818  			gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
   819  			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
   820  			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
   821  
   822  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{}))
   823  
   824  			ginkgo.By("executing a command with run and attach without stdin")
   825  			// There is a race on this scenario described in #73099
   826  			// It fails if we are not able to attach before the container prints
   827  			// "stdin closed", but hasn't exited yet.
   828  			// We wait 10 seconds before printing to give time to kubectl to attach
   829  			// to the container, this does not solve the race though.
   830  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
   831  				WithStdinData("abcd1234").
   832  				ExecOrDie(ns)
   833  
   834  			runOutput = waitForStdinContent("run-test-2", "stdin closed")
   835  			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
   836  			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))
   837  
   838  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))
   839  
   840  			ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
   841  			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
   842  				WithStdinData("abcd1234\n").
   843  				ExecOrDie(ns)
   844  
   845  			runOutput = waitForStdinContent("run-test-3", "abcd1234")
   846  			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
   847  			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))
   848  
   849  			g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
   850  			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
   851  			framework.ExpectNoError(err)
   852  			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, runTestPod.Name, ns, time.Minute))
   853  
   854  			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{}))
   855  		})
   856  
   857  		ginkgo.It("should contain last line of the log", func(ctx context.Context) {
   858  			podName := "run-log-test"
   859  
   860  			ginkgo.By("executing a command with run")
   861  			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+imageutils.GetE2EImage(imageutils.BusyBox), "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
   862  
   863  			if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
   864  				framework.Failf("Pod for run-log-test was not ready")
   865  			}
   866  
   867  			logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
   868  			gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
   869  		})
   870  	})
   871  
   872  	ginkgo.Describe("Kubectl api-versions", func() {
   873  		/*
   874  			Release: v1.9
   875  			Testname: Kubectl, check version v1
   876  			Description: Run kubectl to get api versions, output MUST contain returned versions with 'v1' listed.
   877  		*/
   878  		framework.ConformanceIt("should check if v1 is in available api versions", func(ctx context.Context) {
   879  			ginkgo.By("validating api versions")
   880  			output := e2ekubectl.RunKubectlOrDie(ns, "api-versions")
   881  			if !strings.Contains(output, "v1") {
   882  				framework.Failf("No v1 in kubectl api-versions")
   883  			}
   884  		})
   885  	})
   886  
   887  	ginkgo.Describe("Kubectl get componentstatuses", func() {
   888  		ginkgo.It("should get componentstatuses", func(ctx context.Context) {
   889  			ginkgo.By("getting list of componentstatuses")
   890  			output := e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
   891  			components := strings.Split(output, " ")
   892  			ginkgo.By("getting details of componentstatuses")
   893  			for _, component := range components {
   894  				ginkgo.By("getting status of " + component)
   895  				e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", component)
   896  			}
   897  		})
   898  	})
   899  
   900  	ginkgo.Describe("Kubectl prune with applyset", func() {
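         		// ApplySet-based pruning is an alpha kubectl feature: KUBECTL_APPLYSET=true must be set in the
         		// environment, and --applyset=<name> names the parent object (by default a Secret named <name>
         		// in the target namespace) that records set membership and is used to decide what to prune on
         		// later applies.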
   901  		ginkgo.It("should apply and prune objects", func(ctx context.Context) {
   902  			framework.Logf("applying manifest1")
   903  			manifest1 := `
   904  apiVersion: v1
   905  kind: ConfigMap
   906  metadata:
   907    name: cm1
   908    namespace: {{ns}}
   909  ---
   910  apiVersion: v1
   911  kind: ConfigMap
   912  metadata:
   913    name: cm2
   914    namespace: {{ns}}
   915  `
   916  
   917  			manifest1 = strings.ReplaceAll(manifest1, "{{ns}}", ns)
   918  			args := []string{"apply", "--prune", "--applyset=applyset1", "-f", "-"}
   919  			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest1).ExecOrDie(ns)
   920  
   921  			framework.Logf("checking which objects exist")
   922  			objects := mustListObjectsInNamespace(ctx, c, ns)
   923  			names := mustGetNames(objects)
   924  			if diff := cmp.Diff(names, []string{"cm1", "cm2"}); diff != "" {
   925  				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
   926  			}
   927  
   928  			framework.Logf("applying manifest2")
   929  			manifest2 := `
   930  apiVersion: v1
   931  kind: ConfigMap
   932  metadata:
   933    name: cm1
   934    namespace: {{ns}}
   935  `
   936  			manifest2 = strings.ReplaceAll(manifest2, "{{ns}}", ns)
   937  
   938  			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest2).ExecOrDie(ns)
   939  
   940  			framework.Logf("checking which objects exist")
   941  			objects = mustListObjectsInNamespace(ctx, c, ns)
   942  			names = mustGetNames(objects)
   943  			if diff := cmp.Diff(names, []string{"cm1"}); diff != "" {
   944  				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
   945  			}
   946  
   947  			framework.Logf("applying manifest2 (again)")
   948  			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest2).ExecOrDie(ns)
   949  
   950  			framework.Logf("checking which objects exist")
   951  			objects = mustListObjectsInNamespace(ctx, c, ns)
   952  			names = mustGetNames(objects)
   953  			if diff := cmp.Diff(names, []string{"cm1"}); diff != "" {
   954  				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
   955  			}
   956  		})
   957  	})
   958  
   959  	ginkgo.Describe("Kubectl apply", func() {
   960  		ginkgo.It("should apply a new configuration to an existing RC", func(ctx context.Context) {
   961  			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
   962  
   963  			ginkgo.By("creating Agnhost RC")
   964  			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
   965  			ginkgo.By("applying a modified configuration")
   966  			stdin := modifyReplicationControllerConfiguration(controllerJSON)
   967  			e2ekubectl.NewKubectlCommand(ns, "apply", "-f", "-").
   968  				WithStdinReader(stdin).
   969  				ExecOrDie(ns)
   970  			ginkgo.By("checking the result")
   971  			forEachReplicationController(ctx, c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
   972  		})
   973  		ginkgo.It("should reuse port when apply to an existing SVC", func(ctx context.Context) {
   974  			serviceJSON := readTestFileOrDie(agnhostServiceFilename)
   975  
   976  			ginkgo.By("creating Agnhost SVC")
   977  			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "create", "-f", "-")
   978  
   979  			ginkgo.By("getting the original port")
   980  			originalNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
   981  
   982  			ginkgo.By("applying the same configuration")
   983  			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON[:]), "apply", "-f", "-")
   984  
   985  			ginkgo.By("getting the port after applying configuration")
   986  			currentNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")
   987  
   988  			ginkgo.By("checking the result")
   989  			if originalNodePort != currentNodePort {
    990  				framework.Failf("port should remain the same, but changed from %s to %s", originalNodePort, currentNodePort)
   991  			}
   992  		})
   993  
   994  		ginkgo.It("apply set/view last-applied", func(ctx context.Context) {
   995  			deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename)))
   996  			deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename)))
   997  			deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
   998  
   999  			ginkgo.By("deployment replicas number is 2")
  1000  			e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")
  1001  
   1002  			ginkgo.By("check that the last-applied annotation matches expectations")
  1003  			output := e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
  1004  			requiredString := "\"replicas\": 2"
  1005  			if !strings.Contains(output, requiredString) {
  1006  				framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
  1007  			}
  1008  
   1009  			ginkgo.By("apply a file that doesn't have replicas")
  1010  			e2ekubectl.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")
  1011  
   1012  			ginkgo.By("check that last-applied has been updated and the annotation no longer has replicas")
  1013  			output = e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
  1014  			requiredString = "\"replicas\": 2"
  1015  			if strings.Contains(output, requiredString) {
   1016  				framework.Failf("Unexpected %s still present in kubectl view-last-applied", requiredString)
  1017  			}
  1018  
   1019  			ginkgo.By("scale the deployment replicas to 3")
  1020  			httpdDeploy := "httpd-deployment"
  1021  			debugDiscovery()
  1022  			e2ekubectl.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")
  1023  
   1024  			ginkgo.By("apply a file that doesn't have replicas but has a changed image")
  1025  			e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")
  1026  
   1027  			ginkgo.By("verify replicas is still 3 and the image has been updated")
  1028  			output = e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
  1029  			requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
  1030  			for _, item := range requiredItems {
  1031  				if !strings.Contains(output, item) {
  1032  					framework.Failf("Missing %s in kubectl apply", item)
  1033  				}
  1034  			}
  1035  		})
  1036  	})
  1037  
  1038  	ginkgo.Describe("Kubectl diff", func() {
  1039  		/*
  1040  			Release: v1.19
  1041  			Testname: Kubectl, diff Deployment
  1042  			Description: Create a Deployment with httpd image. Declare the same Deployment with a different image, busybox. Diff of live Deployment with declared Deployment MUST include the difference between live and declared image.
  1043  		*/
  1044  		framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func(ctx context.Context) {
  1045  			ginkgo.By("create deployment with httpd image")
  1046  			deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
  1047  			e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")
  1048  
  1049  			ginkgo.By("verify diff finds difference between live and declared image")
  1050  			deployment = strings.Replace(deployment, imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox), 1)
  1051  			if !strings.Contains(deployment, imageutils.GetE2EImage(imageutils.BusyBox)) {
  1052  				framework.Failf("Failed replacing image from %s to %s in:\n%s\n", imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox), deployment)
  1053  			}
  1054  			output, err := e2ekubectl.RunKubectlInput(ns, deployment, "diff", "-f", "-")
  1055  			if err, ok := err.(*exec.ExitError); ok && err.ExitCode() == 1 {
  1056  				framework.Failf("Expected kubectl diff exit code of 1, but got %d: %v\n", err.ExitCode(), err)
  1057  			}
  1058  			requiredItems := []string{imageutils.GetE2EImage(imageutils.Httpd), imageutils.GetE2EImage(imageutils.BusyBox)}
  1059  			for _, item := range requiredItems {
  1060  				if !strings.Contains(output, item) {
  1061  					framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
  1062  				}
  1063  			}
  1064  
  1065  			e2ekubectl.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
  1066  		})
  1067  	})
  1068  
  1069  	ginkgo.Describe("Kubectl server-side dry-run", func() {
  1070  		/*
  1071  			Release: v1.19
  1072  			Testname: Kubectl, server-side dry-run Pod
  1073  			Description: The command 'kubectl run' must create a pod with the specified image name. Afterwards, the command 'kubectl patch pod -p {...} --dry-run=server' should patch the Pod with a new image name using server-side dry-run. The image of the running Pod must not change.
  1074  		*/
  1075  		framework.ConformanceIt("should check if kubectl can dry-run update Pods", func(ctx context.Context) {
  1076  			httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
  1077  			ginkgo.By("running the image " + httpdImage)
  1078  			podName := "e2e-test-httpd-pod"
  1079  			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
  1080  
  1081  			ginkgo.By("replace the image in the pod with server-side dry-run")
  1082  			specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, imageutils.GetE2EImage(imageutils.BusyBox))
  1083  			e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")
  1084  
  1085  			ginkgo.By("verifying the pod " + podName + " has the right image " + httpdImage)
  1086  			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
  1087  			if err != nil {
  1088  				framework.Failf("Failed getting pod %s: %v", podName, err)
  1089  			}
  1090  			containers := pod.Spec.Containers
  1091  			if checkContainersImage(containers, httpdImage) {
  1092  				framework.Failf("Pod %s does not have the expected image %s after dry-run patch", podName, httpdImage)
  1093  			}
  1094  
  1095  			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
  1096  		})
  1097  	})
  1098  
  1099  	// definitionMatchesGVK returns true if the specified GVK is listed as an x-kubernetes-group-version-kind extension
  1100  	definitionMatchesGVK := func(extensions []*openapi_v2.NamedAny, desiredGVK schema.GroupVersionKind) bool {
  1101  		for _, extension := range extensions {
  1102  			if extension.GetValue().GetYaml() == "" ||
  1103  				extension.GetName() != "x-kubernetes-group-version-kind" {
  1104  				continue
  1105  			}
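        			// the extension value is a YAML list of {group, version, kind} maps; match any entry against the desired GVK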
  1106  			var values []map[string]string
  1107  			err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &values)
  1108  			if err != nil {
  1109  				framework.Logf("%v\n%s", err, string(extension.GetValue().GetYaml()))
  1110  				continue
  1111  			}
  1112  			for _, value := range values {
  1113  				if value["group"] != desiredGVK.Group {
  1114  					continue
  1115  				}
  1116  				if value["version"] != desiredGVK.Version {
  1117  					continue
  1118  				}
  1119  				if value["kind"] != desiredGVK.Kind {
  1120  					continue
  1121  				}
  1122  				return true
  1123  			}
  1124  		}
  1125  		return false
  1126  	}
  1127  
  1128  	// schemaForGVK returns a schema (if defined) for the specified GVK
  1129  	schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
  1130  		d, err := f.ClientSet.Discovery().OpenAPISchema()
  1131  		if err != nil {
  1132  			framework.Failf("%v", err)
  1133  		}
  1134  		if d == nil || d.Definitions == nil {
  1135  			return nil
  1136  		}
  1137  		for _, p := range d.Definitions.AdditionalProperties {
  1138  			if p == nil || p.Value == nil {
  1139  				continue
  1140  			}
  1141  			if !definitionMatchesGVK(p.Value.VendorExtension, desiredGVK) {
  1142  				continue
  1143  			}
  1144  			return p.Value
  1145  		}
  1146  		return nil
  1147  	}
  1148  
  1149  	ginkgo.Describe("Kubectl validation", func() {
  1150  		ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func(ctx context.Context) {
  1151  			ginkgo.By("create CRD with no validation schema")
  1152  			crd, err := crd.CreateTestCRD(f)
  1153  			if err != nil {
  1154  				framework.Failf("failed to create test CRD: %v", err)
  1155  			}
  1156  			ginkgo.DeferCleanup(crd.CleanUp)
  1157  
  1158  			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
  1159  			time.Sleep(10 * time.Second)
  1160  
  1161  			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
  1162  			randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
  1163  			if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
  1164  				framework.Failf("%v", err)
  1165  			}
  1166  		})
  1167  
  1168  		ginkgo.It("should create/apply a valid CR for CRD with validation schema", func(ctx context.Context) {
  1169  			ginkgo.By("prepare CRD with validation schema")
  1170  			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
  1171  				props := &apiextensionsv1.JSONSchemaProps{}
  1172  				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
  1173  					framework.Failf("failed to unmarshal schema: %v", err)
  1174  				}
  1175  				for i := range crd.Spec.Versions {
  1176  					crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
  1177  				}
  1178  			})
  1179  			if err != nil {
  1180  				framework.Failf("failed to create test CRD: %v", err)
  1181  			}
  1182  			ginkgo.DeferCleanup(crd.CleanUp)
  1183  
  1184  			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
  1185  			time.Sleep(10 * time.Second)
  1186  
  1187  			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
  1188  			validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
  1189  			if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
  1190  				framework.Failf("%v", err)
  1191  			}
  1192  		})
  1193  
  1194  		ginkgo.It("should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func(ctx context.Context) {
  1195  			ginkgo.By("prepare CRD with partially-specified validation schema")
  1196  			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
  1197  				props := &apiextensionsv1.JSONSchemaProps{}
  1198  				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
  1199  					framework.Failf("failed to unmarshal schema: %v", err)
  1200  				}
  1201  				// Allow for arbitrary-extra properties.
  1202  				props.XPreserveUnknownFields = pointer.BoolPtr(true)
  1203  				for i := range crd.Spec.Versions {
  1204  					crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
  1205  				}
  1206  			})
  1207  			if err != nil {
  1208  				framework.Failf("failed to create test CRD: %v", err)
  1209  			}
  1210  			ginkgo.DeferCleanup(crd.CleanUp)
  1211  
  1212  			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
  1213  			time.Sleep(10 * time.Second)
  1214  
  1215  			schema := schemaForGVK(schema.GroupVersionKind{Group: crd.Crd.Spec.Group, Version: crd.Crd.Spec.Versions[0].Name, Kind: crd.Crd.Spec.Names.Kind})
  1216  			gomega.Expect(schema).ToNot(gomega.BeNil(), "retrieving a schema for the crd")
  1217  
  1218  			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
  1219  
  1220  			// XPreserveUnknownFields is defined on the root of the schema so unknown fields within the spec
  1221  			// are still considered invalid
  1222  			invalidArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta)
  1223  			err = createApplyCustomResource(invalidArbitraryCR, f.Namespace.Name, "test-cr", crd)
  1224  			gomega.Expect(err).To(gomega.HaveOccurred(), "creating custom resource")
  1225  
  1226  			if !strings.Contains(err.Error(), `unknown field "spec.extraProperty"`) {
  1227  				framework.Failf("incorrect error from createApplyCustomResource: %v", err)
  1228  			}
  1229  
  1230  			// unknown fields on the root are considered valid
  1231  			validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]},"extraProperty":"arbitrary-value"}`, meta)
  1232  			err = createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd)
  1233  			framework.ExpectNoError(err, "creating custom resource")
  1234  		})
  1235  
  1236  		ginkgo.It("should detect unknown metadata fields in both the root and embedded object of a CR", func(ctx context.Context) {
  1237  			ginkgo.By("prepare CRD with x-kubernetes-embedded-resource: true")
  1238  			opt := func(crd *apiextensionsv1.CustomResourceDefinition) {
  1239  				props := &apiextensionsv1.JSONSchemaProps{}
  1240  				if err := yaml.Unmarshal(schemaFooEmbedded, props); err != nil {
  1241  					framework.Failf("failed to unmarshal schema: %v", err)
  1242  				}
  1243  				crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{
  1244  					{
  1245  						Name:    "v1",
  1246  						Served:  true,
  1247  						Storage: true,
  1248  						Schema: &apiextensionsv1.CustomResourceValidation{
  1249  							OpenAPIV3Schema: props,
  1250  						},
  1251  					},
  1252  				}
  1253  			}
  1254  
  1255  			group := fmt.Sprintf("%s.example.com", f.BaseName)
  1256  			testCRD, err := crd.CreateMultiVersionTestCRD(f, group, opt)
  1257  			if err != nil {
  1258  				framework.Failf("failed to create test CRD: %v", err)
  1259  			}
  1260  			ginkgo.DeferCleanup(testCRD.CleanUp)
  1261  
  1262  			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
  1263  			time.Sleep(10 * time.Second)
  1264  
  1265  			ginkgo.By("attempting to create a CR with unknown metadata fields at the root level")
  1266  			gvk := schema.GroupVersionKind{Group: testCRD.Crd.Spec.Group, Version: testCRD.Crd.Spec.Versions[0].Name, Kind: testCRD.Crd.Spec.Names.Kind}
  1267  			schema := schemaForGVK(gvk)
  1268  			gomega.Expect(schema).ToNot(gomega.BeNil(), "retrieving a schema for the crd")
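        			// embeddedCRPattern is filled with the CR's root metadata JSON, an optional extra field for the
        			// embedded object's metadata, and the embedded object's namespace.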
  1269  			embeddedCRPattern := `
  1270  
  1271  {%s,
  1272    "spec": {
  1273      "template": {
  1274        "apiVersion": "foo/v1",
  1275        "kind": "Sub",
  1276        "metadata": {
  1277          %s
  1278          "name": "subobject",
  1279          "namespace": "%s"
  1280        }
  1281      }
  1282    }
  1283  }`
  1284  			meta := unknownFieldMetadataJSON(gvk, "test-cr")
  1285  			unknownRootMetaCR := fmt.Sprintf(embeddedCRPattern, meta, "", ns)
  1286  			_, err = e2ekubectl.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-")
  1287  			if err == nil {
  1288  				framework.Failf("unexpected nil error when creating CR with unknown root metadata field")
  1289  			}
  1290  			if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
  1291  				framework.Failf("error missing root unknown metadata field, got: %v", err)
  1292  			}
  1293  			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
  1294  				framework.Failf("unexpected error, CR's root metadata namespace field unrecognized: %v", err)
  1295  			}
  1296  
  1297  			ginkgo.By("attempting to create a CR with unknown metadata fields in the embedded object")
  1298  			metaEmbedded := fmt.Sprintf(metaPattern, testCRD.Crd.Spec.Names.Kind, testCRD.Crd.Spec.Group, testCRD.Crd.Spec.Versions[0].Name, "test-cr-embedded")
  1299  			unknownEmbeddedMetaCR := fmt.Sprintf(embeddedCRPattern, metaEmbedded, `"unknownMetaEmbedded": "bar",`, ns)
  1300  			_, err = e2ekubectl.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-")
  1301  			if err == nil {
  1302  				framework.Failf("unexpected nil error when creating CR with unknown embedded metadata field")
  1303  			}
  1304  			if !(strings.Contains(err.Error(), `unknown field "unknownMetaEmbedded"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.unknownMetaEmbedded"`)) {
  1305  				framework.Failf("error missing embedded unknown metadata field, got: %v", err)
  1306  			}
  1307  			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.namespace"`) {
  1308  				framework.Failf("unexpected error, CR's embedded metadata namespace field unrecognized: %v", err)
  1309  			}
  1310  		})
  1311  
  1312  		ginkgo.It("should detect unknown metadata fields of a typed object", func(ctx context.Context) {
  1313  			ginkgo.By("calling kubectl create deployment")
  1314  			invalidMetaDeployment := `
  1315  	{
  1316  		"apiVersion": "apps/v1",
  1317  		"kind": "Deployment",
  1318  		"metadata": {
  1319  			"name": "my-dep",
  1320  			"unknownMeta": "foo",
  1321  			"labels": {"app": "nginx"}
  1322  		},
  1323  		"spec": {
  1324  			"selector": {
  1325  				"matchLabels": {
  1326  					"app": "nginx"
  1327  				}
  1328  			},
  1329  			"template": {
  1330  				"metadata": {
  1331  					"labels": {
  1332  						"app": "nginx"
  1333  					}
  1334  				},
  1335  				"spec": {
  1336  					"containers": [{
  1337  						"name":  "nginx",
  1338  						"image": "nginx:latest"
  1339  					}]
  1340  				}
  1341  			}
  1342  		}
  1343  	}
  1344  		`
  1345  			_, err := e2ekubectl.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-")
  1346  			if err == nil {
  1347  				framework.Failf("unexpected nil error when creating deployment with unknown metadata field")
  1348  			}
  1349  			if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
  1350  				framework.Failf("error missing unknown metadata field, got: %v", err)
  1351  			}
  1352  			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
  1353  				framework.Failf("unexpected error, deployment's metadata namespace field unrecognized: %v", err)
  1354  			}
  1355  
  1356  		})
  1357  	})
  1358  
  1359  	ginkgo.Describe("Kubectl cluster-info", func() {
  1360  		/*
  1361  			Release: v1.9
  1362  			Testname: Kubectl, cluster info
  1363  			Description: Call kubectl to get cluster-info; the output MUST contain the cluster-info and the Kubernetes control plane SHOULD be reported as running.
  1364  		*/
  1365  		framework.ConformanceIt("should check if Kubernetes control plane services are included in cluster-info", func(ctx context.Context) {
  1366  			ginkgo.By("validating cluster-info")
  1367  			output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info")
  1368  			// Can't check exact strings due to terminal control commands (colors)
  1369  			requiredItems := []string{"Kubernetes control plane", "is running at"}
  1370  			for _, item := range requiredItems {
  1371  				if !strings.Contains(output, item) {
  1372  					framework.Failf("Missing %s in kubectl cluster-info", item)
  1373  				}
  1374  			}
  1375  		})
  1376  	})
  1377  
  1378  	ginkgo.Describe("Kubectl cluster-info dump", func() {
  1379  		ginkgo.It("should check if cluster-info dump succeeds", func(ctx context.Context) {
  1380  			ginkgo.By("running cluster-info dump")
  1381  			e2ekubectl.RunKubectlOrDie(ns, "cluster-info", "dump")
  1382  		})
  1383  	})
  1384  
  1385  	ginkgo.Describe("Kubectl describe", func() {
  1386  		/*
  1387  			Release: v1.9
  1388  			Testname: Kubectl, describe pod or rc
  1389  			Description: Deploy an agnhost controller and an agnhost service. Kubectl describe pods SHOULD return the name, namespace, labels, state and other information as expected. Kubectl describe on rc, service, node and namespace SHOULD also return proper information.
  1390  		*/
  1391  		framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods", func(ctx context.Context) {
  1392  			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
  1393  			serviceJSON := readTestFileOrDie(agnhostServiceFilename)
  1394  
  1395  			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
  1396  			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON), "create", "-f", "-")
  1397  
  1398  			ginkgo.By("Waiting for Agnhost primary to start.")
  1399  			waitForOrFailWithDebug(ctx, 1)
  1400  
  1401  			// Pod
  1402  			forEachPod(ctx, func(pod v1.Pod) {
  1403  				output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
  1404  				requiredStrings := [][]string{
  1405  					{"Name:", "agnhost-primary-"},
  1406  					{"Namespace:", ns},
  1407  					{"Node:"},
  1408  					{"Labels:", "app=agnhost"},
  1409  					{"role=primary"},
  1410  					{"Annotations:"},
  1411  					{"Status:", "Running"},
  1412  					{"IP:"},
  1413  					{"Controlled By:", "ReplicationController/agnhost-primary"},
  1414  					{"Image:", imageutils.GetE2EImage(imageutils.Agnhost)},
  1415  					{"State:", "Running"},
  1416  					{"QoS Class:", "BestEffort"},
  1417  				}
  1418  				checkOutput(output, requiredStrings)
  1419  			})
  1420  
  1421  			// Rc
  1422  			requiredStrings := [][]string{
  1423  				{"Name:", "agnhost-primary"},
  1424  				{"Namespace:", ns},
  1425  				{"Selector:", "app=agnhost,role=primary"},
  1426  				{"Labels:", "app=agnhost"},
  1427  				{"role=primary"},
  1428  				{"Annotations:"},
  1429  				{"Replicas:", "1 current", "1 desired"},
  1430  				{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
  1431  				{"Pod Template:"},
  1432  				{"Image:", imageutils.GetE2EImage(imageutils.Agnhost)},
  1433  				{"Events:"}}
  1434  			checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")
  1435  
  1436  			// Service
  1437  			output := e2ekubectl.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
  1438  			requiredStrings = [][]string{
  1439  				{"Name:", "agnhost-primary"},
  1440  				{"Namespace:", ns},
  1441  				{"Labels:", "app=agnhost"},
  1442  				{"role=primary"},
  1443  				{"Annotations:"},
  1444  				{"Selector:", "app=agnhost", "role=primary"},
  1445  				{"Type:", "ClusterIP"},
  1446  				{"IP:"},
  1447  				{"Port:", "<unset>", "6379/TCP"},
  1448  				{"Endpoints:"},
  1449  				{"Session Affinity:", "None"}}
  1450  			checkOutput(output, requiredStrings)
  1451  
  1452  			// Node
  1453  			// It should be OK to list unschedulable Nodes here.
  1454  			nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
  1455  			framework.ExpectNoError(err)
  1456  			node := nodes.Items[0]
  1457  			output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name)
  1458  			requiredStrings = [][]string{
  1459  				{"Name:", node.Name},
  1460  				{"Labels:"},
  1461  				{"Annotations:"},
  1462  				{"CreationTimestamp:"},
  1463  				{"Conditions:"},
  1464  				{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
  1465  				{"Addresses:"},
  1466  				{"Capacity:"},
  1467  				{"Version:"},
  1468  				{"Kernel Version:"},
  1469  				{"OS Image:"},
  1470  				{"Container Runtime Version:"},
  1471  				{"Kubelet Version:"},
  1472  				{"Kube-Proxy Version:"},
  1473  				{"Pods:"}}
  1474  			checkOutput(output, requiredStrings)
  1475  
  1476  			// Namespace
  1477  			output = e2ekubectl.RunKubectlOrDie(ns, "describe", "namespace", ns)
  1478  			requiredStrings = [][]string{
  1479  				{"Name:", ns},
  1480  				{"Labels:"},
  1481  				{"Annotations:"},
  1482  				{"Status:", "Active"}}
  1483  			checkOutput(output, requiredStrings)
  1484  
  1485  			// Quota and limitrange are skipped for now.
  1486  		})
  1487  
  1488  		ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func(ctx context.Context) {
  1489  			ginkgo.By("creating a cronjob")
  1490  			cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
  1491  			e2ekubectl.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")
  1492  
  1493  			ginkgo.By("waiting for cronjob to start.")
  1494  			err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
  1495  				cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
  1496  				if err != nil {
  1497  					return false, fmt.Errorf("failed listing CronJobs in namespace %s: %w", ns, err)
  1498  				}
  1499  				return len(cj.Items) > 0, nil
  1500  			})
  1501  			framework.ExpectNoError(err)
  1502  
  1503  			ginkgo.By("verifying kubectl describe prints")
  1504  			output := e2ekubectl.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
  1505  			requiredStrings := [][]string{
  1506  				{"Name:", "cronjob-test"},
  1507  				{"Namespace:", ns},
  1508  				{"Labels:"},
  1509  				{"Annotations:"},
  1510  				{"Schedule:", "*/1 * * * *"},
  1511  				{"Concurrency Policy:", "Allow"},
  1512  				{"Suspend:", "False"},
  1513  				{"Successful Job History Limit:", "3"},
  1514  				{"Failed Job History Limit:", "1"},
  1515  				{"Starting Deadline Seconds:", "30s"},
  1516  				{"Selector:"},
  1517  				{"Parallelism:"},
  1518  				{"Completions:"},
  1519  			}
  1520  			checkOutput(output, requiredStrings)
  1521  		})
  1522  	})
  1523  
  1524  	ginkgo.Describe("Kubectl expose", func() {
  1525  		/*
  1526  			Release: v1.9
  1527  			Testname: Kubectl, create service, replication controller
  1528  			Description: Create a Pod running agnhost that listens on port 6379. Using kubectl, expose the agnhost primary replication controller at port 1234. Validate that the exposed service is listening on port 1234 and its target port is set to 6379, the port agnhost primary listens on. Using kubectl, expose that service again as a new service at port 2345. The new service MUST be listening on port 2345 and its target port MUST be set to 6379, the port agnhost primary listens on.
  1529  		*/
  1530  		framework.ConformanceIt("should create services for rc", func(ctx context.Context) {
  1531  			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
  1532  
  1533  			agnhostPort := 6379
  1534  
  1535  			ginkgo.By("creating Agnhost RC")
  1536  
  1537  			framework.Logf("namespace %v", ns)
  1538  			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
  1539  
  1540  			// It may take a while for the pods to get registered in some cases, wait to be sure.
  1541  			ginkgo.By("Waiting for Agnhost primary to start.")
  1542  			waitForOrFailWithDebug(ctx, 1)
  1543  			forEachPod(ctx, func(pod v1.Pod) {
  1544  				framework.Logf("wait on agnhost-primary startup in %v ", ns)
  1545  				e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
  1546  			})
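        			// validateService polls the named service's endpoints until they expose exactly the agnhost port,
        			// then checks that the service port and target port match the expected values.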
  1547  			validateService := func(name string, servicePort int, timeout time.Duration) {
  1548  				err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
  1549  					ep, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})
  1550  					if err != nil {
  1551  						// log the real error
  1552  						framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
  1553  
  1554  						// retry on transient errors: not found, unauthorized, or server timeout
  1555  						if apierrors.IsNotFound(err) ||
  1556  							apierrors.IsUnauthorized(err) ||
  1557  							apierrors.IsServerTimeout(err) {
  1558  							err = nil
  1559  						}
  1560  						return false, err
  1561  					}
  1562  
  1563  					uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
  1564  					if len(uidToPort) == 0 {
  1565  						framework.Logf("No endpoint found, retrying")
  1566  						return false, nil
  1567  					}
  1568  					if len(uidToPort) > 1 {
  1569  						framework.Failf("Too many endpoints found")
  1570  					}
  1571  					for _, port := range uidToPort {
  1572  						if port[0] != agnhostPort {
  1573  							framework.Failf("Wrong endpoint port: %d", port[0])
  1574  						}
  1575  					}
  1576  					return true, nil
  1577  				})
  1578  				framework.ExpectNoError(err)
  1579  
  1580  				e2eservice, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
  1581  				framework.ExpectNoError(err)
  1582  
  1583  				if len(e2eservice.Spec.Ports) != 1 {
  1584  					framework.Failf("1 port is expected")
  1585  				}
  1586  				port := e2eservice.Spec.Ports[0]
  1587  				if port.Port != int32(servicePort) {
  1588  					framework.Failf("Wrong service port: %d", port.Port)
  1589  				}
  1590  				if port.TargetPort.IntValue() != agnhostPort {
  1591  					framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
  1592  				}
  1593  			}
  1594  
  1595  			ginkgo.By("exposing RC")
  1596  			e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
  1597  			framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout))
  1598  			validateService("rm2", 1234, framework.ServiceStartTimeout)
  1599  
  1600  			ginkgo.By("exposing service")
  1601  			e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
  1602  			framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout))
  1603  			validateService("rm3", 2345, framework.ServiceStartTimeout)
  1604  		})
  1605  	})
  1606  
  1607  	ginkgo.Describe("Kubectl label", func() {
  1608  		var podYaml string
  1609  		ginkgo.BeforeEach(func(ctx context.Context) {
  1610  			ginkgo.By("creating the pod")
  1611  			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
  1612  			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
  1613  			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, pausePodName, ns, framework.PodStartTimeout))
  1614  		})
  1615  		ginkgo.AfterEach(func() {
  1616  			cleanupKubectlInputs(podYaml, ns, pausePodSelector)
  1617  		})
  1618  
  1619  		/*
  1620  			Release: v1.9
  1621  			Testname: Kubectl, label update
  1622  			Description: When a Pod is running, add a label to it using the 'kubectl label' command. The label MUST be created on the Pod. 'kubectl get pod' with the -L option MUST show that the label can be read back. Use 'kubectl label <label>-' to remove the label. 'kubectl get pod' with the -L option SHOULD no longer show the removed label.
  1623  		*/
  1624  		framework.ConformanceIt("should update the label on a resource", func(ctx context.Context) {
  1625  			labelName := "testing-label"
  1626  			labelValue := "testing-label-value"
  1627  
  1628  			ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
  1629  			e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
  1630  			ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
  1631  			output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
  1632  			if !strings.Contains(output, labelValue) {
  1633  				framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
  1634  			}
  1635  
  1636  			ginkgo.By("removing the label " + labelName + " of a pod")
  1637  			e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
  1638  			ginkgo.By("verifying the pod doesn't have the label " + labelName)
  1639  			output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
  1640  			if strings.Contains(output, labelValue) {
  1641  				framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
  1642  			}
  1643  		})
  1644  	})
  1645  
  1646  	ginkgo.Describe("Kubectl copy", func() {
  1647  		var podYaml string
  1648  		ginkgo.BeforeEach(func(ctx context.Context) {
  1649  			ginkgo.By("creating the pod")
  1650  			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
  1651  			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
  1652  			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, busyboxPodName, ns, framework.PodStartTimeout))
  1653  		})
  1654  		ginkgo.AfterEach(func() {
  1655  			cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
  1656  		})
  1657  
  1658  		/*
  1659  			Release: v1.12
  1660  			Testname: Kubectl, copy
  1661  			Description: When a Pod is running, copy a known file from it to a temporary local destination.
  1662  		*/
  1663  		ginkgo.It("should copy a file from a running Pod", func(ctx context.Context) {
  1664  			remoteContents := "foobar\n"
  1665  			podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
  1666  			tempDestination, err := os.CreateTemp(os.TempDir(), "copy-foobar")
  1667  			if err != nil {
  1668  				framework.Failf("Failed creating temporary destination file: %v", err)
  1669  			}
  1670  
  1671  			ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
  1672  			e2ekubectl.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
  1673  			ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
  1674  			localData, err := io.ReadAll(tempDestination)
  1675  			if err != nil {
  1676  				framework.Failf("Failed reading temporary local file: %v", err)
  1677  			}
  1678  			if string(localData) != remoteContents {
  1679  				framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
  1680  			}
  1681  		})
  1682  	})
  1683  
  1684  	ginkgo.Describe("Kubectl patch", func() {
  1685  		/*
  1686  			Release: v1.9
  1687  			Testname: Kubectl, patch to annotate
  1688  			Description: Start an agnhost replication controller. When its pod is running, add annotations using the 'kubectl patch' command. The annotation MUST be added to the running pods, and the added annotations SHOULD be readable from each of the Pods running under the replication controller.
  1689  		*/
  1690  		framework.ConformanceIt("should add annotations for pods in rc", func(ctx context.Context) {
  1691  			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
  1692  			ginkgo.By("creating Agnhost RC")
  1693  			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
  1694  			ginkgo.By("Waiting for Agnhost primary to start.")
  1695  			waitForOrFailWithDebug(ctx, 1)
  1696  			ginkgo.By("patching all pods")
  1697  			forEachPod(ctx, func(pod v1.Pod) {
  1698  				e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
  1699  			})
  1700  
  1701  			ginkgo.By("checking annotations")
  1702  			forEachPod(ctx, func(pod v1.Pod) {
  1703  				found := false
  1704  				for key, val := range pod.Annotations {
  1705  					if key == "x" && val == "y" {
  1706  						found = true
  1707  						break
  1708  					}
  1709  				}
  1710  				if !found {
  1711  					framework.Failf("Added annotation not found")
  1712  				}
  1713  			})
  1714  		})
  1715  	})
  1716  
  1717  	ginkgo.Describe("Kubectl version", func() {
  1718  		/*
  1719  			Release: v1.9
  1720  			Testname: Kubectl, version
  1721  			Description: The command 'kubectl version' MUST return the major and minor versions, GitCommit, etc. of both the Client and the Server that kubectl is configured to connect to.
  1722  		*/
  1723  		framework.ConformanceIt("should check if all data is printed", func(ctx context.Context) {
  1724  			versionString := e2ekubectl.RunKubectlOrDie(ns, "version")
  1725  			// we expect the following values: Major -> digit, Minor -> numeric followed by an optional '+', GitCommit -> alphanumeric
  1726  			requiredItems := []string{"Client Version: ", "Server Version: "}
  1727  			for _, item := range requiredItems {
  1728  				// prior to 1.28 we printed long version information
  1729  				oldMatched, _ := regexp.MatchString(item+`version.Info\{Major:"\d", Minor:"\d+\+?", GitVersion:"v\d\.\d+\.[\d\w\-\.\+]+", GitCommit:"[0-9a-f]+"`, versionString)
  1730  				// 1.28+ prints short information
  1731  				newMatched, _ := regexp.MatchString(item+`v\d\.\d+\.[\d\w\-\.\+]+`, versionString)
  1732  				// for backwards compatibility we need to match both formats, most likely until 1.30
  1733  				if !oldMatched && !newMatched {
  1734  					framework.Failf("Item %s value is not valid in %s\n", item, versionString)
  1735  				}
  1736  			}
  1737  		})
  1738  	})
  1739  
  1740  	ginkgo.Describe("Kubectl run pod", func() {
  1741  		var podName string
  1742  
  1743  		ginkgo.BeforeEach(func() {
  1744  			podName = "e2e-test-httpd-pod"
  1745  		})
  1746  
  1747  		ginkgo.AfterEach(func() {
  1748  			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
  1749  		})
  1750  
  1751  		/*
  1752  			Release: v1.9
  1753  			Testname: Kubectl, run pod
  1754  			Description: Command 'kubectl run' MUST create a pod when an image name is specified in the run command. After the run command, a pod with one container running the specified image SHOULD exist.
  1755  		*/
  1756  		framework.ConformanceIt("should create a pod from an image when restart is Never", func(ctx context.Context) {
  1757  			httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
  1758  			ginkgo.By("running the image " + httpdImage)
  1759  			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
  1760  			ginkgo.By("verifying the pod " + podName + " was created")
  1761  			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
  1762  			if err != nil {
  1763  				framework.Failf("Failed getting pod %s: %v", podName, err)
  1764  			}
  1765  			containers := pod.Spec.Containers
  1766  			if checkContainersImage(containers, httpdImage) {
  1767  				framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
  1768  			}
  1769  			if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
  1770  				framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
  1771  			}
  1772  		})
  1773  	})
  1774  
  1775  	ginkgo.Describe("Kubectl replace", func() {
  1776  		var podName string
  1777  
  1778  		ginkgo.BeforeEach(func() {
  1779  			podName = "e2e-test-httpd-pod"
  1780  		})
  1781  
  1782  		ginkgo.AfterEach(func() {
  1783  			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
  1784  		})
  1785  
  1786  		/*
  1787  			Release: v1.9
  1788  			Testname: Kubectl, replace
  1789  			Description: Command 'kubectl replace' on an existing Pod with a new spec MUST update the image of the container running in the Pod. The -f option to 'kubectl replace' SHOULD force re-creation of the resource. The new Pod SHOULD have the container with the updated image.
  1790  		*/
  1791  		framework.ConformanceIt("should update a single-container pod's image", func(ctx context.Context) {
  1792  			httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
  1793  			ginkgo.By("running the image " + httpdImage)
  1794  			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
  1795  
  1796  			ginkgo.By("verifying the pod " + podName + " is running")
  1797  			label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
  1798  			err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
  1799  			if err != nil {
  1800  				framework.Failf("Failed getting pod %s: %v", podName, err)
  1801  			}
  1802  
  1803  			ginkgo.By("verifying the pod " + podName + " was created")
  1804  			podJSON := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
  1805  			if !strings.Contains(podJSON, podName) {
  1806  				framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
  1807  			}
  1808  
  1809  			ginkgo.By("replace the image in the pod")
  1810  			busyboxImage := imageutils.GetE2EImage(imageutils.BusyBox)
  1811  			podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
  1812  			e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")
  1813  
  1814  			ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
  1815  			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
  1816  			if err != nil {
  1817  				framework.Failf("Failed getting pod %s: %v", podName, err)
  1818  			}
  1819  			containers := pod.Spec.Containers
  1820  			if checkContainersImage(containers, busyboxImage) {
  1821  				framework.Failf("Pod %s does not have the expected image %s after replace", podName, busyboxImage)
  1822  			}
  1823  		})
  1824  	})
  1825  
  1826  	ginkgo.Describe("Proxy server", func() {
  1827  		// TODO: test proxy options (static, prefix, etc)
  1828  		/*
  1829  			Release: v1.9
  1830  			Testname: Kubectl, proxy port zero
  1831  			Description: Start a proxy server on port zero by running 'kubectl proxy' with --port=0. Call the proxy server by requesting the api versions from localhost on the randomly assigned port. The proxy server MUST provide at least one version string.
  1832  		*/
  1833  		framework.ConformanceIt("should support proxy with --port 0", func(ctx context.Context) {
  1834  			ginkgo.By("starting the proxy server")
  1835  			port, cmd, err := startProxyServer(ns)
  1836  			if cmd != nil {
  1837  				defer framework.TryKill(cmd)
  1838  			}
  1839  			if err != nil {
  1840  				framework.Failf("Failed to start proxy server: %v", err)
  1841  			}
  1842  			ginkgo.By("curling proxy /api/ output")
  1843  			localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
  1844  			apiVersions, err := getAPIVersions(localAddr)
  1845  			if err != nil {
  1846  				framework.Failf("Expected at least one supported apiversion, got error %v", err)
  1847  			}
  1848  			if len(apiVersions.Versions) < 1 {
  1849  				framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
  1850  			}
  1851  		})
  1852  
  1853  		/*
  1854  			Release: v1.9
  1855  			Testname: Kubectl, proxy socket
  1856  			Description: Start a proxy server by running 'kubectl proxy' with --unix-socket=<some path>. Call the proxy server by requesting the api versions over the unix socket. The proxy server MUST provide at least one version string.
  1857  		*/
  1858  		framework.ConformanceIt("should support --unix-socket=/path", func(ctx context.Context) {
  1859  			ginkgo.By("Starting the proxy")
  1860  			tmpdir, err := os.MkdirTemp("", "kubectl-proxy-unix")
  1861  			if err != nil {
  1862  				framework.Failf("Failed to create temporary directory: %v", err)
  1863  			}
  1864  			path := filepath.Join(tmpdir, "test")
  1865  			defer os.Remove(path)
  1866  			defer os.Remove(tmpdir)
  1867  			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
  1868  			cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
  1869  			stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
  1870  			if err != nil {
  1871  				framework.Failf("Failed to start kubectl command: %v", err)
  1872  			}
  1873  			defer stdout.Close()
  1874  			defer stderr.Close()
  1875  			defer framework.TryKill(cmd)
  1876  			buf := make([]byte, 128)
  1877  			if _, err = stdout.Read(buf); err != nil {
  1878  				framework.Failf("Expected output from kubectl proxy: %v", err)
  1879  			}
  1880  			ginkgo.By("retrieving proxy /api/ output")
  1881  			_, err = curlUnix("http://unused/api", path)
  1882  			if err != nil {
  1883  				framework.Failf("Failed get of /api at %s: %v", path, err)
  1884  			}
  1885  		})
  1886  	})
  1887  
  1888  	// This test must run [Serial] because it modifies the node so it doesn't allow pods to execute on
  1889  	// it, which will affect anything else running in parallel.
  1890  	f.Describe("Kubectl taint", framework.WithSerial(), func() {
  1891  		ginkgo.It("should update the taint on a node", func(ctx context.Context) {
  1892  			testTaint := v1.Taint{
  1893  				Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
  1894  				Value:  "testing-taint-value",
  1895  				Effect: v1.TaintEffectNoSchedule,
  1896  			}
  1897  
  1898  			nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)
  1899  
  1900  			ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
  1901  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
  1902  			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)
  1903  
  1904  			ginkgo.By("verifying the node has the taint " + testTaint.ToString())
  1905  			output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1906  			requiredStrings := [][]string{
  1907  				{"Name:", nodeName},
  1908  				{"Taints:"},
  1909  				{testTaint.ToString()},
  1910  			}
  1911  			checkOutput(output, requiredStrings)
  1912  
  1913  			ginkgo.By("removing the taint " + testTaint.ToString() + " of a node")
  1914  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
  1915  			ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
  1916  			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1917  			if strings.Contains(output, testTaint.Key) {
  1918  				framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
  1919  			}
  1920  		})
  1921  
  1922  		ginkgo.It("should remove all the taints with the same key off a node", func(ctx context.Context) {
  1923  			testTaint := v1.Taint{
  1924  				Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
  1925  				Value:  "testing-taint-value",
  1926  				Effect: v1.TaintEffectNoSchedule,
  1927  			}
  1928  
  1929  			nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)
  1930  
  1931  			ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
  1932  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
  1933  			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)
  1935  
  1936  			ginkgo.By("verifying the node has the taint " + testTaint.ToString())
  1937  			output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1938  			requiredStrings := [][]string{
  1939  				{"Name:", nodeName},
  1940  				{"Taints:"},
  1941  				{testTaint.ToString()},
  1942  			}
  1943  			checkOutput(output, requiredStrings)
  1944  
  1945  			newTestTaint := v1.Taint{
  1946  				Key:    testTaint.Key,
  1947  				Value:  "another-testing-taint-value",
  1948  				Effect: v1.TaintEffectPreferNoSchedule,
  1949  			}
  1950  			ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
  1951  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
  1952  			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, newTestTaint)
  1953  
  1954  			ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
  1955  			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1956  			requiredStrings = [][]string{
  1957  				{"Name:", nodeName},
  1958  				{"Taints:"},
  1959  				{newTestTaint.ToString()},
  1960  			}
  1961  			checkOutput(output, requiredStrings)
  1962  
  1963  			noExecuteTaint := v1.Taint{
  1964  				Key:    testTaint.Key,
  1965  				Value:  "testing-taint-value-no-execute",
  1966  				Effect: v1.TaintEffectNoExecute,
  1967  			}
  1968  			ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
  1969  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
  1970  			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, noExecuteTaint)
  1971  
  1972  			ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
  1973  			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1974  			requiredStrings = [][]string{
  1975  				{"Name:", nodeName},
  1976  				{"Taints:"},
  1977  				{noExecuteTaint.ToString()},
  1978  			}
  1979  			checkOutput(output, requiredStrings)
  1980  
  1981  			ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node")
  1982  			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-")
  1983  			ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
  1984  			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
  1985  			if strings.Contains(output, testTaint.Key) {
  1986  				framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
  1987  			}
  1988  		})
  1989  	})
  1990  
  1991  	ginkgo.Describe("Kubectl events", func() {
  1992  		ginkgo.It("should show event when pod is created", func(ctx context.Context) {
  1993  			podName := "e2e-test-httpd-pod"
  1994  			httpdImage := imageutils.GetE2EImage(imageutils.Httpd)
  1995  			ginkgo.By("running the image " + httpdImage)
  1996  			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)
  1997  
  1998  			ginkgo.By("verifying the pod " + podName + " is running")
  1999  			label := labels.SelectorFromSet(map[string]string{"run": podName})
  2000  			err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
  2001  			if err != nil {
  2002  				framework.Failf("Failed getting pod %s: %v", podName, err)
  2003  			}
  2004  
  2005  			ginkgo.By("show started event for this pod")
  2006  			events := e2ekubectl.RunKubectlOrDie(ns, "events", "--for=pod/"+podName)
  2007  
  2008  			// collapse runs of whitespace into single spaces so the expected event text matches
  2009  			eventsStr := strings.Join(strings.Fields(strings.TrimSpace(events)), " ")
  2010  			if !strings.Contains(eventsStr, fmt.Sprintf("Normal Scheduled Pod/%s", podName)) {
  2011  				framework.Failf("failed to list expected event")
  2012  			}
  2013  
  2014  			ginkgo.By("expect not showing any WARNING message except timeouts")
  2015  			events = e2ekubectl.RunKubectlOrDie(ns, "events", "--types=WARNING", "--for=pod/"+podName)
  2016  			if events != "" && !strings.Contains(events, "timed out") {
  2017  				framework.Failf("unexpected WARNING event fired")
  2018  			}
  2019  		})
  2020  	})
  2021  
  2022  	ginkgo.Describe("Kubectl create quota", func() {
  2023  		ginkgo.It("should create a quota without scopes", func(ctx context.Context) {
  2024  			quotaName := "million"
  2025  
  2026  			ginkgo.By("calling kubectl quota")
  2027  			e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")
  2028  
  2029  			ginkgo.By("verifying that the quota was created")
  2030  			quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
  2031  			if err != nil {
  2032  				framework.Failf("Failed getting quota %s: %v", quotaName, err)
  2033  			}
  2034  
  2035  			if len(quota.Spec.Scopes) != 0 {
  2036  				framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
  2037  			}
  2038  			if len(quota.Spec.Hard) != 2 {
  2039  				framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
  2040  			}
  2041  			r, found := quota.Spec.Hard[v1.ResourcePods]
  2042  			if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
  2043  				framework.Failf("Expected pods=1000000, got %v", r)
  2044  			}
  2045  			r, found = quota.Spec.Hard[v1.ResourceServices]
  2046  			if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
  2047  				framework.Failf("Expected services=1000000, got %v", r)
  2048  			}
  2049  		})
  2050  
  2051  		ginkgo.It("should create a quota with scopes", func(ctx context.Context) {
  2052  			quotaName := "scopes"
  2053  
  2054  			ginkgo.By("calling kubectl quota")
  2055  			e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")
  2056  
  2057  			ginkgo.By("verifying that the quota was created")
  2058  			quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
  2059  			if err != nil {
  2060  				framework.Failf("Failed getting quota %s: %v", quotaName, err)
  2061  			}
  2062  
  2063  			if len(quota.Spec.Scopes) != 2 {
  2064  				framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
  2065  			}
  2066  			scopes := make(map[v1.ResourceQuotaScope]struct{})
  2067  			for _, scope := range quota.Spec.Scopes {
  2068  				scopes[scope] = struct{}{}
  2069  			}
  2070  			if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
  2071  				framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
  2072  			}
  2073  			if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
  2074  				framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
  2075  			}
  2076  		})
  2077  
  2078  		ginkgo.It("should reject quota with invalid scopes", func(ctx context.Context) {
  2079  			quotaName := "scopes"
  2080  
  2081  			ginkgo.By("calling kubectl quota")
  2082  			out, err := e2ekubectl.RunKubectl(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=Foo")
  2083  			if err == nil {
  2084  				framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
  2085  			}
  2086  		})
  2087  	})
  2088  
  2089  	ginkgo.Describe("kubectl wait", func() {
  2090  		ginkgo.It("should ignore not found error with --for=delete", func(ctx context.Context) {
  2091  			ginkgo.By("calling kubectl wait --for=delete")
  2092  			e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist")
  2093  			e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist")
  2094  		})
  2095  	})
  2096  
  2097  	ginkgo.Describe("kubectl subresource flag", func() {
  2098  		ginkgo.It("should not be used in a bulk GET", func() {
  2099  			ginkgo.By("calling kubectl get nodes --subresource=status")
  2100  			out, err := e2ekubectl.RunKubectl("", "get", "nodes", "--subresource=status")
  2101  			gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("Expected kubectl to fail, but it succeeded: %s", out))
  2102  			gomega.Expect(err.Error()).To(gomega.ContainSubstring("subresource cannot be used when bulk resources are specified"))
  2103  		})
  2104  		ginkgo.It("GET on status subresource of built-in type (node) returns identical info as GET on the built-in type", func(ctx context.Context) {
  2105  			ginkgo.By("first listing nodes in the cluster, and using first node of the list")
  2106  			nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
  2107  			framework.ExpectNoError(err)
  2108  			gomega.Expect(nodes.Items).ToNot(gomega.BeEmpty())
  2109  			node := nodes.Items[0]
  2110  			// Avoid comparing values of fields that might end up
  2111  			// changing between the two invocations of kubectl. We
  2112  			// compare the name and version fields.
  2113  			ginkgo.By(fmt.Sprintf("calling kubectl get nodes %s", node.Name))
  2114  			outBuiltIn := e2ekubectl.RunKubectlOrDie("", "get", "nodes", node.Name,
  2115  				"--output=jsonpath='{.metadata.name}{.status.nodeInfo.kubeletVersion}'",
  2116  			)
  2117  			ginkgo.By(fmt.Sprintf("calling kubectl get nodes %s --subresource=status", node.Name))
  2118  			outStatusSubresource := e2ekubectl.RunKubectlOrDie("", "get", "nodes", node.Name,
  2119  				"--output=jsonpath='{.metadata.name}{.status.nodeInfo.kubeletVersion}'",
  2120  				"--subresource=status",
  2121  			)
  2122  			gomega.Expect(outBuiltIn).To(gomega.Equal(outStatusSubresource))
  2123  		})
  2124  	})
  2125  })
  2126  
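        // getTestContextHost returns the API server host to use, preferring the --host flag and otherwise
        // falling back to the first server found in the kubeconfig; it fails the test if neither is set.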
  2127  func getTestContextHost() string {
  2128  	if len(framework.TestContext.Host) > 0 {
  2129  		return framework.TestContext.Host
  2130  	}
  2131  	// if there is a kubeconfig, pick the first server from it
  2132  	if framework.TestContext.KubeConfig != "" {
  2133  		c, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
  2134  		if err == nil {
  2135  			for _, v := range c.Clusters {
  2136  				if v.Server != "" {
  2137  					framework.Logf("--host variable was not set, picking up the first server from %s",
  2138  						framework.TestContext.KubeConfig)
  2139  					return v.Server
  2140  				}
  2141  			}
  2142  		}
  2143  	}
  2144  	framework.Failf("--host variable must be set to the full URI of the API server for e2e runs.")
  2145  	return ""
  2146  }
  2147  
  2148  // Checks whether the output split by line contains the required elements.
  2149  func checkOutputReturnError(output string, required [][]string) error {
  2150  	outputLines := strings.Split(output, "\n")
  2151  	currentLine := 0
  2152  	for _, requirement := range required {
  2153  		for currentLine < len(outputLines) && !strings.Contains(outputLines[currentLine], requirement[0]) {
  2154  			currentLine++
  2155  		}
  2156  		if currentLine == len(outputLines) {
  2157  			return fmt.Errorf("failed to find %s in %s", requirement[0], output)
  2158  		}
  2159  		for _, item := range requirement[1:] {
  2160  			if !strings.Contains(outputLines[currentLine], item) {
  2161  				return fmt.Errorf("failed to find %s in %s", item, outputLines[currentLine])
  2162  			}
  2163  		}
  2164  	}
  2165  	return nil
  2166  }
  2167  
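        // checkOutput fails the test if the output does not contain the required strings in order.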
  2168  func checkOutput(output string, required [][]string) {
  2169  	err := checkOutputReturnError(output, required)
  2170  	if err != nil {
  2171  		framework.Failf("%v", err)
  2172  	}
  2173  }
  2174  
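        // checkKubectlOutputWithRetry re-runs the given kubectl command for up to a minute until its output
        // contains all required strings, failing the test if it never does.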
  2175  func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
  2176  	var pollErr error
  2177  	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
  2178  		output := e2ekubectl.RunKubectlOrDie(namespace, args...)
  2179  		err := checkOutputReturnError(output, required)
  2180  		if err != nil {
  2181  			pollErr = err
  2182  			return false, nil
  2183  		}
  2184  		pollErr = nil
  2185  		return true, nil
  2186  	})
  2187  	if pollErr != nil {
  2188  		framework.Failf("%v", pollErr)
  2189  	}
  2190  	return
  2191  }
  2192  
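        // checkContainersImage reports whether the containers slice does NOT consist of exactly one container
        // running the expected image.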
  2193  func checkContainersImage(containers []v1.Container, expectImage string) bool {
  2194  	return containers == nil || len(containers) != 1 || containers[0].Image != expectImage
  2195  }
  2196  
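        // getAPIVersions fetches the given /api endpoint and parses the response into an APIVersions object.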
  2197  func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
  2198  	body, err := curl(apiEndpoint)
  2199  	if err != nil {
  2200  		return nil, fmt.Errorf("Failed http.Get of %s: %w", apiEndpoint, err)
  2201  	}
  2202  	var apiVersions metav1.APIVersions
  2203  	if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
  2204  		return nil, fmt.Errorf("Failed to parse /api output %s: %w", body, err)
  2205  	}
  2206  	return &apiVersions, nil
  2207  }
  2208  
  2209  func startProxyServer(ns string) (int, *exec.Cmd, error) {
  2210  	// Specifying port 0 indicates we want the os to pick a random port.
  2211  	tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
  2212  	cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter")
  2213  	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
  2214  	if err != nil {
  2215  		return -1, nil, err
  2216  	}
  2217  	buf := make([]byte, 128)
  2218  	var n int
  2219  	if n, err = stdout.Read(buf); err != nil {
  2220  		return -1, cmd, fmt.Errorf("failed to read from kubectl proxy stdout: %w", err)
  2221  	}
  2222  	go func() {
  2223  		out, _ := io.ReadAll(stdout)
  2224  		framework.Logf("kubectl proxy stdout: %s", string(buf[:n])+string(out))
  2225  		stdout.Close()
  2226  	}()
  2227  	go func() {
  2228  		errOut, _ := io.ReadAll(stderr)
  2229  		framework.Logf("kubectl proxy stderr: %s", string(errOut))
  2230  		stderr.Close()
  2231  	}()
  2232  	output := string(buf[:n])
  2233  	match := proxyRegexp.FindStringSubmatch(output)
  2234  	if len(match) == 2 {
  2235  		if port, err := strconv.Atoi(match[1]); err == nil {
  2236  			return port, cmd, nil
  2237  		}
  2238  	}
  2239  	return -1, cmd, fmt.Errorf("failed to parse port from proxy stdout: %s", output)
  2240  }
  2241  
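        // curlUnix performs an HTTP GET against url using a transport that dials the unix domain socket at path.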
  2242  func curlUnix(url string, path string) (string, error) {
  2243  	dial := func(ctx context.Context, proto, addr string) (net.Conn, error) {
  2244  		var d net.Dialer
  2245  		return d.DialContext(ctx, "unix", path)
  2246  	}
  2247  	transport := utilnet.SetTransportDefaults(&http.Transport{
  2248  		DialContext: dial,
  2249  	})
  2250  	return curlTransport(url, transport)
  2251  }
  2252  
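        // curlTransport performs an HTTP GET against url with the given transport and returns the response body as a string.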
  2253  func curlTransport(url string, transport *http.Transport) (string, error) {
  2254  	client := &http.Client{Transport: transport}
  2255  	resp, err := client.Get(url)
  2256  	if err != nil {
  2257  		return "", err
  2258  	}
  2259  	defer resp.Body.Close()
  2260  	body, err := io.ReadAll(resp.Body)
  2261  	if err != nil {
  2262  		return "", err
  2263  	}
  2264  	return string(body), nil
  2265  }
  2266  
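        // curl performs an HTTP GET against url using a transport with default settings.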
  2267  func curl(url string) (string, error) {
  2268  	return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
  2269  }
  2270  
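        // validateGuestbookApp waits for all guestbook frontend pods to be Running and then verifies that entries can be written to and read back from the guestbook.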
  2271  func validateGuestbookApp(ctx context.Context, c clientset.Interface, ns string) {
  2272  	framework.Logf("Waiting for all frontend pods to be Running.")
  2273  	label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
  2274  	err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
  2275  	framework.ExpectNoError(err)
  2276  	framework.Logf("Waiting for frontend to serve content.")
  2277  	if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
  2278  		framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
  2279  	}
  2280  
  2281  	framework.Logf("Trying to add a new entry to the guestbook.")
  2282  	if !waitForGuestbookResponse(ctx, c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
  2283  		framework.Failf("Cannot add a new entry to the guestbook in %v seconds.", guestbookResponseTimeout.Seconds())
  2284  	}
  2285  
  2286  	framework.Logf("Verifying that added entry can be retrieved.")
  2287  	if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
  2288  		framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
  2289  	}
  2290  }
  2291  
  2292  // waitForGuestbookResponse returns whether the expected response was received from the guestbook within the timeout.
  2293  func waitForGuestbookResponse(ctx context.Context, c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
  2294  	for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
  2295  		res, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns)
  2296  		if err == nil && res == expectedResponse {
  2297  			return true
  2298  		}
  2299  		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
  2300  	}
  2301  	return false
  2302  }
  2303  
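        // makeRequestToGuestbook issues a guestbook command against the frontend service via the API server's service proxy and returns the raw response.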
  2304  func makeRequestToGuestbook(ctx context.Context, c clientset.Interface, cmd, value string, ns string) (string, error) {
  2305  	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
  2306  	if errProxy != nil {
  2307  		return "", errProxy
  2308  	}
  2309  
  2310  	ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
  2311  	defer cancel()
  2312  
  2313  	result, err := proxyRequest.Namespace(ns).
  2314  		Name("frontend").
  2315  		Suffix("/guestbook").
  2316  		Param("cmd", cmd).
  2317  		Param("key", "messages").
  2318  		Param("value", value).
  2319  		Do(ctx).
  2320  		Raw()
  2321  	return string(result), err
  2322  }
  2323  
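        // updateDemoData is the structure of the data.json payload served by the update-demo pods.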
  2324  type updateDemoData struct {
  2325  	Image string
  2326  }
  2327  
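        // applyTestLabel is the label key added to objects by the apply tests so that the modification can be verified afterwards.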
  2328  const applyTestLabel = "kubectl.kubernetes.io/apply-test"
  2329  
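        // readReplicationControllerFromString unmarshals a manifest into a ReplicationController, failing the test on error.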
  2330  func readReplicationControllerFromString(contents string) *v1.ReplicationController {
  2331  	rc := v1.ReplicationController{}
  2332  	if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
  2333  		framework.Failf("%v", err)
  2334  	}
  2335  
  2336  	return &rc
  2337  }
  2338  
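        // modifyReplicationControllerConfiguration adds the applyTestLabel to the labels, selector and pod template of the given manifest and returns the result marshalled as JSON.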
  2339  func modifyReplicationControllerConfiguration(contents string) io.Reader {
  2340  	rc := readReplicationControllerFromString(contents)
  2341  	rc.Labels[applyTestLabel] = "ADDED"
  2342  	rc.Spec.Selector[applyTestLabel] = "ADDED"
  2343  	rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
  2344  	data, err := json.Marshal(rc)
  2345  	if err != nil {
  2346  		framework.Failf("json marshal failed: %s\n", err)
  2347  	}
  2348  
  2349  	return bytes.NewReader(data)
  2350  }
  2351  
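        // forEachReplicationController waits until replication controllers matching selectorKey=selectorValue exist in the namespace, then invokes fn on each of them; it fails the test if none are found.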
  2352  func forEachReplicationController(ctx context.Context, c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
  2353  	var rcs *v1.ReplicationControllerList
  2354  	var err error
  2355  	for t := time.Now(); time.Since(t) < framework.PodListTimeout && ctx.Err() == nil; time.Sleep(framework.Poll) {
  2356  		label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
  2357  		options := metav1.ListOptions{LabelSelector: label.String()}
  2358  		rcs, err = c.CoreV1().ReplicationControllers(ns).List(ctx, options)
  2359  		framework.ExpectNoError(err)
  2360  		if len(rcs.Items) > 0 {
  2361  			break
  2362  		}
  2363  	}
  2364  
  2365  	if rcs == nil || len(rcs.Items) == 0 {
  2366  		framework.Failf("No replication controllers found")
  2367  	}
  2368  
  2369  	for _, rc := range rcs.Items {
  2370  		fn(rc)
  2371  	}
  2372  }
  2373  
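        // validateReplicationControllerConfiguration fails the test if the "agnhost-primary" RC is missing the last-applied-config annotation or the applyTestLabel added by modifyReplicationControllerConfiguration.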
  2374  func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
  2375  	if rc.Name == "agnhost-primary" {
  2376  		if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
  2377  			framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
  2378  		}
  2379  
  2380  		if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
  2381  			framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
  2382  		}
  2383  	}
  2384  }
  2385  
  2386  // getUDData creates a validator function based on the expected image name (e.g. kitten.jpg).
  2387  // For example, if you pass "kitten.jpg", the returned validator verifies that the "image" field
  2388  // in the container's data.json contains "kitten.jpg".
  2389  func getUDData(jpgExpected string, ns string) func(context.Context, clientset.Interface, string) error {
  2390  
  2391  	// The returned validator fetches data.json from the update-demo pod and returns nil if the data is ok.
  2392  	return func(ctx context.Context, c clientset.Interface, podID string) error {
  2393  		framework.Logf("validating pod %s", podID)
  2394  
  2395  		ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
  2396  		defer cancel()
  2397  
  2398  		body, err := c.CoreV1().RESTClient().Get().
  2399  			Namespace(ns).
  2400  			Resource("pods").
  2401  			SubResource("proxy").
  2402  			Name(podID).
  2403  			Suffix("data.json").
  2404  			Do(ctx).
  2405  			Raw()
  2406  
  2407  		if err != nil {
  2408  			if ctx.Err() != nil {
  2409  				framework.Failf("Failed to retrieve data from container: %v", err)
  2410  			}
  2411  			return err
  2412  		}
  2413  		framework.Logf("got data: %s", body)
  2414  		var data updateDemoData
  2415  		if err := json.Unmarshal(body, &data); err != nil {
  2416  			return err
  2417  		}
  2418  		framework.Logf("Unmarshalled json jpg/img => %s, expecting %s.", data.Image, jpgExpected)
  2419  		if strings.Contains(data.Image, jpgExpected) {
  2420  			return nil
  2421  		}
  2422  		return fmt.Errorf("data served up in container is inaccurate, %s didn't contain %s", data.Image, jpgExpected)
  2423  	}
  2424  }
  2425  
  2426  // newBlockingReader returns a reader that allows reading the given string,
  2427  // then blocks until Close() is called on the returned closer.
  2428  //
  2429  // We're explicitly returning the reader and closer separately, because
  2430  // the closer needs to be the *os.File we get from os.Pipe(). This is required
  2431  // so the exec of kubectl can pass the underlying file descriptor to the exec
  2432  // syscall, instead of creating another os.Pipe and blocking on the io.Copy
  2433  // between the source (e.g. stdin) and the write half of the pipe.
  2434  func newBlockingReader(s string) (io.Reader, io.Closer, error) {
  2435  	r, w, err := os.Pipe()
  2436  	if err != nil {
  2437  		return nil, nil, err
  2438  	}
  2439  	w.Write([]byte(s))
  2440  	return r, w, nil
  2441  }
  2442  
  2443  // createApplyCustomResource asserts that the given CustomResource can be created and applied
  2444  // without being rejected by kubectl validation.
  2445  func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
  2446  	ginkgo.By("successfully create CR")
  2447  	if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
  2448  		return fmt.Errorf("failed to create CR %s in namespace %s: %w", resource, namespace, err)
  2449  	}
  2450  	if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
  2451  		return fmt.Errorf("failed to delete CR %s: %w", name, err)
  2452  	}
  2453  	ginkgo.By("successfully apply CR")
  2454  	if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
  2455  		return fmt.Errorf("failed to apply CR %s in namespace %s: %w", resource, namespace, err)
  2456  	}
  2457  	if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
  2458  		return fmt.Errorf("failed to delete CR %s: %w", name, err)
  2459  	}
  2460  	return nil
  2461  }
  2462  
  2463  // trimDockerRegistry trims the "docker.io/" and "library/" prefixes from the beginning of the image name.
  2464  // Community Docker installs do not prefix image names with the registry, whereas other runtimes (or Docker installed via the RHEL extras repo) do,
  2465  // so this function strips those prefixes when they are present to make image names comparable.
  2466  func trimDockerRegistry(imagename string) string {
  2467  	imagename = strings.Replace(imagename, "docker.io/", "", 1)
  2468  	return strings.Replace(imagename, "library/", "", 1)
  2469  }
  2470  
  2471  // validatorFn is the function which individual tests will implement.
  2472  // We may want it to return more than just an error, at some point.
  2473  type validatorFn func(ctx context.Context, c clientset.Interface, podID string) error
  2474  
  2475  // validateController is a generic mechanism for testing RCs that are running.
  2476  // It takes a container name, a test name, and a validator function which is plugged in by a specific test.
  2477  // "containername": the container whose state and image are checked in each pod.
  2478  // "containerImage": the name of the image we expect to be launched. Not to be confused with the images (e.g. kitten.jpg) which are validated by the validator function.
  2479  // "testname": the label selector used to find the pods; it is also bubbled up to the logging/failure messages if errors happen.
  2480  // "validator" function: given a podID and a client, performs test-specific validations against that pod.
  2481  func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
  2482  	containerImage = trimDockerRegistry(containerImage)
  2483  	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"
  2484  
  2485  	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)
  2486  
  2487  	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)
  2488  
  2489  	ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname)) // testname should be selector
  2490  waitLoop:
  2491  	for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
  2492  		getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
  2493  		pods := strings.Fields(getPodsOutput)
  2494  		if numPods := len(pods); numPods != replicas {
  2495  			ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
  2496  			continue
  2497  		}
  2498  		var runningPods []string
  2499  		for _, podID := range pods {
  2500  			running := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
  2501  			if running != "true" {
  2502  				framework.Logf("%s is created but not running", podID)
  2503  				continue waitLoop
  2504  			}
  2505  
  2506  			currentImage := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
  2507  			currentImage = trimDockerRegistry(currentImage)
  2508  			if currentImage != containerImage {
  2509  				framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
  2510  				continue waitLoop
  2511  			}
  2512  
  2513  			// Call the generic validator function here.
  2514  			// This might validate for example, that (1) getting a url works and (2) url is serving correct content.
  2515  			if err := validator(ctx, c, podID); err != nil {
  2516  				framework.Logf("%s is running right image but validator function failed: %v", podID, err)
  2517  				continue waitLoop
  2518  			}
  2519  
  2520  			framework.Logf("%s is verified up and running", podID)
  2521  			runningPods = append(runningPods, podID)
  2522  		}
  2523  		// If we reach here, then all our checks passed.
  2524  		if len(runningPods) == replicas {
  2525  			return
  2526  		}
  2527  	}
  2528  	// Reaching here means that one or more checks failed multiple times. Assuming it's not a race condition, something is broken.
  2529  	framework.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", framework.PodStartTimeout.Seconds(), testname)
  2530  }
  2531  
  2532  // mustListObjectsInNamespace queries all the objects we use for testing in the given namespace.
  2533  // Currently this is just ConfigMaps.
  2534  // We filter out "system" configmaps, like "kube-root-ca.crt".
  2535  func mustListObjectsInNamespace(ctx context.Context, c clientset.Interface, ns string) []runtime.Object {
  2536  	var objects []runtime.Object
  2537  	configMaps, err := c.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
  2538  	if err != nil {
  2539  		framework.Failf("error listing configmaps: %v", err)
  2540  	}
  2541  	for i := range configMaps.Items {
  2542  		cm := &configMaps.Items[i]
  2543  		if cm.Name == "kube-root-ca.crt" {
  2544  			// Ignore system objects
  2545  			continue
  2546  		}
  2547  		objects = append(objects, cm)
  2548  	}
  2549  	return objects
  2550  }
  2551  
  2552  // mustGetNames returns a slice containing the metadata.name for each object.
  2553  func mustGetNames(objects []runtime.Object) []string {
  2554  	var names []string
  2555  	for _, obj := range objects {
  2556  		metaAccessor, err := meta.Accessor(obj)
  2557  		if err != nil {
  2558  			framework.Failf("error getting accessor for %T: %v", obj, err)
  2559  		}
  2560  		name := metaAccessor.GetName()
  2561  		names = append(names, name)
  2562  	}
  2563  	sort.Strings(names)
  2564  	return names
  2565  }