sigs.k8s.io/blob-csi-driver@v1.24.1/test/e2e/testsuites/testsuites.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"math/rand"
	"strings"
	"time"

	"sigs.k8s.io/blob-csi-driver/pkg/blob"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/deployment"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	testutil "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	"k8s.io/utils/pointer"
)

const (
	execTimeout = 10 * time.Second
	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
	// Description that will be printed during tests
	failedConditionDescription = "Error status code"

	poll                 = 2 * time.Second
	pollLongTimeout      = 5 * time.Minute
	pollForStringTimeout = 1 * time.Minute
)

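// TestStorageClass bundles a StorageClass spec with the client and namespace
// used to create it, so a test can create the class and clean it up in one place.
//
// Illustrative usage (the composition below is an assumption about how callers
// wire these helpers together, not something this file enforces):
//
//	tsc := NewTestStorageClass(client, namespace, storageClass)
//	tsc.Create(ctx)
//	defer tsc.Cleanup(ctx)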
type TestStorageClass struct {
	client       clientset.Interface
	storageClass *storagev1.StorageClass
	namespace    *v1.Namespace
}

func NewTestStorageClass(c clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass) *TestStorageClass {
	return &TestStorageClass{
		client:       c,
		storageClass: sc,
		namespace:    ns,
	}
}

func (t *TestStorageClass) Create(ctx context.Context) storagev1.StorageClass {
	var err error

	ginkgo.By("creating a StorageClass " + t.storageClass.Name)
	t.storageClass, err = t.client.StorageV1().StorageClasses().Create(ctx, t.storageClass, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	return *t.storageClass
}

func (t *TestStorageClass) Cleanup(ctx context.Context) {
	framework.Logf("deleting StorageClass %s", t.storageClass.Name)
	err := t.client.StorageV1().StorageClasses().Delete(ctx, t.storageClass.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
}

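// TestPreProvisionedPersistentVolume creates a PersistentVolume from a
// pre-built spec; it backs the static (pre-provisioned) volume test cases.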
type TestPreProvisionedPersistentVolume struct {
	client                    clientset.Interface
	persistentVolume          *v1.PersistentVolume
	requestedPersistentVolume *v1.PersistentVolume
}

func NewTestPreProvisionedPersistentVolume(c clientset.Interface, pv *v1.PersistentVolume) *TestPreProvisionedPersistentVolume {
	return &TestPreProvisionedPersistentVolume{
		client:                    c,
		requestedPersistentVolume: pv,
	}
}

func (pv *TestPreProvisionedPersistentVolume) Create(ctx context.Context) v1.PersistentVolume {
	var err error
	ginkgo.By("creating a PV")
	pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(ctx, pv.requestedPersistentVolume, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	return *pv.persistentVolume
}

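// TestPersistentVolumeClaim tracks a PVC through its lifecycle: creation,
// binding, validation of the bound PV, and cleanup. The persistentVolume field
// is filled in by ValidateProvisionedPersistentVolume, which Cleanup relies on
// to decide whether to wait for PV deletion.
//
// Illustrative flow for a dynamically provisioned volume (volumeMode is one of
// the package's VolumeMode values; the exact sequence is the caller's choice):
//
//	tpvc := NewTestPersistentVolumeClaim(client, namespace, "10Gi", volumeMode, &storageClass)
//	tpvc.Create(ctx)
//	defer tpvc.Cleanup(ctx)
//	tpvc.WaitForBound(ctx)
//	tpvc.ValidateProvisionedPersistentVolume(ctx)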
type TestPersistentVolumeClaim struct {
	client                         clientset.Interface
	claimSize                      string
	volumeMode                     v1.PersistentVolumeMode
	storageClass                   *storagev1.StorageClass
	namespace                      *v1.Namespace
	persistentVolume               *v1.PersistentVolume
	persistentVolumeClaim          *v1.PersistentVolumeClaim
	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
	dataSource                     *v1.TypedLocalObjectReference
}

func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
	mode := v1.PersistentVolumeFilesystem
	if volumeMode == Block {
		mode = v1.PersistentVolumeBlock
	}
	return &TestPersistentVolumeClaim{
		client:       c,
		claimSize:    claimSize,
		volumeMode:   mode,
		namespace:    ns,
		storageClass: sc,
	}
}

func NewTestPersistentVolumeClaimWithDataSource(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass, dataSource *v1.TypedLocalObjectReference) *TestPersistentVolumeClaim {
	mode := v1.PersistentVolumeFilesystem
	if volumeMode == Block {
		mode = v1.PersistentVolumeBlock
	}
	return &TestPersistentVolumeClaim{
		client:       c,
		claimSize:    claimSize,
		volumeMode:   mode,
		namespace:    ns,
		storageClass: sc,
		dataSource:   dataSource,
	}
}

func (t *TestPersistentVolumeClaim) Create(ctx context.Context) {
	var err error

	ginkgo.By("creating a PVC")
	storageClassName := ""
	if t.storageClass != nil {
		storageClassName = t.storageClass.Name
	}
	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(ctx, t.requestedPersistentVolumeClaim, metav1.CreateOptions{})
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume(ctx context.Context) {
	var err error

	// Get the bound PersistentVolume
	ginkgo.By("validating provisioned PV")
	t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(ctx, t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// Check sizes
	expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(claimCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")

	pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to requestedCapacity")

	// Check PV properties
	ginkgo.By("checking the PV")
	expectedAccessModes := t.requestedPersistentVolumeClaim.Spec.AccessModes
	gomega.Expect(t.persistentVolume.Spec.AccessModes).To(gomega.Equal(expectedAccessModes))
	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Name).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Name))
	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Namespace).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Namespace))
	// If storageClass is nil, PV was pre-provisioned with these values already set
	if t.storageClass != nil {
		gomega.Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*t.storageClass.ReclaimPolicy))
		gomega.Expect(t.persistentVolume.Spec.MountOptions).To(gomega.Equal(t.storageClass.MountOptions))
		if *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values).
				To(gomega.HaveLen(1))
		}
		if len(t.storageClass.AllowedTopologies) > 0 {
			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key).
				To(gomega.Equal(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Key))
			for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values {
				gomega.Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(gomega.ContainElement(v))
			}
		}
	}
}

func (t *TestPersistentVolumeClaim) WaitForBound(ctx context.Context) v1.PersistentVolumeClaim {
	var err error

	ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("checking the PVC")
	// Get new copy of the claim
	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(ctx, t.persistentVolumeClaim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	return *t.persistentVolumeClaim
}

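// generatePVC builds the PVC spec used by Create: ReadWriteMany access mode,
// the requested size, the given storage class and volume mode, and an optional
// data source.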
func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.PersistentVolumeMode, dataSource *v1.TypedLocalObjectReference) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    namespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageClassName,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteMany,
			},
			Resources: v1.VolumeResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
				},
			},
			VolumeMode: &volumeMode,
			DataSource: dataSource,
		},
	}
}

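// Cleanup deletes the PVC and, if the reclaim policy is Delete, also waits for
// the bound PV to disappear before confirming the PVC itself is gone.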
func (t *TestPersistentVolumeClaim) Cleanup(ctx context.Context) {
	framework.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
	err := e2epv.DeletePersistentVolumeClaim(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
	framework.ExpectNoError(err)
	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
	// Retain, there's no use waiting because the PV won't be auto-deleted and
	// it's expected for the caller to do it.) Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up the previous pod; however, it should succeed
	// within a couple of minutes.
	if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
		ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
		err := e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
		framework.ExpectNoError(err)
	}
	// Wait for the PVC to be deleted
	err = waitForPersistentVolumeClaimDeleted(ctx, t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy {
	return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy
}

func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(ctx context.Context, phase v1.PersistentVolumePhase) {
	err := e2epv.WaitForPersistentVolumePhase(ctx, phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume(ctx context.Context) {
	ginkgo.By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
	err := e2epv.DeletePersistentVolume(ctx, t.client, t.persistentVolume.Name)
	framework.ExpectNoError(err)
	ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
	err = e2epv.WaitForPersistentVolumeDeleted(ctx, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
	framework.ExpectNoError(err)
}

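// DeleteBackingVolume removes the underlying blob volume by issuing a CSI
// DeleteVolume call directly against the driver, i.e. without going through
// Kubernetes PV deletion.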
func (t *TestPersistentVolumeClaim) DeleteBackingVolume(ctx context.Context, driver *blob.Driver) {
	volumeID := t.persistentVolume.Spec.CSI.VolumeHandle
	ginkgo.By(fmt.Sprintf("deleting blob volume %q", volumeID))
	req := &csi.DeleteVolumeRequest{
		VolumeId: volumeID,
	}
	_, err := driver.DeleteVolume(ctx, req)
	if err != nil {
		ginkgo.Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err))
	}
}

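// TestDeployment runs a single-replica Deployment whose pod mounts the given
// PVC, and records the pod name so tests can exec into the pod, delete it, or
// collect its logs.
//
// Illustrative usage (command and paths are examples, not fixed by this file):
//
//	deploy := NewTestDeployment(client, namespace,
//		"echo 'hello world' >> /mnt/test/data && while true; do sleep 3600; done",
//		pvc, "volume1", "/mnt/test", false)
//	deploy.Create(ctx)
//	defer deploy.Cleanup(ctx)
//	deploy.PollForStringInPodsExec([]string{"cat", "/mnt/test/data"}, "hello world")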
type TestDeployment struct {
	client     clientset.Interface
	deployment *apps.Deployment
	namespace  *v1.Namespace
	podName    string
}

func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly bool) *TestDeployment {
	generateName := "blob-volume-tester-"
	selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int())
	replicas := int32(1)
	return &TestDeployment{
		client:    c,
		namespace: ns,
		deployment: &apps.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: generateName,
			},
			Spec: apps.DeploymentSpec{
				Replicas: &replicas,
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": selectorValue},
				},
				Template: v1.PodTemplateSpec{
					ObjectMeta: metav1.ObjectMeta{
						Labels: map[string]string{"app": selectorValue},
					},
					Spec: v1.PodSpec{
						NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
						Containers: []v1.Container{
							{
								Name:    "volume-tester",
								Image:   imageutils.GetE2EImage(imageutils.BusyBox),
								Command: []string{"/bin/sh"},
								Args:    []string{"-c", command},
								VolumeMounts: []v1.VolumeMount{
									{
										Name:      volumeName,
										MountPath: mountPath,
										ReadOnly:  readOnly,
									},
								},
							},
						},
						RestartPolicy: v1.RestartPolicyAlways,
						Volumes: []v1.Volume{
							{
								Name: volumeName,
								VolumeSource: v1.VolumeSource{
									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
										ClaimName: pvc.Name,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

func (t *TestDeployment) Create(ctx context.Context) {
	var err error
	t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(ctx, t.deployment, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = testutil.WaitForDeploymentComplete(t.client, t.deployment, framework.Logf, poll, pollLongTimeout)
	framework.ExpectNoError(err)
	pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment)
	framework.ExpectNoError(err)
	// always get first pod as there should only be one
	t.podName = pods.Items[0].Name
}

func (t *TestDeployment) WaitForPodReady(ctx context.Context) {
	pods, err := deployment.GetPodsForDeployment(ctx, t.client, t.deployment)
	framework.ExpectNoError(err)
	// always get first pod as there should only be one
	pod := pods.Items[0]
	t.podName = pod.Name
	err = e2epod.WaitForPodRunningInNamespace(ctx, t.client, &pod)
	framework.ExpectNoError(err)
}

func (t *TestDeployment) Exec(command []string, expectedString string) {
	_, err := e2eoutput.LookForStringInPodExec(t.namespace.Name, t.podName, command, expectedString, execTimeout)
	framework.ExpectNoError(err)
}

func (t *TestDeployment) PollForStringInPodsExec(command []string, expectedString string) {
	pollForStringInPodsExec(t.namespace.Name, []string{t.podName}, command, expectedString)
}

func (t *TestDeployment) DeletePodAndWait(ctx context.Context) {
	framework.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name)
	err := t.client.CoreV1().Pods(t.namespace.Name).Delete(ctx, t.podName, metav1.DeleteOptions{})
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %w", t.podName, err))
		}
		return
	}
	framework.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name)
	err = e2epod.WaitForPodNotFoundInNamespace(ctx, t.client, t.podName, t.namespace.Name, e2epod.DefaultPodDeletionTimeout)
	if err != nil {
		framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %w", t.podName, err))
	}
}

func (t *TestDeployment) Cleanup(ctx context.Context) {
	framework.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name)
	body, err := t.Logs(ctx)
	if err != nil {
		framework.Logf("Error getting logs for pod %s: %v", t.podName, err)
	} else {
		framework.Logf("Pod %s has the following logs: %s", t.podName, body)
	}
	err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(ctx, t.deployment.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
}

func (t *TestDeployment) Logs(ctx context.Context) ([]byte, error) {
	return podLogs(ctx, t.client, t.podName, t.namespace.Name)
}

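// TestPod wraps a single busybox test pod; volumes are attached with the
// Setup* helpers before Create is called.
//
// Illustrative usage:
//
//	pod := NewTestPod(client, namespace, "echo hello > /mnt/test/data")
//	pod.SetupVolume(pvc, "volume1", "/mnt/test/", false)
//	pod.Create(ctx)
//	defer pod.Cleanup(ctx)
//	pod.WaitForSuccess(ctx)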
type TestPod struct {
	client    clientset.Interface
	pod       *v1.Pod
	namespace *v1.Namespace
}

func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string) *TestPod {
	return &TestPod{
		client:    c,
		namespace: ns,
		pod: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "blob-volume-tester-",
			},
			Spec: v1.PodSpec{
				NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
				Containers: []v1.Container{
					{
						Name:         "volume-tester",
						Image:        imageutils.GetE2EImage(imageutils.BusyBox),
						Command:      []string{"/bin/sh"},
						Args:         []string{"-c", command},
						VolumeMounts: make([]v1.VolumeMount, 0),
					},
				},
				RestartPolicy:                v1.RestartPolicyNever,
				Volumes:                      make([]v1.Volume, 0),
				AutomountServiceAccountToken: pointer.Bool(false),
			},
		},
	}
}

func (t *TestPod) Create(ctx context.Context) {
	var err error

	t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(ctx, t.pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
}

func (t *TestPod) WaitForSuccess(ctx context.Context) {
	err := e2epod.WaitForPodSuccessInNamespaceSlow(ctx, t.client, t.pod.Name, t.namespace.Name)
	framework.ExpectNoError(err)
}

func (t *TestPod) WaitForRunning(ctx context.Context) {
	err := e2epod.WaitForPodRunningInNamespace(ctx, t.client, t.pod)
	framework.ExpectNoError(err)
}

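// WaitForFailedMountError waits until a FailedMount event is recorded in the
// pod's namespace; typically used by tests that expect the mount to fail.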
func (t *TestPod) WaitForFailedMountError(ctx context.Context) {
	err := e2eevents.WaitTimeoutForEvent(
		ctx,
		t.client,
		t.namespace.Name,
		fields.Set{"reason": events.FailedMountVolume}.AsSelector().String(),
		"",
		pollLongTimeout)
	framework.ExpectNoError(err)
}

// Ideally this would be in "k8s.io/kubernetes/test/e2e/framework"
// Similar to framework.WaitForPodSuccessInNamespaceSlow
var podFailedCondition = func(pod *v1.Pod) (bool, error) {
	switch pod.Status.Phase {
	case v1.PodFailed:
		ginkgo.By("Saw pod failure")
		return true, nil
	case v1.PodSucceeded:
		return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
	default:
		return false, nil
	}
}

func (t *TestPod) WaitForFailure(ctx context.Context) {
	err := e2epod.WaitForPodCondition(ctx, t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
	framework.ExpectNoError(err)
}

func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string, readOnly bool) {
	volumeMount := v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
		ReadOnly:  readOnly,
	}
	t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvc.Name,
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

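// SetupRawBlockVolume attaches the PVC to the pod as a raw block device
// (volumeDevices) instead of a filesystem mount.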
func (t *TestPod) SetupRawBlockVolume(pvc *v1.PersistentVolumeClaim, name, devicePath string) {
	volumeDevice := v1.VolumeDevice{
		Name:       name,
		DevicePath: devicePath,
	}
	t.pod.Spec.Containers[0].VolumeDevices = append(t.pod.Spec.Containers[0].VolumeDevices, volumeDevice)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvc.Name,
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

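// SetupInlineVolume attaches an inline (ephemeral) CSI volume that authenticates
// via a secret in the test namespace; the volume attributes carry the secret
// reference, the target container name, and blobfuse-style mount options.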
func (t *TestPod) SetupInlineVolume(name, mountPath, secretName, containerName string, readOnly bool) {
	volumeMount := v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
		ReadOnly:  readOnly,
	}
	t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver: blob.DefaultDriverName,
				VolumeAttributes: map[string]string{
					"secretName":      secretName,
					"secretNamespace": t.namespace.Name,
					"containerName":   containerName,
					"mountOptions":    "-o allow_other --file-cache-timeout-in-seconds=240",
				},
				ReadOnly: pointer.Bool(readOnly),
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) {
	t.pod.Spec.NodeSelector = nodeSelector
}

func (t *TestPod) Cleanup(ctx context.Context) {
	cleanupPodOrFail(ctx, t.client, t.pod.Name, t.namespace.Name)
}

func (t *TestPod) Logs(ctx context.Context) ([]byte, error) {
	return podLogs(ctx, t.client, t.pod.Name, t.namespace.Name)
}

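// TestSecret creates an Opaque secret (typically storage-account credentials
// for the driver) in the test namespace and deletes it on Cleanup.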
type TestSecret struct {
	client    clientset.Interface
	secret    *v1.Secret
	namespace *v1.Namespace
}

func NewTestSecret(c clientset.Interface, ns *v1.Namespace, name string, data map[string]string) *TestSecret {
	return &TestSecret{
		client:    c,
		namespace: ns,
		secret: &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			StringData: data,
			Type:       v1.SecretTypeOpaque,
		},
	}
}

func (t *TestSecret) Create(ctx context.Context) {
	var err error
	t.secret, err = t.client.CoreV1().Secrets(t.namespace.Name).Create(ctx, t.secret, metav1.CreateOptions{})
	framework.ExpectNoError(err)
}

func (t *TestSecret) Cleanup(ctx context.Context) {
	framework.Logf("deleting Secret %s", t.secret.Name)
	err := t.client.CoreV1().Secrets(t.namespace.Name).Delete(ctx, t.secret.Name, metav1.DeleteOptions{})
	framework.ExpectNoError(err)
}

func cleanupPodOrFail(ctx context.Context, client clientset.Interface, name, namespace string) {
	framework.Logf("deleting Pod %q/%q", namespace, name)
	body, err := podLogs(ctx, client, name, namespace)
	if err != nil {
		framework.Logf("Error getting logs for pod %s: %v", name, err)
	} else {
		framework.Logf("Pod %s has the following logs: %s", name, body)
	}
	e2epod.DeletePodOrFail(ctx, client, namespace, name)
}

func podLogs(ctx context.Context, client clientset.Interface, name, namespace string) ([]byte, error) {
	return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do(ctx).Raw()
}

// waitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system, or until timeout occurs, whichever comes first.
func waitForPersistentVolumeClaimDeleted(ctx context.Context, c clientset.Interface, pvcName string, ns string, pollInterval, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(pollInterval) {
		_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvcName, metav1.GetOptions{})
		if err != nil {
			if apierrs.IsNotFound(err) {
				framework.Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
				return nil
			}
			framework.Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, pollInterval, err)
		}
	}
	return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
}

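// pollForStringWorker execs the command in a single pod via kubectl, retrying
// every poll interval until expectedString appears in stdout or
// pollForStringTimeout elapses, and reports the result on ch.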
func pollForStringWorker(namespace string, pod string, command []string, expectedString string, ch chan<- error) {
	args := append([]string{"exec", pod, "--"}, command...)
	err := wait.PollImmediate(poll, pollForStringTimeout, func() (bool, error) {
		stdout, err := e2ekubectl.RunKubectl(namespace, args...)
		if err != nil {
			framework.Logf("Error waiting for output %q in pod %q: %v.", expectedString, pod, err)
			return false, nil
		}
		if !strings.Contains(stdout, expectedString) {
			framework.Logf("The stdout did not contain output %q in pod %q, found: %q.", expectedString, pod, stdout)
			return false, nil
		}
		return true, nil
	})
	ch <- err
}

// pollForStringInPodsExec runs the command in each of the given pods in parallel, looking for expectedString in stdout.
func pollForStringInPodsExec(namespace string, pods []string, command []string, expectedString string) {
	ch := make(chan error, len(pods))
	for _, pod := range pods {
		go pollForStringWorker(namespace, pod, command, expectedString, ch)
	}
	errs := make([]error, 0, len(pods))
	for range pods {
		errs = append(errs, <-ch)
	}
	framework.ExpectNoError(errors.NewAggregate(errs), "Failed to find %q in at least one pod's output.", expectedString)
}