github.com/kubernetes-sigs/blobfuse-csi-driver@v0.5.0/test/e2e/testsuites/testsuites.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"sigs.k8s.io/blobfuse-csi-driver/pkg/blobfuse"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1alpha1"
	snapshotclientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	restclientset "k8s.io/client-go/rest"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutil "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
	execTimeout = 10 * time.Second
	// Some pods can take much longer to get ready due to volume attach/detach latency.
	slowPodStartTimeout = 15 * time.Minute
	// Description that will be printed during tests.
	failedConditionDescription = "Error status code"

	poll            = 2 * time.Second
	pollLongTimeout = 5 * time.Minute
)

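// TestStorageClass wraps a StorageClass together with the client and
// namespace a test runs in, so a test can create the class and later clean
// it up with a single call each.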
type TestStorageClass struct {
	client       clientset.Interface
	storageClass *storagev1.StorageClass
	namespace    *v1.Namespace
}

func NewTestStorageClass(c clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass) *TestStorageClass {
	return &TestStorageClass{
		client:       c,
		storageClass: sc,
		namespace:    ns,
	}
}

func (t *TestStorageClass) Create() storagev1.StorageClass {
	var err error

	ginkgo.By("creating a StorageClass " + t.storageClass.Name)
	t.storageClass, err = t.client.StorageV1().StorageClasses().Create(t.storageClass)
	framework.ExpectNoError(err)
	return *t.storageClass
}

func (t *TestStorageClass) Cleanup() {
	e2elog.Logf("deleting StorageClass %s", t.storageClass.Name)
	err := t.client.StorageV1().StorageClasses().Delete(t.storageClass.Name, nil)
	framework.ExpectNoError(err)
}

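// TestVolumeSnapshotClass wraps a VolumeSnapshotClass and provides helpers
// to create snapshots from PVCs, wait for them to become ready, and delete
// them again.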
type TestVolumeSnapshotClass struct {
	client              restclientset.Interface
	volumeSnapshotClass *v1alpha1.VolumeSnapshotClass
	namespace           *v1.Namespace
}

func NewTestVolumeSnapshotClass(c restclientset.Interface, ns *v1.Namespace, vsc *v1alpha1.VolumeSnapshotClass) *TestVolumeSnapshotClass {
	return &TestVolumeSnapshotClass{
		client:              c,
		volumeSnapshotClass: vsc,
		namespace:           ns,
	}
}

func (t *TestVolumeSnapshotClass) Create() {
	ginkgo.By("creating a VolumeSnapshotClass")
	var err error
	t.volumeSnapshotClass, err = snapshotclientset.New(t.client).VolumesnapshotV1alpha1().VolumeSnapshotClasses().Create(t.volumeSnapshotClass)
	framework.ExpectNoError(err)
}

func (t *TestVolumeSnapshotClass) CreateSnapshot(pvc *v1.PersistentVolumeClaim) *v1alpha1.VolumeSnapshot {
	ginkgo.By("creating a VolumeSnapshot for " + pvc.Name)
	snapshot := &v1alpha1.VolumeSnapshot{
		TypeMeta: metav1.TypeMeta{
			Kind:       VolumeSnapshotKind,
			APIVersion: SnapshotAPIVersion,
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "volume-snapshot-",
			Namespace:    t.namespace.Name,
		},
		Spec: v1alpha1.VolumeSnapshotSpec{
			VolumeSnapshotClassName: &t.volumeSnapshotClass.Name,
			Source: &v1.TypedLocalObjectReference{
				Kind: "PersistentVolumeClaim",
				Name: pvc.Name,
			},
		},
	}
	snapshot, err := snapshotclientset.New(t.client).VolumesnapshotV1alpha1().VolumeSnapshots(t.namespace.Name).Create(snapshot)
	framework.ExpectNoError(err)
	return snapshot
}

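// ReadyToUse polls the VolumeSnapshot every 15 seconds, for up to 5 minutes,
// until its status reports ReadyToUse.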
func (t *TestVolumeSnapshotClass) ReadyToUse(snapshot *v1alpha1.VolumeSnapshot) {
	ginkgo.By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name)
	err := wait.Poll(15*time.Second, 5*time.Minute, func() (bool, error) {
		vs, err := snapshotclientset.New(t.client).VolumesnapshotV1alpha1().VolumeSnapshots(t.namespace.Name).Get(snapshot.Name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("did not see ReadyToUse: %v", err)
		}
		return vs.Status.ReadyToUse, nil
	})
	framework.ExpectNoError(err)
}

func (t *TestVolumeSnapshotClass) DeleteSnapshot(vs *v1alpha1.VolumeSnapshot) {
	ginkgo.By("deleting a VolumeSnapshot " + vs.Name)
	err := snapshotclientset.New(t.client).VolumesnapshotV1alpha1().VolumeSnapshots(t.namespace.Name).Delete(vs.Name, &metav1.DeleteOptions{})
	framework.ExpectNoError(err)
}

func (t *TestVolumeSnapshotClass) Cleanup() {
	e2elog.Logf("deleting VolumeSnapshotClass %s", t.volumeSnapshotClass.Name)
	err := snapshotclientset.New(t.client).VolumesnapshotV1alpha1().VolumeSnapshotClasses().Delete(t.volumeSnapshotClass.Name, nil)
	framework.ExpectNoError(err)
}

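// TestPreProvisionedPersistentVolume wraps a statically provisioned PV:
// requestedPersistentVolume holds the spec to submit, persistentVolume the
// object returned by the API server.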
type TestPreProvisionedPersistentVolume struct {
	client                    clientset.Interface
	persistentVolume          *v1.PersistentVolume
	requestedPersistentVolume *v1.PersistentVolume
}

func NewTestPreProvisionedPersistentVolume(c clientset.Interface, pv *v1.PersistentVolume) *TestPreProvisionedPersistentVolume {
	return &TestPreProvisionedPersistentVolume{
		client:                    c,
		requestedPersistentVolume: pv,
	}
}

func (pv *TestPreProvisionedPersistentVolume) Create() v1.PersistentVolume {
	var err error
	ginkgo.By("creating a PV")
	pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(pv.requestedPersistentVolume)
	framework.ExpectNoError(err)
	return *pv.persistentVolume
}

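// TestPersistentVolumeClaim drives the dynamic-provisioning flow of a test:
// create a PVC, wait for it to bind, validate the provisioned PV, and clean
// everything up. A typical use looks roughly like the sketch below (values
// are hypothetical; FileSystem is assumed to be the VolumeMode counterpart
// of the Block constant used in the constructors):
//
//	tpvc := NewTestPersistentVolumeClaim(client, ns, "10Gi", FileSystem, sc)
//	tpvc.Create()
//	defer tpvc.Cleanup()
//	tpvc.WaitForBound()
//	tpvc.ValidateProvisionedPersistentVolume()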
type TestPersistentVolumeClaim struct {
	client                         clientset.Interface
	claimSize                      string
	volumeMode                     v1.PersistentVolumeMode
	storageClass                   *storagev1.StorageClass
	namespace                      *v1.Namespace
	persistentVolume               *v1.PersistentVolume
	persistentVolumeClaim          *v1.PersistentVolumeClaim
	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
	dataSource                     *v1.TypedLocalObjectReference
}

func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
	mode := v1.PersistentVolumeFilesystem
	if volumeMode == Block {
		mode = v1.PersistentVolumeBlock
	}
	return &TestPersistentVolumeClaim{
		client:       c,
		claimSize:    claimSize,
		volumeMode:   mode,
		namespace:    ns,
		storageClass: sc,
	}
}

func NewTestPersistentVolumeClaimWithDataSource(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass, dataSource *v1.TypedLocalObjectReference) *TestPersistentVolumeClaim {
	mode := v1.PersistentVolumeFilesystem
	if volumeMode == Block {
		mode = v1.PersistentVolumeBlock
	}
	return &TestPersistentVolumeClaim{
		client:       c,
		claimSize:    claimSize,
		volumeMode:   mode,
		namespace:    ns,
		storageClass: sc,
		dataSource:   dataSource,
	}
}

func (t *TestPersistentVolumeClaim) Create() {
	var err error

	ginkgo.By("creating a PVC")
	storageClassName := ""
	if t.storageClass != nil {
		storageClassName = t.storageClass.Name
	}
	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(t.requestedPersistentVolumeClaim)
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
	var err error

	// Get the bound PersistentVolume
	ginkgo.By("validating provisioned PV")
	t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// Check sizes
	expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(claimCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")

	pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to requestedCapacity")

	// Check PV properties
	ginkgo.By("checking the PV")
	expectedAccessModes := t.requestedPersistentVolumeClaim.Spec.AccessModes
	gomega.Expect(t.persistentVolume.Spec.AccessModes).To(gomega.Equal(expectedAccessModes))
	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Name).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Name))
	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Namespace).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Namespace))
	// If storageClass is nil, PV was pre-provisioned with these values already set
	if t.storageClass != nil {
		gomega.Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*t.storageClass.ReclaimPolicy))
		gomega.Expect(t.persistentVolume.Spec.MountOptions).To(gomega.Equal(t.storageClass.MountOptions))
		if *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values).
				To(gomega.HaveLen(1))
		}
		if len(t.storageClass.AllowedTopologies) > 0 {
			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key).
				To(gomega.Equal(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Key))
			for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values {
				gomega.Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(gomega.ContainElement(v))
			}
		}
	}
}

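// WaitForBound waits until the PVC reaches phase Bound, then refreshes and
// returns the claim as currently stored by the API server.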
func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
	var err error

	ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("checking the PVC")
	// Get a new copy of the claim
	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(t.persistentVolumeClaim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	return *t.persistentVolumeClaim
}

func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.PersistentVolumeMode, dataSource *v1.TypedLocalObjectReference) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-",
			Namespace:    namespace,
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageClassName,
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
			},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
				},
			},
			VolumeMode: &volumeMode,
			DataSource: dataSource,
		},
	}
}

func (t *TestPersistentVolumeClaim) Cleanup() {
	e2elog.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
	err := framework.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
	framework.ExpectNoError(err)
	// Wait for the PV to get deleted if the reclaim policy is Delete. (If it's
	// Retain, there's no use waiting because the PV won't be auto-deleted and
	// it's expected for the caller to do it.) Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up the previous pod; however, it should succeed
	// in a couple of minutes.
	if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
		ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
		err := framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
		framework.ExpectNoError(err)
	}
	// Wait for the PVC to be deleted
	err = framework.WaitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy {
	return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy
}

func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) {
	err := framework.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
	framework.ExpectNoError(err)
}

func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() {
	ginkgo.By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
	err := framework.DeletePersistentVolume(t.client, t.persistentVolume.Name)
	framework.ExpectNoError(err)
	ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
	err = framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
	framework.ExpectNoError(err)
}

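// DeleteBackingVolume removes the underlying storage by calling the CSI
// driver's DeleteVolume with the PV's VolumeHandle, rather than deleting the
// PersistentVolume object itself.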
func (t *TestPersistentVolumeClaim) DeleteBackingVolume(driver *blobfuse.Driver) {
	volumeID := t.persistentVolume.Spec.CSI.VolumeHandle
	ginkgo.By(fmt.Sprintf("deleting blobfuse volume %q", volumeID))
	req := &csi.DeleteVolumeRequest{
		VolumeId: volumeID,
	}
	_, err := driver.DeleteVolume(context.Background(), req)
	if err != nil {
		ginkgo.Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err))
	}
}

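// TestDeployment runs the volume-tester command in a single-replica
// Deployment and records the name of its one pod, so tests can exec into it,
// delete it, and wait for the replacement. A rough usage sketch (command,
// volume name, and mount path are hypothetical):
//
//	td := NewTestDeployment(client, ns, "echo data > /mnt/test/out", pvc, "volume1", "/mnt/test", false)
//	td.Create()
//	defer td.Cleanup()
//	td.Exec([]string{"cat", "/mnt/test/out"}, "data")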
type TestDeployment struct {
	client     clientset.Interface
	deployment *apps.Deployment
	namespace  *v1.Namespace
	podName    string
}

func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly bool) *TestDeployment {
	generateName := "blobfuse-volume-tester-"
	selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int())
	replicas := int32(1)
	return &TestDeployment{
		client:    c,
		namespace: ns,
		deployment: &apps.Deployment{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: generateName,
			},
			Spec: apps.DeploymentSpec{
				Replicas: &replicas,
				Selector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": selectorValue},
				},
				Template: v1.PodTemplateSpec{
					ObjectMeta: metav1.ObjectMeta{
						Labels: map[string]string{"app": selectorValue},
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name:    "volume-tester",
								Image:   imageutils.GetE2EImage(imageutils.BusyBox),
								Command: []string{"/bin/sh"},
								Args:    []string{"-c", command},
								VolumeMounts: []v1.VolumeMount{
									{
										Name:      volumeName,
										MountPath: mountPath,
										ReadOnly:  readOnly,
									},
								},
							},
						},
						RestartPolicy: v1.RestartPolicyAlways,
						Volumes: []v1.Volume{
							{
								Name: volumeName,
								VolumeSource: v1.VolumeSource{
									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
										ClaimName: pvc.Name,
									},
								},
							},
						},
					},
				},
			},
		},
	}
}

func (t *TestDeployment) Create() {
	var err error
	t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(t.deployment)
	framework.ExpectNoError(err)
	err = testutil.WaitForDeploymentComplete(t.client, t.deployment, e2elog.Logf, poll, pollLongTimeout)
	framework.ExpectNoError(err)
	pods, err := getPodsForDeployment(t.client, t.deployment)
	framework.ExpectNoError(err)
	// always get first pod as there should only be one
	t.podName = pods.Items[0].Name
}

func (t *TestDeployment) WaitForPodReady() {
	pods, err := getPodsForDeployment(t.client, t.deployment)
	framework.ExpectNoError(err)
	// always get first pod as there should only be one
	pod := pods.Items[0]
	t.podName = pod.Name
	err = framework.WaitForPodRunningInNamespace(t.client, &pod)
	framework.ExpectNoError(err)
}

func (t *TestDeployment) Exec(command []string, expectedString string) {
	_, err := framework.LookForStringInPodExec(t.namespace.Name, t.podName, command, expectedString, execTimeout)
	framework.ExpectNoError(err)
}

func (t *TestDeployment) DeletePodAndWait() {
	e2elog.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name)
	err := t.client.CoreV1().Pods(t.namespace.Name).Delete(t.podName, nil)
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err))
		}
		return
	}
	e2elog.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name)
	err = framework.WaitForPodNoLongerRunningInNamespace(t.client, t.podName, t.namespace.Name)
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %v", t.podName, err))
		}
	}
}

func (t *TestDeployment) Cleanup() {
	e2elog.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name)
	body, err := t.Logs()
	if err != nil {
		e2elog.Logf("Error getting logs for pod %s: %v", t.podName, err)
	} else {
		e2elog.Logf("Pod %s has the following logs: %s", t.podName, body)
	}
	err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(t.deployment.Name, nil)
	framework.ExpectNoError(err)
}

func (t *TestDeployment) Logs() ([]byte, error) {
	return podLogs(t.client, t.podName, t.namespace.Name)
}

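// TestPod runs the volume-tester command in a bare pod with
// RestartPolicy Never; volumes are attached via SetupVolume or
// SetupRawBlockVolume before calling Create.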
type TestPod struct {
	client    clientset.Interface
	pod       *v1.Pod
	namespace *v1.Namespace
}

func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string) *TestPod {
	return &TestPod{
		client:    c,
		namespace: ns,
		pod: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "blobfuse-volume-tester-",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:         "volume-tester",
						Image:        imageutils.GetE2EImage(imageutils.BusyBox),
						Command:      []string{"/bin/sh"},
						Args:         []string{"-c", command},
						VolumeMounts: make([]v1.VolumeMount, 0),
					},
				},
				RestartPolicy: v1.RestartPolicyNever,
				Volumes:       make([]v1.Volume, 0),
			},
		},
	}
}

func (t *TestPod) Create() {
	var err error

	t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(t.pod)
	framework.ExpectNoError(err)
}

func (t *TestPod) WaitForSuccess() {
	err := framework.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name)
	framework.ExpectNoError(err)
}

func (t *TestPod) WaitForRunning() {
	err := framework.WaitForPodRunningInNamespace(t.client, t.pod)
	framework.ExpectNoError(err)
}

// podFailedCondition reports whether a pod has terminally failed; it is the
// failure analogue of framework.WaitForPodSuccessInNamespaceSlow and would
// ideally live in "k8s.io/kubernetes/test/e2e/framework".
var podFailedCondition = func(pod *v1.Pod) (bool, error) {
	switch pod.Status.Phase {
	case v1.PodFailed:
		ginkgo.By("Saw pod failure")
		return true, nil
	case v1.PodSucceeded:
		return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
	default:
		return false, nil
	}
}

func (t *TestPod) WaitForFailure() {
	err := framework.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
	framework.ExpectNoError(err)
}

func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string, readOnly bool) {
	volumeMount := v1.VolumeMount{
		Name:      name,
		MountPath: mountPath,
		ReadOnly:  readOnly,
	}
	t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvc.Name,
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

func (t *TestPod) SetupRawBlockVolume(pvc *v1.PersistentVolumeClaim, name, devicePath string) {
	volumeDevice := v1.VolumeDevice{
		Name:       name,
		DevicePath: devicePath,
	}
	t.pod.Spec.Containers[0].VolumeDevices = append(t.pod.Spec.Containers[0].VolumeDevices, volumeDevice)

	volume := v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvc.Name,
			},
		},
	}
	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
}

func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) {
	t.pod.Spec.NodeSelector = nodeSelector
}

func (t *TestPod) Cleanup() {
	cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name)
}

func (t *TestPod) Logs() ([]byte, error) {
	return podLogs(t.client, t.pod.Name, t.namespace.Name)
}

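// TestSecret wraps an Opaque Secret built from string data, so tests can
// create it in the test namespace and delete it during cleanup.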
type TestSecret struct {
	client    clientset.Interface
	secret    *v1.Secret
	namespace *v1.Namespace
}

func NewTestSecret(c clientset.Interface, ns *v1.Namespace, name string, data map[string]string) *TestSecret {
	return &TestSecret{
		client:    c,
		namespace: ns,
		secret: &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name: name,
			},
			StringData: data,
			Type:       v1.SecretTypeOpaque,
		},
	}
}

func (t *TestSecret) Create() {
	var err error
	t.secret, err = t.client.CoreV1().Secrets(t.namespace.Name).Create(t.secret)
	framework.ExpectNoError(err)
}

func (t *TestSecret) Cleanup() {
	e2elog.Logf("deleting Secret %s", t.secret.Name)
	err := t.client.CoreV1().Secrets(t.namespace.Name).Delete(t.secret.Name, nil)
	framework.ExpectNoError(err)
}

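// cleanupPodOrFail dumps the pod's logs (or the error fetching them) before
// deleting the pod, and fails the test if the deletion fails.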
func cleanupPodOrFail(client clientset.Interface, name, namespace string) {
	e2elog.Logf("deleting Pod %q/%q", namespace, name)
	body, err := podLogs(client, name, namespace)
	if err != nil {
		e2elog.Logf("Error getting logs for pod %s: %v", name, err)
	} else {
		e2elog.Logf("Pod %s has the following logs: %s", name, body)
	}
	framework.DeletePodOrFail(client, namespace, name)
}

func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) {
	return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do().Raw()
}

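// getPodsForDeployment resolves the Deployment's current ReplicaSet and
// lists the pods belonging to it.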
func getPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
	replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
	if err != nil {
		return nil, fmt.Errorf("failed to get new replica set for deployment %q: %v", deployment.Name, err)
	}
	if replicaSet == nil {
		return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
	}
	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
		return client.CoreV1().Pods(namespace).List(options)
	}
	rsList := []*apps.ReplicaSet{replicaSet}
	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
	if err != nil {
		return nil, fmt.Errorf("failed to list pods of deployment %q: %v", deployment.Name, err)
	}
	return podList, nil
}