github.com/jmclong/azuredisk-csi-driver@v0.7.0/test/e2e/testsuites/testsuites.go

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/rand"
    23  	"time"
    24  
    25  	"sigs.k8s.io/azuredisk-csi-driver/pkg/azuredisk"
    26  
    27  	"github.com/container-storage-interface/spec/lib/go/csi"
    28  	"github.com/kubernetes-csi/external-snapshotter/pkg/apis/volumesnapshot/v1beta1"
    29  	snapshotclientset "github.com/kubernetes-csi/external-snapshotter/pkg/client/clientset/versioned"
    30  	"github.com/onsi/ginkgo"
    31  	"github.com/onsi/gomega"
    32  	apps "k8s.io/api/apps/v1"
    33  	v1 "k8s.io/api/core/v1"
    34  	storagev1 "k8s.io/api/storage/v1"
    35  	apierrs "k8s.io/apimachinery/pkg/api/errors"
    36  	"k8s.io/apimachinery/pkg/api/resource"
    37  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    38  	"k8s.io/apimachinery/pkg/fields"
    39  	"k8s.io/apimachinery/pkg/util/wait"
    40  	clientset "k8s.io/client-go/kubernetes"
    41  	restclientset "k8s.io/client-go/rest"
    42  	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
    43  	"k8s.io/kubernetes/pkg/kubelet/events"
    44  	"k8s.io/kubernetes/test/e2e/framework"
    45  	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    46  	testutil "k8s.io/kubernetes/test/utils"
    47  	imageutils "k8s.io/kubernetes/test/utils/image"
    48  )
    49  
    50  const (
    51  	execTimeout = 10 * time.Second
    52  	// Some pods can take much longer to get ready due to volume attach/detach latency.
    53  	slowPodStartTimeout = 10 * time.Minute
     54  	// Description that will be printed during tests
    55  	failedConditionDescription = "Error status code"
    56  
    57  	poll            = 2 * time.Second
    58  	pollLongTimeout = 5 * time.Minute
    59  )
    60  
    61  type TestStorageClass struct {
    62  	client       clientset.Interface
    63  	storageClass *storagev1.StorageClass
    64  	namespace    *v1.Namespace
    65  }
    66  
    67  func NewTestStorageClass(c clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass) *TestStorageClass {
    68  	return &TestStorageClass{
    69  		client:       c,
    70  		storageClass: sc,
    71  		namespace:    ns,
    72  	}
    73  }
    74  
    75  func (t *TestStorageClass) Create() storagev1.StorageClass {
    76  	var err error
    77  
    78  	ginkgo.By("creating a StorageClass " + t.storageClass.Name)
    79  	t.storageClass, err = t.client.StorageV1().StorageClasses().Create(t.storageClass)
    80  	framework.ExpectNoError(err)
    81  	return *t.storageClass
    82  }
    83  
    84  func (t *TestStorageClass) Cleanup() {
    85  	e2elog.Logf("deleting StorageClass %s", t.storageClass.Name)
    86  	err := t.client.StorageV1().StorageClasses().Delete(t.storageClass.Name, nil)
    87  	framework.ExpectNoError(err)
    88  }
    89  
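// exampleStorageClassLifecycle is a minimal usage sketch, not part of the
// original file, showing how TestStorageClass is typically driven from a
// test case: create the class up front and defer its deletion. The
// StorageClass object sc is assumed to be built by the caller.
func exampleStorageClassLifecycle(client clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass) {
	tsc := NewTestStorageClass(client, ns, sc)
	createdSC := tsc.Create()
	defer tsc.Cleanup()
	// The returned copy carries the server-assigned fields; later objects
	// (for example PVCs) reference it by name.
	e2elog.Logf("using StorageClass %q", createdSC.Name)
}
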
    90  type TestVolumeSnapshotClass struct {
    91  	client              restclientset.Interface
    92  	volumeSnapshotClass *v1beta1.VolumeSnapshotClass
    93  	namespace           *v1.Namespace
    94  }
    95  
    96  func NewTestVolumeSnapshotClass(c restclientset.Interface, ns *v1.Namespace, vsc *v1beta1.VolumeSnapshotClass) *TestVolumeSnapshotClass {
    97  	return &TestVolumeSnapshotClass{
    98  		client:              c,
    99  		volumeSnapshotClass: vsc,
   100  		namespace:           ns,
   101  	}
   102  }
   103  
   104  func (t *TestVolumeSnapshotClass) Create() {
   105  	ginkgo.By("creating a VolumeSnapshotClass")
   106  	var err error
   107  	t.volumeSnapshotClass, err = snapshotclientset.New(t.client).SnapshotV1beta1().VolumeSnapshotClasses().Create(t.volumeSnapshotClass)
   108  	framework.ExpectNoError(err)
   109  }
   110  
   111  func (t *TestVolumeSnapshotClass) CreateSnapshot(pvc *v1.PersistentVolumeClaim) *v1beta1.VolumeSnapshot {
   112  	ginkgo.By("creating a VolumeSnapshot for " + pvc.Name)
   113  	snapshot := &v1beta1.VolumeSnapshot{
   114  		TypeMeta: metav1.TypeMeta{
   115  			Kind:       VolumeSnapshotKind,
   116  			APIVersion: SnapshotAPIVersion,
   117  		},
   118  		ObjectMeta: metav1.ObjectMeta{
   119  			GenerateName: "volume-snapshot-",
   120  			Namespace:    t.namespace.Name,
   121  		},
   122  		Spec: v1beta1.VolumeSnapshotSpec{
   123  			VolumeSnapshotClassName: &t.volumeSnapshotClass.Name,
   124  			Source: v1beta1.VolumeSnapshotSource{
   125  				PersistentVolumeClaimName: &pvc.Name,
   126  			},
   127  		},
   128  	}
   129  	snapshot, err := snapshotclientset.New(t.client).SnapshotV1beta1().VolumeSnapshots(t.namespace.Name).Create(snapshot)
   130  	framework.ExpectNoError(err)
   131  	return snapshot
   132  }
   133  
   134  func (t *TestVolumeSnapshotClass) ReadyToUse(snapshot *v1beta1.VolumeSnapshot) {
   135  	ginkgo.By("waiting for VolumeSnapshot to be ready to use - " + snapshot.Name)
   136  	err := wait.Poll(15*time.Second, 5*time.Minute, func() (bool, error) {
   137  		vs, err := snapshotclientset.New(t.client).SnapshotV1beta1().VolumeSnapshots(t.namespace.Name).Get(snapshot.Name, metav1.GetOptions{})
   138  		if err != nil {
    139  			return false, fmt.Errorf("failed to get VolumeSnapshot %q: %v", snapshot.Name, err)
    140  		}
    141  		return vs.Status != nil && vs.Status.ReadyToUse != nil && *vs.Status.ReadyToUse, nil
   142  	})
   143  	framework.ExpectNoError(err)
   144  }
   145  
   146  func (t *TestVolumeSnapshotClass) DeleteSnapshot(vs *v1beta1.VolumeSnapshot) {
   147  	ginkgo.By("deleting a VolumeSnapshot " + vs.Name)
   148  	err := snapshotclientset.New(t.client).SnapshotV1beta1().VolumeSnapshots(t.namespace.Name).Delete(vs.Name, &metav1.DeleteOptions{})
   149  	framework.ExpectNoError(err)
   150  }
   151  
   152  func (t *TestVolumeSnapshotClass) Cleanup() {
    153  	// Skip deleting the VolumeSnapshotClass, otherwise the snapshot e2e test will fail; details:
   154  	// https://github.com/kubernetes-sigs/azuredisk-csi-driver/pull/260#issuecomment-583296932
   155  	e2elog.Logf("skip deleting VolumeSnapshotClass %s", t.volumeSnapshotClass.Name)
   156  	//err := snapshotclientset.New(t.client).SnapshotV1beta1().VolumeSnapshotClasses().Delete(t.volumeSnapshotClass.Name, nil)
   157  	//framework.ExpectNoError(err)
   158  }
   159  
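// exampleSnapshotLifecycle is a usage sketch, not part of the original
// file, showing the typical TestVolumeSnapshotClass flow: register the
// class, snapshot a bound PVC, wait for readiness and delete the snapshot.
// The VolumeSnapshotClass vsc and the bound claim pvc are assumed to be
// prepared by the caller; restClient is the REST client backing the
// snapshot clientset.
func exampleSnapshotLifecycle(restClient restclientset.Interface, ns *v1.Namespace, vsc *v1beta1.VolumeSnapshotClass, pvc *v1.PersistentVolumeClaim) {
	tvsc := NewTestVolumeSnapshotClass(restClient, ns, vsc)
	tvsc.Create()
	defer tvsc.Cleanup()

	snapshot := tvsc.CreateSnapshot(pvc)
	defer tvsc.DeleteSnapshot(snapshot)
	tvsc.ReadyToUse(snapshot)
}
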
   160  type TestPreProvisionedPersistentVolume struct {
   161  	client                    clientset.Interface
   162  	persistentVolume          *v1.PersistentVolume
   163  	requestedPersistentVolume *v1.PersistentVolume
   164  }
   165  
   166  func NewTestPreProvisionedPersistentVolume(c clientset.Interface, pv *v1.PersistentVolume) *TestPreProvisionedPersistentVolume {
   167  	return &TestPreProvisionedPersistentVolume{
   168  		client:                    c,
   169  		requestedPersistentVolume: pv,
   170  	}
   171  }
   172  
   173  func (pv *TestPreProvisionedPersistentVolume) Create() v1.PersistentVolume {
   174  	var err error
   175  	ginkgo.By("creating a PV")
   176  	pv.persistentVolume, err = pv.client.CoreV1().PersistentVolumes().Create(pv.requestedPersistentVolume)
   177  	framework.ExpectNoError(err)
   178  	return *pv.persistentVolume
   179  }
   180  
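// examplePreProvisionedVolume is a usage sketch, not part of the original
// file, for TestPreProvisionedPersistentVolume: the PersistentVolume spec,
// including the CSI source pointing at an existing Azure disk, is assumed
// to be constructed by the caller; this helper only submits it.
func examplePreProvisionedVolume(client clientset.Interface, pv *v1.PersistentVolume) v1.PersistentVolume {
	tpv := NewTestPreProvisionedPersistentVolume(client, pv)
	return tpv.Create()
}
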
   181  type TestPersistentVolumeClaim struct {
   182  	client                         clientset.Interface
   183  	claimSize                      string
   184  	volumeMode                     v1.PersistentVolumeMode
   185  	storageClass                   *storagev1.StorageClass
   186  	namespace                      *v1.Namespace
   187  	persistentVolume               *v1.PersistentVolume
   188  	persistentVolumeClaim          *v1.PersistentVolumeClaim
   189  	requestedPersistentVolumeClaim *v1.PersistentVolumeClaim
   190  	dataSource                     *v1.TypedLocalObjectReference
   191  }
   192  
   193  func NewTestPersistentVolumeClaim(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass) *TestPersistentVolumeClaim {
   194  	mode := v1.PersistentVolumeFilesystem
   195  	if volumeMode == Block {
   196  		mode = v1.PersistentVolumeBlock
   197  	}
   198  	return &TestPersistentVolumeClaim{
   199  		client:       c,
   200  		claimSize:    claimSize,
   201  		volumeMode:   mode,
   202  		namespace:    ns,
   203  		storageClass: sc,
   204  	}
   205  }
   206  
   207  func NewTestPersistentVolumeClaimWithDataSource(c clientset.Interface, ns *v1.Namespace, claimSize string, volumeMode VolumeMode, sc *storagev1.StorageClass, dataSource *v1.TypedLocalObjectReference) *TestPersistentVolumeClaim {
   208  	mode := v1.PersistentVolumeFilesystem
   209  	if volumeMode == Block {
   210  		mode = v1.PersistentVolumeBlock
   211  	}
   212  	return &TestPersistentVolumeClaim{
   213  		client:       c,
   214  		claimSize:    claimSize,
   215  		volumeMode:   mode,
   216  		namespace:    ns,
   217  		storageClass: sc,
   218  		dataSource:   dataSource,
   219  	}
   220  }
   221  
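// exampleClaimFromSnapshot is a usage sketch, not part of the original
// file, showing how a claim restored from a snapshot is requested with
// NewTestPersistentVolumeClaimWithDataSource. The "snapshot.storage.k8s.io"
// API group is the one served by the external-snapshotter v1beta1 CRDs; the
// claim size is illustrative and the volume mode is passed in by the caller.
func exampleClaimFromSnapshot(client clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass, snapshot *v1beta1.VolumeSnapshot, mode VolumeMode) *TestPersistentVolumeClaim {
	apiGroup := "snapshot.storage.k8s.io"
	dataSource := &v1.TypedLocalObjectReference{
		APIGroup: &apiGroup,
		Kind:     "VolumeSnapshot",
		Name:     snapshot.Name,
	}
	return NewTestPersistentVolumeClaimWithDataSource(client, ns, "10Gi", mode, sc, dataSource)
}
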
   222  func (t *TestPersistentVolumeClaim) Create() {
   223  	var err error
   224  
   225  	ginkgo.By("creating a PVC")
   226  	storageClassName := ""
   227  	if t.storageClass != nil {
   228  		storageClassName = t.storageClass.Name
   229  	}
   230  	t.requestedPersistentVolumeClaim = generatePVC(t.namespace.Name, storageClassName, t.claimSize, t.volumeMode, t.dataSource)
   231  	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Create(t.requestedPersistentVolumeClaim)
   232  	framework.ExpectNoError(err)
   233  }
   234  
   235  func (t *TestPersistentVolumeClaim) ValidateProvisionedPersistentVolume() {
   236  	var err error
   237  
   238  	// Get the bound PersistentVolume
   239  	ginkgo.By("validating provisioned PV")
   240  	t.persistentVolume, err = t.client.CoreV1().PersistentVolumes().Get(t.persistentVolumeClaim.Spec.VolumeName, metav1.GetOptions{})
   241  	framework.ExpectNoError(err)
   242  
   243  	// Check sizes
   244  	expectedCapacity := t.requestedPersistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
   245  	claimCapacity := t.persistentVolumeClaim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
   246  	gomega.Expect(claimCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "claimCapacity is not equal to requestedCapacity")
   247  
   248  	pvCapacity := t.persistentVolume.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
   249  	gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value()), "pvCapacity is not equal to requestedCapacity")
   250  
   251  	// Check PV properties
   252  	ginkgo.By("checking the PV")
   253  	expectedAccessModes := t.requestedPersistentVolumeClaim.Spec.AccessModes
   254  	gomega.Expect(t.persistentVolume.Spec.AccessModes).To(gomega.Equal(expectedAccessModes))
   255  	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Name).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Name))
   256  	gomega.Expect(t.persistentVolume.Spec.ClaimRef.Namespace).To(gomega.Equal(t.persistentVolumeClaim.ObjectMeta.Namespace))
   257  	// If storageClass is nil, PV was pre-provisioned with these values already set
   258  	if t.storageClass != nil {
   259  		gomega.Expect(t.persistentVolume.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*t.storageClass.ReclaimPolicy))
   260  		gomega.Expect(t.persistentVolume.Spec.MountOptions).To(gomega.Equal(t.storageClass.MountOptions))
   261  		if *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
   262  			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values).
   263  				To(gomega.HaveLen(1))
   264  		}
   265  		if len(t.storageClass.AllowedTopologies) > 0 {
   266  			gomega.Expect(t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Key).
   267  				To(gomega.Equal(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Key))
   268  			for _, v := range t.persistentVolume.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0].Values {
   269  				gomega.Expect(t.storageClass.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(gomega.ContainElement(v))
   270  			}
   271  
   272  		}
   273  	}
   274  }
   275  
   276  func (t *TestPersistentVolumeClaim) WaitForBound() v1.PersistentVolumeClaim {
   277  	var err error
   278  
   279  	ginkgo.By(fmt.Sprintf("waiting for PVC to be in phase %q", v1.ClaimBound))
   280  	err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.client, t.namespace.Name, t.persistentVolumeClaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
   281  	framework.ExpectNoError(err)
   282  
   283  	ginkgo.By("checking the PVC")
   284  	// Get new copy of the claim
   285  	t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(t.persistentVolumeClaim.Name, metav1.GetOptions{})
   286  	framework.ExpectNoError(err)
   287  
   288  	return *t.persistentVolumeClaim
   289  }
   290  
   291  func generatePVC(namespace, storageClassName, claimSize string, volumeMode v1.PersistentVolumeMode, dataSource *v1.TypedLocalObjectReference) *v1.PersistentVolumeClaim {
   292  	return &v1.PersistentVolumeClaim{
   293  		ObjectMeta: metav1.ObjectMeta{
   294  			GenerateName: "pvc-",
   295  			Namespace:    namespace,
   296  		},
   297  		Spec: v1.PersistentVolumeClaimSpec{
   298  			StorageClassName: &storageClassName,
   299  			AccessModes: []v1.PersistentVolumeAccessMode{
   300  				v1.ReadWriteOnce,
   301  			},
   302  			Resources: v1.ResourceRequirements{
   303  				Requests: v1.ResourceList{
   304  					v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
   305  				},
   306  			},
   307  			VolumeMode: &volumeMode,
   308  			DataSource: dataSource,
   309  		},
   310  	}
   311  }
   312  
   313  func (t *TestPersistentVolumeClaim) Cleanup() {
    314  	// Since the PV is created only after pod creation when the volume binding mode is WaitForFirstConsumer,
    315  	// we need to populate the PVC and PV fields in TestPersistentVolumeClaim here and validate them
   316  	if t.storageClass != nil && *t.storageClass.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
   317  		var err error
   318  		t.persistentVolumeClaim, err = t.client.CoreV1().PersistentVolumeClaims(t.namespace.Name).Get(t.persistentVolumeClaim.Name, metav1.GetOptions{})
   319  		framework.ExpectNoError(err)
   320  		t.ValidateProvisionedPersistentVolume()
   321  	}
   322  	e2elog.Logf("deleting PVC %q/%q", t.namespace.Name, t.persistentVolumeClaim.Name)
   323  	err := framework.DeletePersistentVolumeClaim(t.client, t.persistentVolumeClaim.Name, t.namespace.Name)
   324  	framework.ExpectNoError(err)
   325  	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
   326  	// Retain, there's no use waiting because the PV won't be auto-deleted and
   327  	// it's expected for the caller to do it.) Technically, the first few delete
   328  	// attempts may fail, as the volume is still attached to a node because
   329  	// kubelet is slowly cleaning up the previous pod, however it should succeed
   330  	// in a couple of minutes.
   331  	if t.persistentVolume.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
   332  		ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
   333  		err := framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
   334  		framework.ExpectNoError(err)
   335  	}
   336  	// Wait for the PVC to be deleted
   337  	err = framework.WaitForPersistentVolumeClaimDeleted(t.client, t.persistentVolumeClaim.Name, t.namespace.Name, 5*time.Second, 5*time.Minute)
   338  	framework.ExpectNoError(err)
   339  }
   340  
   341  func (t *TestPersistentVolumeClaim) ReclaimPolicy() v1.PersistentVolumeReclaimPolicy {
   342  	return t.persistentVolume.Spec.PersistentVolumeReclaimPolicy
   343  }
   344  
   345  func (t *TestPersistentVolumeClaim) WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase) {
   346  	err := framework.WaitForPersistentVolumePhase(phase, t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
   347  	framework.ExpectNoError(err)
   348  }
   349  
   350  func (t *TestPersistentVolumeClaim) DeleteBoundPersistentVolume() {
   351  	ginkgo.By(fmt.Sprintf("deleting PV %q", t.persistentVolume.Name))
   352  	err := framework.DeletePersistentVolume(t.client, t.persistentVolume.Name)
   353  	framework.ExpectNoError(err)
   354  	ginkgo.By(fmt.Sprintf("waiting for claim's PV %q to be deleted", t.persistentVolume.Name))
   355  	err = framework.WaitForPersistentVolumeDeleted(t.client, t.persistentVolume.Name, 5*time.Second, 10*time.Minute)
   356  	framework.ExpectNoError(err)
   357  }
   358  
   359  func (t *TestPersistentVolumeClaim) DeleteBackingVolume(driver *azuredisk.Driver) {
   360  	volumeID := t.persistentVolume.Spec.CSI.VolumeHandle
   361  	ginkgo.By(fmt.Sprintf("deleting azuredisk volume %q", volumeID))
   362  	req := &csi.DeleteVolumeRequest{
   363  		VolumeId: volumeID,
   364  	}
   365  	_, err := driver.DeleteVolume(context.Background(), req)
   366  	if err != nil {
   367  		ginkgo.Fail(fmt.Sprintf("could not delete volume %q: %v", volumeID, err))
   368  	}
   369  }
   370  
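// exampleDynamicProvisioning is a usage sketch, not part of the original
// file, tying the helpers above together: provision a claim from a
// StorageClass, wait for it to bind, validate the resulting PV, then run a
// pod that writes to the mounted volume. The claim size, command and mount
// path are illustrative; with a WaitForFirstConsumer StorageClass the pod
// would have to be created before the claim can bind.
func exampleDynamicProvisioning(client clientset.Interface, ns *v1.Namespace, sc *storagev1.StorageClass, mode VolumeMode) {
	tpvc := NewTestPersistentVolumeClaim(client, ns, "10Gi", mode, sc)
	tpvc.Create()
	defer tpvc.Cleanup()
	tpvc.WaitForBound()
	tpvc.ValidateProvisionedPersistentVolume()

	pod := NewTestPod(client, ns, "echo hello world > /mnt/test/data", false /*isWindows*/)
	pod.SetupVolume(tpvc.persistentVolumeClaim, "test-volume", "/mnt/test", false /*readOnly*/)
	pod.Create()
	defer pod.Cleanup()
	pod.WaitForSuccess()
}
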
   371  type TestDeployment struct {
   372  	client     clientset.Interface
   373  	deployment *apps.Deployment
   374  	namespace  *v1.Namespace
   375  	podName    string
   376  }
   377  
   378  func NewTestDeployment(c clientset.Interface, ns *v1.Namespace, command string, pvc *v1.PersistentVolumeClaim, volumeName, mountPath string, readOnly, isWindows bool) *TestDeployment {
   379  	generateName := "azuredisk-volume-tester-"
   380  	selectorValue := fmt.Sprintf("%s%d", generateName, rand.Int())
   381  	replicas := int32(1)
   382  	testDeployment := &TestDeployment{
   383  		client:    c,
   384  		namespace: ns,
   385  		deployment: &apps.Deployment{
   386  			ObjectMeta: metav1.ObjectMeta{
   387  				GenerateName: generateName,
   388  			},
   389  			Spec: apps.DeploymentSpec{
   390  				Replicas: &replicas,
   391  				Selector: &metav1.LabelSelector{
   392  					MatchLabels: map[string]string{"app": selectorValue},
   393  				},
   394  				Template: v1.PodTemplateSpec{
   395  					ObjectMeta: metav1.ObjectMeta{
   396  						Labels: map[string]string{"app": selectorValue},
   397  					},
   398  					Spec: v1.PodSpec{
   399  						Containers: []v1.Container{
   400  							{
   401  								Name:    "volume-tester",
   402  								Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   403  								Command: []string{"/bin/sh"},
   404  								Args:    []string{"-c", command},
   405  								VolumeMounts: []v1.VolumeMount{
   406  									{
   407  										Name:      volumeName,
   408  										MountPath: mountPath,
   409  										ReadOnly:  readOnly,
   410  									},
   411  								},
   412  							},
   413  						},
   414  						RestartPolicy: v1.RestartPolicyAlways,
   415  						Volumes: []v1.Volume{
   416  							{
   417  								Name: volumeName,
   418  								VolumeSource: v1.VolumeSource{
   419  									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   420  										ClaimName: pvc.Name,
   421  									},
   422  								},
   423  							},
   424  						},
   425  					},
   426  				},
   427  			},
   428  		},
   429  	}
   430  
   431  	if isWindows {
   432  		testDeployment.deployment.Spec.Template.Spec.NodeSelector = map[string]string{
   433  			"kubernetes.io/os": "windows",
   434  		}
   435  		testDeployment.deployment.Spec.Template.Spec.Containers[0].Image = "e2eteam/busybox:1.29"
   436  		testDeployment.deployment.Spec.Template.Spec.Containers[0].Command = []string{"powershell.exe"}
   437  		testDeployment.deployment.Spec.Template.Spec.Containers[0].Args = []string{command}
   438  	}
   439  
   440  	return testDeployment
   441  }
   442  
   443  func (t *TestDeployment) Create() {
   444  	var err error
   445  	t.deployment, err = t.client.AppsV1().Deployments(t.namespace.Name).Create(t.deployment)
   446  	framework.ExpectNoError(err)
   447  	err = testutil.WaitForDeploymentComplete(t.client, t.deployment, e2elog.Logf, poll, pollLongTimeout)
   448  	framework.ExpectNoError(err)
   449  	pods, err := getPodsForDeployment(t.client, t.deployment)
   450  	framework.ExpectNoError(err)
   451  	// always get first pod as there should only be one
   452  	t.podName = pods.Items[0].Name
   453  }
   454  
   455  func (t *TestDeployment) WaitForPodReady() {
   456  	pods, err := getPodsForDeployment(t.client, t.deployment)
   457  	framework.ExpectNoError(err)
   458  	// always get first pod as there should only be one
   459  	pod := pods.Items[0]
   460  	t.podName = pod.Name
   461  	err = framework.WaitForPodRunningInNamespace(t.client, &pod)
   462  	framework.ExpectNoError(err)
   463  }
   464  
   465  func (t *TestDeployment) Exec(command []string, expectedString string) {
   466  	_, err := framework.LookForStringInPodExec(t.namespace.Name, t.podName, command, expectedString, execTimeout)
   467  	framework.ExpectNoError(err)
   468  }
   469  
   470  func (t *TestDeployment) DeletePodAndWait() {
   471  	e2elog.Logf("Deleting pod %q in namespace %q", t.podName, t.namespace.Name)
   472  	err := t.client.CoreV1().Pods(t.namespace.Name).Delete(t.podName, nil)
   473  	if err != nil {
   474  		if !apierrs.IsNotFound(err) {
   475  			framework.ExpectNoError(fmt.Errorf("pod %q Delete API error: %v", t.podName, err))
   476  		}
   477  		return
   478  	}
   479  	e2elog.Logf("Waiting for pod %q in namespace %q to be fully deleted", t.podName, t.namespace.Name)
   480  	err = framework.WaitForPodNoLongerRunningInNamespace(t.client, t.podName, t.namespace.Name)
   481  	if err != nil {
   482  		if !apierrs.IsNotFound(err) {
   483  			framework.ExpectNoError(fmt.Errorf("pod %q error waiting for delete: %v", t.podName, err))
   484  		}
   485  	}
   486  }
   487  
   488  func (t *TestDeployment) Cleanup() {
   489  	e2elog.Logf("deleting Deployment %q/%q", t.namespace.Name, t.deployment.Name)
   490  	body, err := t.Logs()
   491  	if err != nil {
   492  		e2elog.Logf("Error getting logs for pod %s: %v", t.podName, err)
   493  	} else {
   494  		e2elog.Logf("Pod %s has the following logs: %s", t.podName, body)
   495  	}
   496  	err = t.client.AppsV1().Deployments(t.namespace.Name).Delete(t.deployment.Name, nil)
   497  	framework.ExpectNoError(err)
   498  }
   499  
   500  func (t *TestDeployment) Logs() ([]byte, error) {
   501  	return podLogs(t.client, t.podName, t.namespace.Name)
   502  }
   503  
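// exampleDeploymentLifecycle is a usage sketch, not part of the original
// file, for TestDeployment: run a single-replica deployment that writes to
// the mounted claim, exec into its pod, then delete the pod and wait for
// the replacement to come up with the same volume attached. The command and
// paths are illustrative.
func exampleDeploymentLifecycle(client clientset.Interface, ns *v1.Namespace, pvc *v1.PersistentVolumeClaim) {
	cmd := "echo hello > /mnt/test/data && while true; do sleep 1; done"
	deployment := NewTestDeployment(client, ns, cmd, pvc, "test-volume", "/mnt/test", false /*readOnly*/, false /*isWindows*/)
	deployment.Create()
	defer deployment.Cleanup()

	deployment.Exec([]string{"cat", "/mnt/test/data"}, "hello")

	// Delete the pod and let the Deployment controller reschedule it; the
	// volume should reattach to the replacement pod.
	deployment.DeletePodAndWait()
	deployment.WaitForPodReady()
	deployment.Exec([]string{"cat", "/mnt/test/data"}, "hello")
}
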
   504  type TestPod struct {
   505  	client    clientset.Interface
   506  	pod       *v1.Pod
   507  	namespace *v1.Namespace
   508  }
   509  
   510  func NewTestPod(c clientset.Interface, ns *v1.Namespace, command string, isWindows bool) *TestPod {
   511  	testPod := &TestPod{
   512  		client:    c,
   513  		namespace: ns,
   514  		pod: &v1.Pod{
   515  			ObjectMeta: metav1.ObjectMeta{
   516  				GenerateName: "azuredisk-volume-tester-",
   517  			},
   518  			Spec: v1.PodSpec{
   519  				Containers: []v1.Container{
   520  					{
   521  						Name:    "volume-tester",
   522  						Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   523  						Command: []string{"/bin/sh"},
   524  						Args:    []string{"-c", command},
   525  					},
   526  				},
   527  				RestartPolicy: v1.RestartPolicyNever,
   528  				Volumes:       make([]v1.Volume, 0),
   529  			},
   530  		},
   531  	}
   532  	if isWindows {
   533  		testPod.pod.Spec.NodeSelector = map[string]string{
   534  			"kubernetes.io/os": "windows",
   535  		}
   536  		testPod.pod.Spec.Containers[0].Image = "e2eteam/busybox:1.29"
   537  		testPod.pod.Spec.Containers[0].Command = []string{"powershell.exe"}
   538  		testPod.pod.Spec.Containers[0].Args = []string{command}
   539  	}
   540  
   541  	return testPod
   542  }
   543  
   544  func (t *TestPod) Create() {
   545  	var err error
   546  
   547  	t.pod, err = t.client.CoreV1().Pods(t.namespace.Name).Create(t.pod)
   548  	framework.ExpectNoError(err)
   549  }
   550  
   551  func (t *TestPod) WaitForSuccess() {
   552  	err := framework.WaitForPodSuccessInNamespaceSlow(t.client, t.pod.Name, t.namespace.Name)
   553  	framework.ExpectNoError(err)
   554  }
   555  
   556  func (t *TestPod) WaitForRunning() {
   557  	err := framework.WaitForPodRunningInNamespace(t.client, t.pod)
   558  	framework.ExpectNoError(err)
   559  }
   560  
   561  func (t *TestPod) WaitForFailedMountError() {
   562  	err := framework.WaitTimeoutForPodEvent(
   563  		t.client,
   564  		t.pod.Name,
   565  		t.namespace.Name,
   566  		fields.Set{"reason": events.FailedMountVolume}.AsSelector().String(),
   567  		"MountVolume.MountDevice failed for volume",
   568  		pollLongTimeout)
   569  	framework.ExpectNoError(err)
   570  }
   571  
   572  // Ideally this would be in "k8s.io/kubernetes/test/e2e/framework"
   573  // Similar to framework.WaitForPodSuccessInNamespaceSlow
   574  var podFailedCondition = func(pod *v1.Pod) (bool, error) {
   575  	switch pod.Status.Phase {
   576  	case v1.PodFailed:
   577  		ginkgo.By("Saw pod failure")
   578  		return true, nil
   579  	case v1.PodSucceeded:
    580  		return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", pod.Name, pod.Status.Reason, pod.Status.Message)
   581  	default:
   582  		return false, nil
   583  	}
   584  }
   585  
   586  func (t *TestPod) WaitForFailure() {
   587  	err := framework.WaitForPodCondition(t.client, t.namespace.Name, t.pod.Name, failedConditionDescription, slowPodStartTimeout, podFailedCondition)
   588  	framework.ExpectNoError(err)
   589  }
   590  
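// exampleFailureDetection is a usage sketch, not part of the original file,
// for the negative-path helpers: WaitForFailedMountError watches pod events
// for a FailedMount with a "MountVolume.MountDevice failed" message, while
// WaitForFailure waits for the pod itself to reach the Failed phase. The
// pod is assumed to have been set up by the caller so that its volume
// cannot be mounted (for example via an invalid fsType in the StorageClass).
func exampleFailureDetection(pod *TestPod, expectMountError bool) {
	pod.Create()
	defer pod.Cleanup()
	if expectMountError {
		pod.WaitForFailedMountError()
	} else {
		pod.WaitForFailure()
	}
}
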
   591  func (t *TestPod) SetupVolume(pvc *v1.PersistentVolumeClaim, name, mountPath string, readOnly bool) {
   592  	volumeMount := v1.VolumeMount{
   593  		Name:      name,
   594  		MountPath: mountPath,
   595  		ReadOnly:  readOnly,
   596  	}
   597  	t.pod.Spec.Containers[0].VolumeMounts = append(t.pod.Spec.Containers[0].VolumeMounts, volumeMount)
   598  
   599  	volume := v1.Volume{
   600  		Name: name,
   601  		VolumeSource: v1.VolumeSource{
   602  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   603  				ClaimName: pvc.Name,
   604  			},
   605  		},
   606  	}
   607  	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
   608  }
   609  
   610  func (t *TestPod) SetupRawBlockVolume(pvc *v1.PersistentVolumeClaim, name, devicePath string) {
   611  	volumeDevice := v1.VolumeDevice{
   612  		Name:       name,
   613  		DevicePath: devicePath,
   614  	}
   615  	t.pod.Spec.Containers[0].VolumeDevices = make([]v1.VolumeDevice, 0)
   616  	t.pod.Spec.Containers[0].VolumeDevices = append(t.pod.Spec.Containers[0].VolumeDevices, volumeDevice)
   617  
   618  	volume := v1.Volume{
   619  		Name: name,
   620  		VolumeSource: v1.VolumeSource{
   621  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   622  				ClaimName: pvc.Name,
   623  			},
   624  		},
   625  	}
   626  	t.pod.Spec.Volumes = append(t.pod.Spec.Volumes, volume)
   627  }
   628  
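// exampleRawBlockVolume is a usage sketch, not part of the original file,
// for SetupRawBlockVolume: the claim is assumed to have been created with
// the Block volume mode, so the container sees the disk as a device rather
// than a mounted filesystem. The device path and command are illustrative.
func exampleRawBlockVolume(client clientset.Interface, ns *v1.Namespace, pvc *v1.PersistentVolumeClaim) {
	pod := NewTestPod(client, ns, "dd if=/dev/xvda of=/dev/null bs=1024k count=100", false /*isWindows*/)
	pod.SetupRawBlockVolume(pvc, "test-block-volume", "/dev/xvda")
	pod.Create()
	defer pod.Cleanup()
	pod.WaitForSuccess()
}
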
   629  func (t *TestPod) SetNodeSelector(nodeSelector map[string]string) {
   630  	t.pod.Spec.NodeSelector = nodeSelector
   631  }
   632  
   633  func (t *TestPod) Cleanup() {
   634  	cleanupPodOrFail(t.client, t.pod.Name, t.namespace.Name)
   635  }
   636  
   637  func (t *TestPod) Logs() ([]byte, error) {
   638  	return podLogs(t.client, t.pod.Name, t.namespace.Name)
   639  }
   640  
   641  func cleanupPodOrFail(client clientset.Interface, name, namespace string) {
   642  	e2elog.Logf("deleting Pod %q/%q", namespace, name)
   643  	body, err := podLogs(client, name, namespace)
   644  	if err != nil {
   645  		e2elog.Logf("Error getting logs for pod %s: %v", name, err)
   646  	} else {
   647  		e2elog.Logf("Pod %s has the following logs: %s", name, body)
   648  	}
   649  	framework.DeletePodOrFail(client, namespace, name)
   650  }
   651  
   652  func podLogs(client clientset.Interface, name, namespace string) ([]byte, error) {
   653  	return client.CoreV1().Pods(namespace).GetLogs(name, &v1.PodLogOptions{}).Do().Raw()
   654  }
   655  
   656  func getPodsForDeployment(client clientset.Interface, deployment *apps.Deployment) (*v1.PodList, error) {
   657  	replicaSet, err := deploymentutil.GetNewReplicaSet(deployment, client.AppsV1())
   658  	if err != nil {
    659  		return nil, fmt.Errorf("failed to get new replica set for deployment %q: %v", deployment.Name, err)
   660  	}
   661  	if replicaSet == nil {
   662  		return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
   663  	}
   664  	podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
   665  		return client.CoreV1().Pods(namespace).List(options)
   666  	}
   667  	rsList := []*apps.ReplicaSet{replicaSet}
   668  	podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)
   669  	if err != nil {
    670  		return nil, fmt.Errorf("failed to list Pods of Deployment %q: %v", deployment.Name, err)
   671  	}
   672  	return podList, nil
   673  }