k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/storage/testsuites/provisioning.go

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strconv"
    23  	"strings"
    24  	"sync"
    25  	"time"
    26  
    27  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    28  
    29  	"github.com/onsi/ginkgo/v2"
    30  	"github.com/onsi/gomega"
    31  
    32  	appsv1 "k8s.io/api/apps/v1"
    33  	v1 "k8s.io/api/core/v1"
    34  	storagev1 "k8s.io/api/storage/v1"
    35  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    36  	"k8s.io/apimachinery/pkg/api/resource"
    37  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    38  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    39  	"k8s.io/apimachinery/pkg/runtime/schema"
    40  	"k8s.io/client-go/dynamic"
    41  	clientset "k8s.io/client-go/kubernetes"
    42  	"k8s.io/kubernetes/test/e2e/feature"
    43  	"k8s.io/kubernetes/test/e2e/framework"
    44  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    45  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    46  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    47  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    48  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    49  	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
    50  	admissionapi "k8s.io/pod-security-admission/api"
    51  )
    52  
    53  // StorageClassTest represents parameters to be used by provisioning tests.
    54  // Not all parameters are used by all tests.
    55  type StorageClassTest struct {
    56  	Client               clientset.Interface
    57  	Timeouts             *framework.TimeoutContext
    58  	Claim                *v1.PersistentVolumeClaim
    59  	SourceClaim          *v1.PersistentVolumeClaim
    60  	Class                *storagev1.StorageClass
    61  	Name                 string
    62  	CloudProviders       []string
    63  	Provisioner          string
    64  	Parameters           map[string]string
    65  	DelayBinding         bool
    66  	ClaimSize            string
    67  	ExpectedSize         string
    68  	PvCheck              func(ctx context.Context, claim *v1.PersistentVolumeClaim)
    69  	VolumeMode           v1.PersistentVolumeMode
    70  	AllowVolumeExpansion bool
    71  	NodeSelection        e2epod.NodeSelection
    72  	MountOptions         []string
    73  	ReclaimPolicy        *v1.PersistentVolumeReclaimPolicy
    74  }
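
// A minimal usage sketch: the helper name exampleStorageClassTest and the sizes below are
// illustrative assumptions, not part of the suite. It assumes the claim was built with
// GenerateName set, which TestDynamicProvisioning requires.
func exampleStorageClassTest(ctx context.Context, cs clientset.Interface, timeouts *framework.TimeoutContext, sc *storagev1.StorageClass, claim *v1.PersistentVolumeClaim) {
	test := StorageClassTest{
		Client:       cs,
		Timeouts:     timeouts,
		Claim:        claim,
		Class:        sc,
		Provisioner:  sc.Provisioner,
		ClaimSize:    "1Gi",
		ExpectedSize: "1Gi",
		// Verify that the provisioned volume is writable and readable on one node.
		PvCheck: func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			PVWriteReadSingleNodeCheck(ctx, cs, timeouts, claim, e2epod.NodeSelection{})
		},
	}
	// Ensure the StorageClass exists, then run the dynamic provisioning flow.
	SetupStorageClass(ctx, test.Client, test.Class)
	test.TestDynamicProvisioning(ctx)
}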
    75  
    76  type provisioningTestSuite struct {
    77  	tsInfo storageframework.TestSuiteInfo
    78  }
    79  
    80  // InitCustomProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
    81  // using custom test patterns
    82  func InitCustomProvisioningTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
    83  	return &provisioningTestSuite{
    84  		tsInfo: storageframework.TestSuiteInfo{
    85  			Name:         "provisioning",
    86  			TestPatterns: patterns,
    87  			SupportedSizeRange: e2evolume.SizeRange{
    88  				Min: "1Mi",
    89  			},
    90  		},
    91  	}
    92  }
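
// A short sketch of building the suite from a custom subset of patterns instead of the
// defaults used by InitProvisioningTestSuite below; the function name is illustrative only.
func exampleCustomProvisioningSuite() storageframework.TestSuite {
	return InitCustomProvisioningTestSuite([]storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
	})
}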
    93  
    94  // InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
    95  // using test suite default patterns
    96  func InitProvisioningTestSuite() storageframework.TestSuite {
    97  	patterns := []storageframework.TestPattern{
    98  		storageframework.DefaultFsDynamicPV,
    99  		storageframework.BlockVolModeDynamicPV,
   100  		storageframework.NtfsDynamicPV,
   101  	}
   102  	return InitCustomProvisioningTestSuite(patterns)
   103  }
   104  
   105  func (p *provisioningTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
   106  	return p.tsInfo
   107  }
   108  
   109  func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
   110  	// Check preconditions.
   111  	if pattern.VolType != storageframework.DynamicPV {
   112  		e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
   113  	}
   114  	dInfo := driver.GetDriverInfo()
   115  	if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
   116  		e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
   117  	}
   118  }
   119  
   120  func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
   121  	type local struct {
   122  		config *storageframework.PerTestConfig
   123  
   124  		testCase  *StorageClassTest
   125  		cs        clientset.Interface
   126  		pvc       *v1.PersistentVolumeClaim
   127  		sourcePVC *v1.PersistentVolumeClaim
   128  		sc        *storagev1.StorageClass
   129  
   130  		migrationCheck *migrationOpCheck
   131  	}
   132  	var (
   133  		dInfo   = driver.GetDriverInfo()
   134  		dDriver storageframework.DynamicPVTestDriver
   135  		l       local
   136  	)
   137  
   138  	// Beware that it also registers an AfterEach which renders f unusable. Any code using
   139  	// f must run inside an It or Context callback.
   140  	f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))
   141  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   142  
   143  	init := func(ctx context.Context) {
   144  		l = local{}
   145  		dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
   146  		// Now do the more expensive test initialization.
   147  		l.config = driver.PrepareTest(ctx, f)
   148  		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
   149  		ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts)
   150  		l.cs = l.config.Framework.ClientSet
   151  		testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
   152  		driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
   153  		claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
   154  		framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
   155  
   156  		l.sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType)
   157  		if l.sc == nil {
   158  			e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
   159  		}
   160  		l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   161  			ClaimSize:        claimSize,
   162  			StorageClassName: &(l.sc.Name),
   163  			VolumeMode:       &pattern.VolMode,
   164  		}, l.config.Framework.Namespace.Name)
   165  		l.sourcePVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   166  			ClaimSize:        claimSize,
   167  			StorageClassName: &(l.sc.Name),
   168  			VolumeMode:       &pattern.VolMode,
   169  		}, l.config.Framework.Namespace.Name)
   170  		framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
   171  		l.testCase = &StorageClassTest{
   172  			Client:        l.config.Framework.ClientSet,
   173  			Timeouts:      f.Timeouts,
   174  			Claim:         l.pvc,
   175  			SourceClaim:   l.sourcePVC,
   176  			Class:         l.sc,
   177  			Provisioner:   l.sc.Provisioner,
   178  			ClaimSize:     claimSize,
   179  			ExpectedSize:  claimSize,
   180  			VolumeMode:    pattern.VolMode,
   181  			NodeSelection: l.config.ClientNodeSelection,
   182  		}
   183  	}
   184  
   185  	ginkgo.It("should provision storage with mount options", func(ctx context.Context) {
   186  		if dInfo.SupportedMountOption == nil {
   187  			e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
   188  		}
   189  		if pattern.VolMode == v1.PersistentVolumeBlock {
   190  			e2eskipper.Skipf("Block volumes do not support mount options - skipping")
   191  		}
   192  
   193  		init(ctx)
   194  
   195  		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
   196  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   197  			PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
   198  		}
   199  		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
   200  
   201  		l.testCase.TestDynamicProvisioning(ctx)
   202  	})
   203  
   204  	f.It("should provision storage with snapshot data source", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
   205  		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
   206  			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
   207  		}
   208  		if !dInfo.SupportedFsType.Has(pattern.FsType) {
   209  			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
   210  		}
   211  
   212  		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
   213  		if !ok {
   214  			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
   215  		}
   216  
   217  		init(ctx)
   218  
   219  		dc := l.config.Framework.DynamicClient
   220  		testConfig := storageframework.ConvertTestConfig(l.config)
   221  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   222  		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
   223  
   224  		l.pvc.Spec.DataSourceRef = dataSourceRef
   225  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   226  			ginkgo.By("checking whether the created volume has the pre-populated data")
   227  			tests := []e2evolume.Test{
   228  				{
   229  					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   230  					Mode:            pattern.VolMode,
   231  					File:            "index.html",
   232  					ExpectedContent: expectedContent,
   233  				},
   234  			}
   235  			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
   236  		}
   237  		l.testCase.TestDynamicProvisioning(ctx)
   238  	})
   239  
   240  	f.It("should provision storage with snapshot data source (ROX mode)", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
   241  		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
   242  			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
   243  		}
   244  		if !dInfo.SupportedFsType.Has(pattern.FsType) {
   245  			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
   246  		}
   247  		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
   248  			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
   249  		}
   250  
   251  		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
   252  		if !ok {
   253  			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
   254  		}
   255  
   256  		init(ctx)
   257  
   258  		dc := l.config.Framework.DynamicClient
   259  		testConfig := storageframework.ConvertTestConfig(l.config)
   260  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   261  		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
   262  
   263  		l.pvc.Spec.DataSourceRef = dataSourceRef
   264  		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{
   265  			v1.ReadOnlyMany,
   266  		}
   267  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   268  			ginkgo.By("checking whether the created volume has the pre-populated data")
   269  			tests := []e2evolume.Test{
   270  				{
   271  					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   272  					Mode:            pattern.VolMode,
   273  					File:            "index.html",
   274  					ExpectedContent: expectedContent,
   275  				},
   276  			}
   277  			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
   278  		}
   279  		l.testCase.TestDynamicProvisioning(ctx)
   280  	})
   281  
   282  	f.It("should provision storage with any volume data source", f.WithSerial(), func(ctx context.Context) {
   283  		if len(dInfo.InTreePluginName) != 0 {
   284  			e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping")
   285  		}
   286  		if pattern.VolMode == v1.PersistentVolumeBlock {
   287  			e2eskipper.Skipf("Test for Block volumes is not implemented - skipping")
   288  		}
   289  
   290  		init(ctx)
   291  
   292  		ginkgo.By("Creating validator namespace")
   293  		valNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
   294  			"e2e-framework":      f.BaseName,
   295  			"e2e-test-namespace": f.Namespace.Name,
   296  		})
   297  		framework.ExpectNoError(err)
   298  		ginkgo.DeferCleanup(f.DeleteNamespace, valNamespace.Name)
   299  
   300  		ginkgo.By("Deploying validator")
   301  		valManifests := []string{
   302  			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml",
   303  			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml",
   304  			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml",
   305  		}
   306  		err = storageutils.CreateFromManifests(ctx, f, valNamespace,
   307  			func(item interface{}) error { return nil },
   308  			valManifests...)
   309  
   310  		framework.ExpectNoError(err)
   311  
   312  		ginkgo.By("Creating populator namespace")
   313  		popNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
   314  			"e2e-framework":      f.BaseName,
   315  			"e2e-test-namespace": f.Namespace.Name,
   316  		})
   317  		framework.ExpectNoError(err)
   318  		ginkgo.DeferCleanup(f.DeleteNamespace, popNamespace.Name)
   319  
   320  		ginkgo.By("Deploying hello-populator")
   321  		popManifests := []string{
   322  			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml",
   323  			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml",
   324  		}
   325  		err = storageutils.CreateFromManifests(ctx, f, popNamespace,
   326  			func(item interface{}) error {
   327  				switch item := item.(type) {
   328  				case *appsv1.Deployment:
   329  					for i, container := range item.Spec.Template.Spec.Containers {
   330  						switch container.Name {
   331  						case "hello":
   332  							args := []string{}
   333  							var foundNS, foundImage bool
   334  							for _, arg := range container.Args {
   335  								if strings.HasPrefix(arg, "--namespace=") {
   336  									args = append(args, fmt.Sprintf("--namespace=%s", popNamespace.Name))
   337  									foundNS = true
   338  								} else if strings.HasPrefix(arg, "--image-name=") {
   339  									args = append(args, fmt.Sprintf("--image-name=%s", container.Image))
   340  									foundImage = true
   341  								} else {
   342  									args = append(args, arg)
   343  								}
   344  							}
   345  							if !foundNS {
   346  								args = append(args, fmt.Sprintf("--namespace=%s", popNamespace.Name))
   347  								framework.Logf("container name: %s", container.Name)
   348  							}
   349  							if !foundImage {
   350  								args = append(args, fmt.Sprintf("--image-name=%s", container.Image))
   351  								framework.Logf("container image: %s", container.Image)
   352  							}
   353  							container.Args = args
   354  							item.Spec.Template.Spec.Containers[i] = container
   355  						default:
   356  						}
   357  					}
   358  				}
   359  				return nil
   360  			},
   361  			popManifests...)
   362  
   363  		framework.ExpectNoError(err)
   364  
   365  		dc := l.config.Framework.DynamicClient
   366  
   367  		// Make hello-populator handle Hello resource in hello.example.com group
   368  		ginkgo.By("Creating VolumePopulator CR datasource")
   369  		volumePopulatorGVR := schema.GroupVersionResource{Group: "populator.storage.k8s.io", Version: "v1beta1", Resource: "volumepopulators"}
   370  		helloPopulatorCR := &unstructured.Unstructured{
   371  			Object: map[string]interface{}{
   372  				"kind":       "VolumePopulator",
   373  				"apiVersion": "populator.storage.k8s.io/v1beta1",
   374  				"metadata": map[string]interface{}{
   375  					"name": fmt.Sprintf("%s-%s", "hello-populator", f.Namespace.Name),
   376  				},
   377  				"sourceKind": map[string]interface{}{
   378  					"group": "hello.example.com",
   379  					"kind":  "Hello",
   380  				},
   381  			},
   382  		}
   383  
   384  		_, err = dc.Resource(volumePopulatorGVR).Create(ctx, helloPopulatorCR, metav1.CreateOptions{})
   385  		framework.ExpectNoError(err)
   386  
   387  		defer func() {
   388  			framework.Logf("deleting VolumePopulator CR datasource %q/%q", helloPopulatorCR.GetNamespace(), helloPopulatorCR.GetName())
   389  			err = dc.Resource(volumePopulatorGVR).Delete(ctx, helloPopulatorCR.GetName(), metav1.DeleteOptions{})
   390  			if err != nil && !apierrors.IsNotFound(err) {
   391  				framework.Failf("Error deleting VolumePopulator CR datasource %q. Error: %v", helloPopulatorCR.GetName(), err)
   392  			}
   393  		}()
   394  
   395  		// Create Hello CR datasource
   396  		ginkgo.By("Creating Hello CR datasource")
   397  		helloCRName := "example-hello"
   398  		fileName := fmt.Sprintf("example-%s.txt", f.Namespace.Name)
   399  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   400  		helloGVR := schema.GroupVersionResource{Group: "hello.example.com", Version: "v1alpha1", Resource: "hellos"}
   401  		helloCR := &unstructured.Unstructured{
   402  			Object: map[string]interface{}{
   403  				"kind":       "Hello",
   404  				"apiVersion": "hello.example.com/v1alpha1",
   405  				"metadata": map[string]interface{}{
   406  					"name":      helloCRName,
   407  					"namespace": f.Namespace.Name,
   408  				},
   409  				"spec": map[string]interface{}{
   410  					"fileName":     fileName,
   411  					"fileContents": expectedContent,
   412  				},
   413  			},
   414  		}
   415  
   416  		_, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(ctx, helloCR, metav1.CreateOptions{})
   417  		framework.ExpectNoError(err)
   418  
   419  		defer func() {
   420  			framework.Logf("deleting Hello CR datasource %q/%q", helloCR.GetNamespace(), helloCR.GetName())
   421  			err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(ctx, helloCR.GetName(), metav1.DeleteOptions{})
   422  			if err != nil && !apierrors.IsNotFound(err) {
   423  				framework.Failf("Error deleting Hello CR datasource %q. Error: %v", helloCR.GetName(), err)
   424  			}
   425  		}()
   426  
   427  		apiGroup := "hello.example.com"
   428  		l.pvc.Spec.DataSourceRef = &v1.TypedObjectReference{
   429  			APIGroup: &apiGroup,
   430  			Kind:     "Hello",
   431  			Name:     helloCRName,
   432  		}
   433  
   434  		testConfig := storageframework.ConvertTestConfig(l.config)
   435  		l.testCase.NodeSelection = testConfig.ClientNodeSelection
   436  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   437  			ginkgo.By("checking whether the created volume has the pre-populated data")
   438  			tests := []e2evolume.Test{
   439  				{
   440  					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   441  					Mode:            pattern.VolMode,
   442  					File:            fileName,
   443  					ExpectedContent: expectedContent,
   444  				},
   445  			}
   446  			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
   447  		}
   448  
   449  		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
   450  
   451  		l.testCase.TestDynamicProvisioning(ctx)
   452  	})
   453  
   454  	f.It("should provision correct filesystem size when restoring snapshot to larger size pvc", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
   455  		// TODO: remove skip when issue is resolved - https://github.com/kubernetes/kubernetes/issues/113359
   456  		if framework.NodeOSDistroIs("windows") {
   457  			e2eskipper.Skipf("Test is not valid on Windows - skipping")
   458  		}
   459  
   460  		if pattern.VolMode == v1.PersistentVolumeBlock {
   461  			e2eskipper.Skipf("Test is not valid for Block volume mode - skipping")
   462  		}
   463  
   464  		if dInfo.Capabilities[storageframework.CapFSResizeFromSourceNotSupported] {
   465  			e2eskipper.Skipf("Driver %q does not support filesystem resizing - skipping", dInfo.Name)
   466  		}
   467  
   468  		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
   469  			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
   470  		}
   471  
   472  		if !dInfo.SupportedFsType.Has(pattern.FsType) {
   473  			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
   474  		}
   475  
   476  		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
   477  		if !ok {
   478  			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
   479  		}
   480  
   481  		init(ctx)
   482  		pvc2 := l.pvc.DeepCopy()
   483  		l.pvc.Name = "pvc-origin"
   484  		dc := l.config.Framework.DynamicClient
   485  		testConfig := storageframework.ConvertTestConfig(l.config)
   486  		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, "")
   487  
   488  		// Get the created PVC and record the actual size of the pv (from pvc status).
   489  		c, err := l.testCase.Client.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(ctx, l.pvc.Name, metav1.GetOptions{})
   490  		framework.ExpectNoError(err, "Failed to get pvc: %v", err)
   491  		actualPVSize := c.Status.Capacity.Storage().Value()
   492  
   493  		createdClaims := []*v1.PersistentVolumeClaim{c}
   494  		pod, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims, f.NamespacePodSecurityLevel, "")
   495  		framework.ExpectNoError(err, "Failed to create pod: %v", err)
   496  
   497  		// Mount path should not be empty.
   498  		mountpath := findVolumeMountPath(pod, c)
   499  		gomega.Expect(mountpath).ShouldNot(gomega.BeEmpty())
   500  
   501  		// Save filesystem size of the origin volume.
   502  		originFSSize, err := getFilesystemSizeBytes(pod, mountpath)
   503  		framework.ExpectNoError(err, "Failed to obtain filesystem size of a volume mount: %v", err)
   504  
   505  		// For the new PVC, request a size that is larger than the origin PVC actually provisioned.
   506  		storageRequest := resource.NewQuantity(actualPVSize, resource.BinarySI)
   507  		storageRequest.Add(resource.MustParse("1Gi"))
   508  		pvc2.Spec.Resources.Requests = v1.ResourceList{
   509  			v1.ResourceStorage: *storageRequest,
   510  		}
   511  
   512  		// Set PVC snapshot data source.
   513  		pvc2.Spec.DataSourceRef = dataSourceRef
   514  
   515  		// Create a new claim and a pod that will use the new PVC.
   516  		c2, err := l.testCase.Client.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{})
   517  		framework.ExpectNoError(err, "Failed to create pvc: %v", err)
   518  		createdClaims2 := []*v1.PersistentVolumeClaim{c2}
   519  		pod2, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims2, f.NamespacePodSecurityLevel, "")
   520  		framework.ExpectNoError(err, "Failed to create pod: %v", err)
   521  
   522  		// Mount path should not be empty.
   523  		mountpath2 := findVolumeMountPath(pod2, c2)
   524  		gomega.Expect(mountpath2).ShouldNot(gomega.BeEmpty())
   525  
   526  		// Get actual size of the restored filesystem.
   527  		restoredFSSize, err := getFilesystemSizeBytes(pod2, mountpath2)
   528  		framework.ExpectNoError(err, "Failed to obtain filesystem size of a volume mount: %v", err)
   529  
   530  		// Filesystem of a restored volume should be larger than the origin.
   531  		msg := fmt.Sprintf("Filesystem resize failed when restoring from snapshot to PVC with larger size. "+
   532  			"Restored fs size: %v bytes is not larger than origin fs size: %v bytes.\n"+
   533  			"HINT: Your driver needs to check the volume in NodeStageVolume and resize fs if needed.\n"+
   534  			"HINT: For an example patch see: https://github.com/kubernetes/cloud-provider-openstack/pull/1563/files",
   535  			restoredFSSize, originFSSize)
   536  		gomega.Expect(restoredFSSize).Should(gomega.BeNumerically(">", originFSSize), msg)
   537  	})
   538  
   539  	ginkgo.It("should provision storage with pvc data source", func(ctx context.Context) {
   540  		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
   541  			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
   542  		}
   543  		init(ctx)
   544  
   545  		if l.config.ClientNodeSelection.Name == "" {
   546  			// Schedule all pods to the same topology segment (e.g. a cloud availability zone), because some
   547  			// drivers don't support cloning across them.
   548  			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
   549  				framework.Failf("Error setting topology requirements: %v", err)
   550  			}
   551  		}
   552  		testConfig := storageframework.ConvertTestConfig(l.config)
   553  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   554  		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
   555  		l.pvc.Spec.DataSourceRef = dataSourceRef
   556  		l.testCase.NodeSelection = testConfig.ClientNodeSelection
   557  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   558  			ginkgo.By("checking whether the created volume has the pre-populated data")
   559  			tests := []e2evolume.Test{
   560  				{
   561  					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   562  					Mode:            pattern.VolMode,
   563  					File:            "index.html",
   564  					ExpectedContent: expectedContent,
   565  				},
   566  			}
   567  			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
   568  		}
   569  		// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
   570  		volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
   571  		framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
   572  		l.testCase.TestDynamicProvisioning(ctx)
   573  	})
   574  
   575  	ginkgo.It("should provision storage with pvc data source (ROX mode)", func(ctx context.Context) {
   576  		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
   577  			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
   578  		}
   579  		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
   580  			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
   581  		}
   582  		init(ctx)
   583  
   584  		if l.config.ClientNodeSelection.Name == "" {
   585  			// Schedule all pods to the same topology segment (e.g. a cloud availability zone), because some
   586  			// drivers don't support cloning across them.
   587  			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
   588  				framework.Failf("Error setting topology requirements: %v", err)
   589  			}
   590  		}
   591  		testConfig := storageframework.ConvertTestConfig(l.config)
   592  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   593  		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
   594  		l.pvc.Spec.DataSourceRef = dataSourceRef
   595  		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{
   596  			v1.ReadOnlyMany,
   597  		}
   598  		l.testCase.NodeSelection = testConfig.ClientNodeSelection
   599  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   600  			ginkgo.By("checking whether the created volume has the pre-populated data")
   601  			tests := []e2evolume.Test{
   602  				{
   603  					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   604  					Mode:            pattern.VolMode,
   605  					File:            "index.html",
   606  					ExpectedContent: expectedContent,
   607  				},
   608  			}
   609  			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
   610  		}
   611  		// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
   612  		volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
   613  		framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
   614  		l.testCase.TestDynamicProvisioning(ctx)
   615  	})
   616  
   617  	f.It("should provision storage with pvc data source in parallel", f.WithSlow(), func(ctx context.Context) {
   618  		// Test cloning a single volume multiple times.
   619  		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
   620  			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
   621  		}
   622  		if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
   623  			e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
   624  		}
   625  
   626  		init(ctx)
   627  
   628  		if l.config.ClientNodeSelection.Name == "" {
   629  			// Schedule all pods to the same topology segment (e.g. a cloud availability zone), because some
   630  			// drivers don't support cloning across them.
   631  			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
   632  				framework.Failf("Error setting topology requirements: %v", err)
   633  			}
   634  		}
   635  		testConfig := storageframework.ConvertTestConfig(l.config)
   636  		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
   637  		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
   638  		l.pvc.Spec.DataSourceRef = dataSourceRef
   639  
   640  		var wg sync.WaitGroup
   641  		for i := 0; i < 5; i++ {
   642  			wg.Add(1)
   643  			go func(i int) {
   644  				defer ginkgo.GinkgoRecover()
   645  				defer wg.Done()
   646  				ginkgo.By(fmt.Sprintf("Cloning volume nr. %d", i))
   647  				// Each goroutine must have its own pod prefix.
   648  				myTestConfig := testConfig
   649  				myTestConfig.Prefix = fmt.Sprintf("%s-%d", myTestConfig.Prefix, i)
   650  
   651  				t := *l.testCase
   652  				t.NodeSelection = testConfig.ClientNodeSelection
   653  				t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   654  					ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
   655  					tests := []e2evolume.Test{
   656  						{
   657  							Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
   658  							Mode:            pattern.VolMode,
   659  							File:            "index.html",
   660  							ExpectedContent: expectedContent,
   661  						},
   662  					}
   663  					e2evolume.TestVolumeClientSlow(ctx, f, myTestConfig, nil, "", tests)
   664  				}
   665  				// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
   666  				volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
   667  				framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
   668  				t.TestDynamicProvisioning(ctx)
   669  			}(i)
   670  		}
   671  		wg.Wait()
   672  	})
   673  
   674  	ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func(ctx context.Context) {
   675  		// The csi-hostpath driver does not support this test case. In this test case, we have 2 PVs containing the same underlying storage.
   676  		// During the NodeStage call for the second volume, csi-hostpath fails the call because it thinks the volume is already staged at a different path.
   677  		// Note: This is not an issue with drivers like PD CSI, where NodeStage is a no-op for block mode.
   678  		if pattern.VolMode == v1.PersistentVolumeBlock {
   679  			e2eskipper.Skipf("skipping multiple PV mount test for block mode")
   680  		}
   681  
   682  		if !dInfo.Capabilities[storageframework.CapMultiplePVsSameID] {
   683  			e2eskipper.Skipf("this driver does not support multiple PVs with the same volumeHandle")
   684  		}
   685  
   686  		init(ctx)
   687  
   688  		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
   689  			MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
   690  		}
   691  		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
   692  
   693  		l.testCase.TestDynamicProvisioning(ctx)
   694  	})
   695  }
   696  
   697  // SetupStorageClass ensures that a StorageClass from a spec exists: if the StorageClass already exists
   698  // it is returned as-is, if it doesn't exist it is created first and then returned,
   699  // and if the spec is nil the `default` StorageClass is returned.
   700  func SetupStorageClass(
   701  	ctx context.Context,
   702  	client clientset.Interface,
   703  	class *storagev1.StorageClass,
   704  ) *storagev1.StorageClass {
   705  	gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required")
   706  
   707  	var err error
   708  	var computedStorageClass *storagev1.StorageClass
   709  	if class != nil {
   710  		computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
   711  		if err == nil {
   712  			// skip storageclass creation if it already exists
   713  			ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.")
   714  		} else {
   715  			ginkgo.By("Creating a StorageClass")
   716  			class, err = client.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{})
   717  			framework.ExpectNoError(err)
   718  			computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
   719  			framework.ExpectNoError(err)
   720  			clearComputedStorageClass := func(ctx context.Context) {
   721  				framework.Logf("deleting storage class %s", computedStorageClass.Name)
   722  				err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{})
   723  				if err != nil && !apierrors.IsNotFound(err) {
   724  					framework.ExpectNoError(err, "delete storage class")
   725  				}
   726  			}
   727  			ginkgo.DeferCleanup(clearComputedStorageClass)
   728  		}
   729  	} else {
   730  		// StorageClass is nil, so the default one will be used
   731  		scName, err := e2epv.GetDefaultStorageClassName(ctx, client)
   732  		framework.ExpectNoError(err)
   733  		ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName)
   734  		computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
   735  		framework.ExpectNoError(err)
   736  	}
   737  
   738  	return computedStorageClass
   739  }
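
// An illustrative sketch (hypothetical helper name) of the two ways SetupStorageClass is
// typically called: with an explicit class, which is created if missing and cleaned up via
// DeferCleanup, or with nil, which falls back to the cluster's default StorageClass.
func exampleSetupStorageClass(ctx context.Context, cs clientset.Interface, sc *storagev1.StorageClass) {
	// Ensure the test's own StorageClass exists.
	created := SetupStorageClass(ctx, cs, sc)
	framework.Logf("using StorageClass %s (provisioner %s)", created.Name, created.Provisioner)

	// Or rely on whatever the cluster marks as the default StorageClass.
	def := SetupStorageClass(ctx, cs, nil)
	framework.Logf("default StorageClass is %s", def.Name)
}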
   740  
   741  // TestDynamicProvisioning tests dynamic provisioning with the specified StorageClassTest.
   742  // It is assumed that the StorageClass `t.Class` has already been provisioned,
   743  // see #SetupStorageClass
   744  func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.PersistentVolume {
   745  	var err error
   746  	client := t.Client
   747  	gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
   748  	claim := t.Claim
   749  	gomega.Expect(claim).NotTo(gomega.BeNil(), "StorageClassTest.Claim is required")
   750  	gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty")
   751  	class := t.Class
   752  	gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required")
   753  	class, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
   754  	framework.ExpectNoError(err, "StorageClass.Class "+class.Name+" couldn't be fetched from the cluster")
   755  
   756  	ginkgo.By(fmt.Sprintf("creating claim=%+v", claim))
   757  	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
   758  	framework.ExpectNoError(err)
   759  	defer func() {
   760  		framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
   761  		// typically this claim has already been deleted
   762  		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
   763  		if err != nil && !apierrors.IsNotFound(err) {
   764  			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
   765  		}
   766  	}()
   767  
   768  	// ensure that the claim refers to the provisioned StorageClass
   769  	gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name))
   770  
   771  	// if late binding is configured, create and delete a pod to provision the volume
   772  	if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
   773  		ginkgo.By(fmt.Sprintf("creating a pod referring to the class=%+v claim=%+v", class, claim))
   774  		var podConfig *e2epod.Config = &e2epod.Config{
   775  			NS:            claim.Namespace,
   776  			PVCs:          []*v1.PersistentVolumeClaim{claim},
   777  			NodeSelection: t.NodeSelection,
   778  		}
   779  
   780  		var pod *v1.Pod
   781  		pod, err := e2epod.CreateSecPod(ctx, client, podConfig, t.Timeouts.DataSourceProvision)
   782  		// Delete pod now, otherwise PV can't be deleted below
   783  		framework.ExpectNoError(err)
   784  		e2epod.DeletePodOrFail(ctx, client, pod.Namespace, pod.Name)
   785  	}
   786  
   787  	// Run the checker
   788  	if t.PvCheck != nil {
   789  		t.PvCheck(ctx, claim)
   790  	}
   791  
   792  	pv := t.checkProvisioning(ctx, client, claim, class)
   793  
   794  	ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
   795  	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))
   796  
   797  	// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
   798  	// Retain, there's no use waiting because the PV won't be auto-deleted and
   799  	// it's expected for the caller to do it.) Technically, the first few delete
   800  	// attempts may fail, as the volume is still attached to a node because
   801  	// kubelet is slowly cleaning up the previous pod, however it should succeed
   802  	// in a couple of minutes. Wait 20 minutes (or whatever custom value is specified in
   803  	// t.Timeouts.PVDeleteSlow) to recover from random cloud hiccups.
   804  	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
   805  		ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
   806  		framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow))
   807  	}
   808  
   809  	return pv
   810  }
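
// A sketch (hypothetical helper name) of using the returned PV: with a Retain reclaim policy
// TestDynamicProvisioning does not wait for PV deletion, so the caller is expected to remove
// the PV itself.
func exampleRetainedPVCleanup(ctx context.Context, t StorageClassTest) {
	pv := t.TestDynamicProvisioning(ctx)
	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimRetain {
		framework.Logf("deleting retained PV %s", pv.Name)
		framework.ExpectNoError(t.Client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{}))
	}
}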
   811  
   812  // getBoundPV returns the PV bound to the given PVC.
   813  func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
   814  	// Get new copy of the claim
   815  	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
   816  	if err != nil {
   817  		return nil, err
   818  	}
   819  
   820  	// Get the bound PV
   821  	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
   822  	return pv, err
   823  }
   824  
   825  // checkProvisioning verifies that the claim is bound and has the correct properties
   826  func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
   827  	err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
   828  	framework.ExpectNoError(err)
   829  
   830  	ginkgo.By("checking the claim")
   831  	pv, err := getBoundPV(ctx, client, claim)
   832  	framework.ExpectNoError(err)
   833  
   834  	// Check sizes
   835  	expectedCapacity := resource.MustParse(t.ExpectedSize)
   836  	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
   837  	gomega.Expect(pvCapacity.Value()).To(gomega.BeNumerically(">=", expectedCapacity.Value()), "pvCapacity is not greater or equal to expectedCapacity")
   838  
   839  	requestedCapacity := resource.MustParse(t.ClaimSize)
   840  	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
   841  	gomega.Expect(claimCapacity.Value()).To(gomega.BeNumerically(">=", requestedCapacity.Value()), "claimCapacity is not greater or equal to requestedCapacity")
   842  
   843  	// Check PV properties
   844  	ginkgo.By("checking the PV")
   845  
   846  	// Every access mode in PV should be in PVC
   847  	gomega.Expect(pv.Spec.AccessModes).NotTo(gomega.BeZero())
   848  	for _, pvMode := range pv.Spec.AccessModes {
   849  		found := false
   850  		for _, pvcMode := range claim.Spec.AccessModes {
   851  			if pvMode == pvcMode {
   852  				found = true
   853  				break
   854  			}
   855  		}
   856  		if !found {
   857  			framework.Failf("Actual access modes %v are not in claim's access mode", pv.Spec.AccessModes)
   858  		}
   859  	}
   860  
   861  	gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name))
   862  	gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace))
   863  	if class == nil {
   864  		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete))
   865  	} else {
   866  		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy))
   867  		gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions))
   868  	}
   869  	if claim.Spec.VolumeMode != nil {
   870  		gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil())
   871  		gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode))
   872  	}
   873  	return pv
   874  }
   875  
   876  // PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
   877  // and returns the PV.
   878  //
   879  // It starts two pods:
   880  // - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
   881  // - The second pod runs grep 'hello world' on /mnt/test on the same node.
   882  //
   883  // The node is selected by Kubernetes when scheduling the first
   884  // pod. It's then selected via its name for the second pod.
   885  //
   886  // If both succeed, Kubernetes actually allocated something that is
   887  // persistent across pods.
   888  //
   889  // This is a common test that can be called from a StorageClassTest.PvCheck.
   890  func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
   891  	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
   892  	command := "echo 'hello world' > /mnt/test/data"
   893  	pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
   894  	ginkgo.DeferCleanup(func(ctx context.Context) {
   895  		// pod might be nil now.
   896  		StopPod(ctx, client, pod)
   897  	})
   898  	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
   899  	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
   900  	framework.ExpectNoError(err, "get pod")
   901  	actualNodeName := runningPod.Spec.NodeName
   902  	StopPod(ctx, client, pod)
   903  	pod = nil // Don't stop twice.
   904  
   905  	// Get a new copy of the PV
   906  	volume, err := getBoundPV(ctx, client, claim)
   907  	framework.ExpectNoError(err)
   908  
   909  	ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
   910  	command = "grep 'hello world' /mnt/test/data"
   911  
   912  	// We give the second pod the additional responsibility of checking the volume has
   913  	// been mounted with the PV's mount options, if the PV was provisioned with any
   914  	for _, option := range volume.Spec.MountOptions {
   915  		// Get entry, get mount options at 6th word, replace brackets with commas
   916  		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
   917  	}
   918  	command += " || (mount | grep 'on /mnt/test'; false)"
   919  
   920  	if framework.NodeOSDistroIs("windows") {
   921  		// agnhost doesn't support mount
   922  		command = "grep 'hello world' /mnt/test/data"
   923  	}
   924  	RunInPodWithVolume(ctx, client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName, Selector: node.Selector})
   925  	return volume
   926  }
   927  
   928  // PVMultiNodeCheck checks that a PV retains data when moved between nodes.
   929  //
   930  // It starts these pods:
   931  // - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
   932  // - The second pod runs grep 'hello world' on /mnt/test on another node.
   933  //
   934  // The first node is selected by Kubernetes when scheduling the first pod. The second pod uses the same criteria, except that a special anti-affinity
   935  // for the first node gets added. This test can only pass if the cluster has more than one
   936  // suitable node. The caller has to ensure that.
   937  //
   938  // If all succeeds, Kubernetes actually allocated something that is
   939  // persistent across pods and across nodes.
   940  //
   941  // This is a common test that can be called from a StorageClassTest.PvCheck.
   942  func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
   943  	gomega.Expect(node.Name).To(gomega.BeZero(), "this test only works when not locked onto a single node")
   944  
   945  	var pod *v1.Pod
   946  	defer func() {
   947  		// passing pod = nil is okay.
   948  		StopPod(ctx, client, pod)
   949  	}()
   950  
   951  	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
   952  	command := "echo 'hello world' > /mnt/test/data"
   953  	pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
   954  	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
   955  	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
   956  	framework.ExpectNoError(err, "get pod")
   957  	actualNodeName := runningPod.Spec.NodeName
   958  	StopPod(ctx, client, pod)
   959  	pod = nil // Don't stop twice.
   960  
   961  	// Add node-anti-affinity.
   962  	secondNode := node
   963  	e2epod.SetAntiAffinity(&secondNode, actualNodeName)
   964  	ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
   965  	command = "grep 'hello world' /mnt/test/data"
   966  	pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
   967  	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
   968  	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
   969  	framework.ExpectNoError(err, "get pod")
   970  	gomega.Expect(runningPod.Spec.NodeName).ToNot(gomega.Equal(actualNodeName), "second pod should have run on a different node")
   971  	StopPod(ctx, client, pod)
   972  	pod = nil
   973  }
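
// A minimal sketch (illustrative helper name) of plugging the two common checks into
// StorageClassTest.PvCheck. PVMultiNodeCheck is only valid when the node selection is not
// pinned to a single node and the cluster has at least two suitable nodes.
func examplePvCheckSelection(t *StorageClassTest, cs clientset.Interface, timeouts *framework.TimeoutContext, node e2epod.NodeSelection, multiNode bool) {
	if multiNode && node.Name == "" {
		t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			PVMultiNodeCheck(ctx, cs, timeouts, claim, node)
		}
		return
	}
	t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
		PVWriteReadSingleNodeCheck(ctx, cs, timeouts, claim, node)
	}
}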
   974  
   975  // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
   976  func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
   977  	var err error
   978  	gomega.Expect(claims).ToNot(gomega.BeEmpty())
   979  	namespace := claims[0].Namespace
   980  
   981  	ginkgo.By("creating claims")
   982  	var claimNames []string
   983  	var createdClaims []*v1.PersistentVolumeClaim
   984  	for _, claim := range claims {
   985  		c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
   986  		claimNames = append(claimNames, c.Name)
   987  		createdClaims = append(createdClaims, c)
   988  		framework.ExpectNoError(err)
   989  	}
   990  	defer func() {
   991  		errors := map[string]error{}
   992  		for _, claim := range createdClaims {
   993  			err := e2epv.DeletePersistentVolumeClaim(ctx, t.Client, claim.Name, claim.Namespace)
   994  			if err != nil {
   995  				errors[claim.Name] = err
   996  			}
   997  		}
   998  		if len(errors) > 0 {
   999  			for claimName, err := range errors {
  1000  				framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
  1001  			}
  1002  		}
  1003  	}()
  1004  
  1005  	// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound, i.e. the Wait errors out.
  1006  	ginkgo.By("checking the claims are in pending state")
  1007  	err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
  1008  	gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("not all in phase Bound")))
  1009  	verifyPVCsPending(ctx, t.Client, createdClaims)
  1010  
  1011  	ginkgo.By("creating a pod referring to the claims")
  1012  	// Create a pod referring to the claim and wait for it to get to running
  1013  	var pod *v1.Pod
  1014  	if expectUnschedulable {
  1015  		pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, admissionapi.LevelPrivileged, "" /* command */)
  1016  	} else {
  1017  		pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, admissionapi.LevelPrivileged, "" /* command */)
  1018  	}
  1019  	framework.ExpectNoError(err)
  1020  	ginkgo.DeferCleanup(func(ctx context.Context) error {
  1021  		e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
  1022  		return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Name, pod.Namespace, t.Timeouts.PodDelete)
  1023  	})
  1024  	if expectUnschedulable {
  1025  		// Verify that no claims are provisioned.
  1026  		verifyPVCsPending(ctx, t.Client, createdClaims)
  1027  		return nil, nil
  1028  	}
  1029  
  1030  	// collect node details
  1031  	node, err := t.Client.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
  1032  	framework.ExpectNoError(err)
  1033  
  1034  	ginkgo.By("re-checking the claims to see they bound")
  1035  	var pvs []*v1.PersistentVolume
  1036  	for _, claim := range createdClaims {
  1037  		// Get new copy of the claim
  1038  		claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
  1039  		framework.ExpectNoError(err)
  1040  		// make sure claim did bind
  1041  		err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
  1042  		framework.ExpectNoError(err)
  1043  
  1044  		pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
  1045  		framework.ExpectNoError(err)
  1046  		pvs = append(pvs, pv)
  1047  	}
  1048  	gomega.Expect(pvs).To(gomega.HaveLen(len(createdClaims)))
  1049  	return pvs, node
  1050  }
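
// A sketch (hypothetical helper name) of exercising delayed binding: the claims must use a
// WaitForFirstConsumer StorageClass, stay Pending until a pod consumes them, and the helper
// returns the bound PVs plus the node the pod landed on.
func exampleWaitForFirstConsumer(ctx context.Context, t StorageClassTest, claims []*v1.PersistentVolumeClaim) {
	pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC(ctx, claims, nil /* nodeSelector */, false /* expectUnschedulable */)
	framework.Logf("%d PVs were provisioned on node %s", len(pvs), node.Name)
}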
  1051  
  1052  // RunInPodWithVolume runs a command in a pod with the given claim mounted to the /mnt directory.
  1053  // It starts the pod, waits for it to succeed, collects its output, and stops it.
  1054  func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
  1055  	pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node)
  1056  	defer StopPod(ctx, c, pod)
  1057  	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
  1058  	// get the latest status of the pod
  1059  	pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
  1060  	framework.ExpectNoError(err)
  1061  	return pod
  1062  }
  1063  
  1064  // StartInPodWithVolume starts a command in a pod with the given claim mounted to the /mnt directory.
  1065  // The caller is responsible for checking the pod and deleting it.
  1066  func StartInPodWithVolume(ctx context.Context, c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
  1067  	return StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{
  1068  		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  1069  			ClaimName: claimName,
  1070  		},
  1071  	}, ns, podName, command, node)
  1072  }
  1073  
  1074  // StartInPodWithVolumeSource starts a command in a pod with the given volume mounted to the /mnt directory.
  1075  // The caller is responsible for checking the pod and deleting it.
  1076  func StartInPodWithVolumeSource(ctx context.Context, c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
  1077  	pod := &v1.Pod{
  1078  		TypeMeta: metav1.TypeMeta{
  1079  			Kind:       "Pod",
  1080  			APIVersion: "v1",
  1081  		},
  1082  		ObjectMeta: metav1.ObjectMeta{
  1083  			GenerateName: podName + "-",
  1084  			Labels: map[string]string{
  1085  				"app": podName,
  1086  			},
  1087  		},
  1088  		Spec: v1.PodSpec{
  1089  			Containers: []v1.Container{
  1090  				{
  1091  					Name:    "volume-tester",
  1092  					Image:   e2epod.GetDefaultTestImage(),
  1093  					Command: e2epod.GenerateScriptCmd(command),
  1094  					VolumeMounts: []v1.VolumeMount{
  1095  						{
  1096  							Name:      "my-volume",
  1097  							MountPath: "/mnt/test",
  1098  						},
  1099  					},
  1100  				},
  1101  			},
  1102  			RestartPolicy: v1.RestartPolicyNever,
  1103  			Volumes: []v1.Volume{
  1104  				{
  1105  					Name:         "my-volume",
  1106  					VolumeSource: volSrc,
  1107  				},
  1108  			},
  1109  		},
  1110  	}
  1111  
  1112  	e2epod.SetNodeSelection(&pod.Spec, node)
  1113  	pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
  1114  	framework.ExpectNoError(err, "Failed to create pod: %v", err)
  1115  	return pod
  1116  }
  1117  
  1118  // StopPod first tries to log the output of the pod's container, then deletes the pod and
  1119  // waits for that to succeed.
  1120  func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
  1121  	if pod == nil {
  1122  		return
  1123  	}
  1124  	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
  1125  	if err != nil {
  1126  		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
  1127  	} else {
  1128  		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
  1129  	}
  1130  	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
  1131  }
  1132  
  1133  // StopPodAndDependents first tries to log the output of the pod's container,
  1134  // then deletes the pod and waits for that to succeed. Also waits for all owned
  1135  // resources to be deleted.
  1136  func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
  1137  	if pod == nil {
  1138  		return
  1139  	}
  1140  	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
  1141  	if err != nil {
  1142  		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
  1143  	} else {
  1144  		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
  1145  	}
  1146  
  1147  	// We must wait explicitly for removal of the generic ephemeral volume PVs.
  1148  	// For that we must find them first...
  1149  	pvs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
  1150  	framework.ExpectNoError(err, "list PVs")
  1151  	var podPVs []v1.PersistentVolume
  1152  	for _, pv := range pvs.Items {
  1153  		if pv.Spec.ClaimRef == nil ||
  1154  			pv.Spec.ClaimRef.Namespace != pod.Namespace {
  1155  			continue
  1156  		}
  1157  		pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{})
  1158  		if err != nil && apierrors.IsNotFound(err) {
  1159  			// Must have been some unrelated PV, otherwise the PVC should exist.
  1160  			continue
  1161  		}
  1162  		framework.ExpectNoError(err, "get PVC")
  1163  		if pv.Spec.ClaimRef.UID == pvc.UID && metav1.IsControlledBy(pvc, pod) {
  1164  			podPVs = append(podPVs, pv)
  1165  		}
  1166  	}
  1167  
  1168  	framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
  1169  	deletionPolicy := metav1.DeletePropagationForeground
  1170  	err = c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name,
  1171  		metav1.DeleteOptions{
  1172  			// If the pod is the owner of some resources (like ephemeral inline volumes),
  1173  			// then we want to be sure that those are also gone before we return.
  1174  			// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
  1175  			PropagationPolicy: &deletionPolicy,
  1176  		})
  1177  	if err != nil {
  1178  		if apierrors.IsNotFound(err) {
  1179  			return // assume pod was already deleted
  1180  		}
  1181  		framework.Logf("pod Delete API error: %v", err)
  1182  	}
  1183  	framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
  1184  	framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, c, pod.Name, pod.Namespace, timeouts.PodDelete))
  1185  	if len(podPVs) > 0 {
  1186  		for _, pv := range podPVs {
  1187  			// As with CSI inline volumes, we use the pod delete timeout here because conceptually
  1188  			// the volume deletion needs to be that fast (whatever "that" is).
  1189  			framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
  1190  			framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, timeouts.PodDelete))
  1191  		}
  1192  	}
  1193  }
  1194  
  1195  func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
  1196  	for _, claim := range pvcs {
  1197  		// Get new copy of the claim
  1198  		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
  1199  		framework.ExpectNoError(err)
  1200  		gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
  1201  	}
  1202  }
  1203  
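        // prepareSnapshotDataSourceForProvisioning writes injectContent into a fresh (or
        // pre-created) initClaim, snapshots it and returns a data source reference for that
        // snapshot. A sketch of how callers typically consume the result (claim is assumed
        // to be the PVC that will be provisioned from the snapshot):
        //
        //	dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, config, perTestConfig,
        //		pattern, client, dynamicClient, initClaim, class, sDriver, mode, injectContent)
        //	claim.Spec.DataSourceRef = dataSourceRef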
  1204  func prepareSnapshotDataSourceForProvisioning(
  1205  	ctx context.Context,
  1206  	f *framework.Framework,
  1207  	config e2evolume.TestConfig,
  1208  	perTestConfig *storageframework.PerTestConfig,
  1209  	pattern storageframework.TestPattern,
  1210  	client clientset.Interface,
  1211  	dynamicClient dynamic.Interface,
  1212  	initClaim *v1.PersistentVolumeClaim,
  1213  	class *storagev1.StorageClass,
  1214  	sDriver storageframework.SnapshottableTestDriver,
  1215  	mode v1.PersistentVolumeMode,
  1216  	injectContent string,
  1217  ) *v1.TypedObjectReference {
  1218  	SetupStorageClass(ctx, client, class)
  1219  
  1220  	if initClaim.ResourceVersion != "" {
  1221  		ginkgo.By("Skipping creation of PVC, it already exists")
  1222  	} else {
  1223  		ginkgo.By("[Initialize dataSource] creating an initClaim")
  1224  		updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(ctx, initClaim, metav1.CreateOptions{})
  1225  		if apierrors.IsAlreadyExists(err) {
  1226  			err = nil
  1227  		}
  1228  		framework.ExpectNoError(err)
  1229  		initClaim = updatedClaim
  1230  	}
  1231  
  1232  	// Write the expected content into index.html on the volume (mounted at /mnt/test).
  1233  	tests := []e2evolume.Test{
  1234  		{
  1235  			Volume:          *storageutils.CreateVolumeSource(initClaim.Name, false /* readOnly */),
  1236  			Mode:            mode,
  1237  			File:            "index.html",
  1238  			ExpectedContent: injectContent,
  1239  		},
  1240  	}
  1241  	e2evolume.InjectContent(ctx, f, config, nil, "", tests)
  1242  
  1243  	parameters := map[string]string{}
  1244  	snapshotResource := storageframework.CreateSnapshotResource(ctx, sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters)
  1245  	group := "snapshot.storage.k8s.io"
  1246  	dataSourceRef := &v1.TypedObjectReference{
  1247  		APIGroup: &group,
  1248  		Kind:     "VolumeSnapshot",
  1249  		Name:     snapshotResource.Vs.GetName(),
  1250  	}
  1251  
  1252  	cleanupFunc := func(ctx context.Context) {
  1253  		framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name)
  1254  		err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(ctx, initClaim.Name, metav1.DeleteOptions{})
  1255  		if err != nil && !apierrors.IsNotFound(err) {
  1256  			framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
  1257  		}
  1258  
  1259  		err = snapshotResource.CleanupResource(ctx, f.Timeouts)
  1260  		framework.ExpectNoError(err)
  1261  	}
  1262  	ginkgo.DeferCleanup(cleanupFunc)
  1263  
  1264  	return dataSourceRef
  1265  }
  1266  
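        // preparePVCDataSourceForProvisioning writes injectContent into the source PVC
        // (creating it first if needed) and returns a data source reference to it. Because
        // the reference has no APIGroup and has Kind "PersistentVolumeClaim", a claim created
        // with it is cloned from the source volume. A sketch with placeholder names:
        //
        //	dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, config, client, sourcePVC, class, mode, injectContent)
        //	clonedClaim.Spec.DataSourceRef = dataSourceRef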
  1267  func preparePVCDataSourceForProvisioning(
  1268  	ctx context.Context,
  1269  	f *framework.Framework,
  1270  	config e2evolume.TestConfig,
  1271  	client clientset.Interface,
  1272  	source *v1.PersistentVolumeClaim,
  1273  	class *storagev1.StorageClass,
  1274  	mode v1.PersistentVolumeMode,
  1275  	injectContent string,
  1276  ) *v1.TypedObjectReference {
  1277  	SetupStorageClass(ctx, client, class)
  1278  
  1279  	if source.ResourceVersion != "" {
  1280  		ginkgo.By("Skipping creation of PVC, it already exists")
  1281  	} else {
  1282  		ginkgo.By("[Initialize dataSource] creating a source PVC")
  1283  		var err error
  1284  		source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(ctx, source, metav1.CreateOptions{})
  1285  		framework.ExpectNoError(err)
  1286  	}
  1287  
  1288  	tests := []e2evolume.Test{
  1289  		{
  1290  			Volume:          *storageutils.CreateVolumeSource(source.Name, false /* readOnly */),
  1291  			Mode:            mode,
  1292  			File:            "index.html",
  1293  			ExpectedContent: injectContent,
  1294  		},
  1295  	}
  1296  	e2evolume.InjectContent(ctx, f, config, nil, "", tests)
  1297  
  1298  	dataSourceRef := &v1.TypedObjectReference{
  1299  		Kind: "PersistentVolumeClaim",
  1300  		Name: source.GetName(),
  1301  	}
  1302  
  1303  	cleanupFunc := func(ctx context.Context) {
  1304  		framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name)
  1305  		err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(ctx, source.Name, metav1.DeleteOptions{})
  1306  		if err != nil && !apierrors.IsNotFound(err) {
  1307  			framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err)
  1308  		}
  1309  	}
  1310  	ginkgo.DeferCleanup(cleanupFunc)
  1311  
  1312  	return dataSourceRef
  1313  }
  1314  
  1315  // findVolumeMountPath looks for a claim name inside a pod and returns the absolute path of its volume mount point.
  1316  func findVolumeMountPath(pod *v1.Pod, claim *v1.PersistentVolumeClaim) string {
  1317  	// Find the volume name that the pod assigned to the claim.
  1318  	volumeName := ""
  1319  	for _, volume := range pod.Spec.Volumes {
  1320  		if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == claim.Name {
  1321  			volumeName = volume.Name
  1322  			break
  1323  		}
  1324  	}
  1325  
  1326  	// Find where the pod mounted the volume inside a container.
  1327  	containerMountPath := ""
  1328  	for _, volumeMount := range pod.Spec.Containers[0].VolumeMounts {
  1329  		if volumeMount.Name == volumeName {
  1330  			containerMountPath = volumeMount.MountPath
  1331  			break
  1332  		}
  1333  	}
  1334  	return containerMountPath
  1335  }
  1336  
  1337  // getFilesystemSizeBytes returns the total size in bytes of the filesystem mounted at the given mountPath inside a pod. Use findVolumeMountPath to look up the mountPath.
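        //
        // Illustrative usage (pod and claim come from the calling test):
        //
        //	mountPath := findVolumeMountPath(pod, claim)
        //	sizeBytes, err := getFilesystemSizeBytes(pod, mountPath)
        //	framework.ExpectNoError(err)
        //	// sizeBytes is the filesystem block size multiplied by the block count,
        //	// as reported by "stat -f" inside the container.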
  1338  func getFilesystemSizeBytes(pod *v1.Pod, mountPath string) (int, error) {
  1339  	cmd := fmt.Sprintf("stat -f -c %%s %v", mountPath)
  1340  	blockSize, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--", "/bin/sh", "-c", cmd)
  1341  	if err != nil {
  1342  		return 0, err
  1343  	}
  1344  
  1345  	cmd = fmt.Sprintf("stat -f -c %%b %v", mountPath)
  1346  	blockCount, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--", "/bin/sh", "-c", cmd)
  1347  	if err != nil {
  1348  		return 0, err
  1349  	}
  1350  
  1351  	bs, err := strconv.Atoi(strings.TrimSuffix(blockSize, "\n"))
  1352  	if err != nil {
  1353  		return 0, err
  1354  	}
  1355  
  1356  	bc, err := strconv.Atoi(strings.TrimSuffix(blockCount, "\n"))
  1357  	if err != nil {
  1358  		return 0, err
  1359  	}
  1360  
  1361  	return bs * bc, nil
  1362  }
  1363  
  1364  // MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same underlying storage can be mounted simultaneously on a single node.
  1365  //
  1366  // Steps:
  1367  // - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1.
  1368  // - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
  1369  // - Start Pod2 using PVC2, PV2 (which points to the same underlying volume v) on node N1.
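        //
        // A minimal sketch of an invocation, assuming claim is a bound, dynamically
        // provisioned PVC owned by the calling test:
        //
        //	MultiplePVMountSingleNodeCheck(ctx, f.ClientSet, f.Timeouts, claim, e2epod.NodeSelection{})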
  1370  func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
  1371  	pod1Config := e2epod.Config{
  1372  		NS:            claim.Namespace,
  1373  		NodeSelection: node,
  1374  		PVCs:          []*v1.PersistentVolumeClaim{claim},
  1375  	}
  1376  	pod1, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod1Config, timeouts.PodStart)
  1377  	framework.ExpectNoError(err)
  1378  	defer func() {
  1379  		ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name))
  1380  		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod1))
  1381  	}()
  1382  	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
  1383  
  1384  	// Create a new PV which points to the same underlying storage. The Retain policy is used so that deletion of the second PVC does not trigger deletion of its bound PV and underlying storage.
  1385  	boundPV, err := getBoundPV(ctx, client, claim)
  1386  	framework.ExpectNoError(err)
  1387  	pv2Config := e2epv.PersistentVolumeConfig{
  1388  		NamePrefix:       fmt.Sprintf("%s-", "pv"),
  1389  		StorageClassName: *claim.Spec.StorageClassName,
  1390  		PVSource:         boundPV.Spec.PersistentVolumeSource,
  1391  		AccessModes:      boundPV.Spec.AccessModes,
  1392  		VolumeMode:       boundPV.Spec.VolumeMode,
  1393  		ReclaimPolicy:    v1.PersistentVolumeReclaimRetain,
  1394  	}
  1395  
  1396  	pvc2Config := e2epv.PersistentVolumeClaimConfig{
  1397  		NamePrefix:       fmt.Sprintf("%s-", "pvc"),
  1398  		StorageClassName: &claim.Namespace,
  1399  		AccessModes:      boundPV.Spec.AccessModes,
  1400  		VolumeMode:       boundPV.Spec.VolumeMode,
  1401  	}
  1402  
  1403  	pv2, pvc2, err := e2epv.CreatePVCPV(ctx, client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
  1404  	framework.ExpectNoError(err, "PVC, PV creation failed")
  1405  	framework.Logf("Created PVC %s/%s and PV %s", pvc2.Namespace, pvc2.Name, pv2.Name)
  1406  
  1407  	pod2Config := e2epod.Config{
  1408  		NS:            pvc2.Namespace,
  1409  		NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName, Selector: node.Selector},
  1410  		PVCs:          []*v1.PersistentVolumeClaim{pvc2},
  1411  	}
  1412  	pod2, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod2Config, timeouts.PodStart)
  1413  	framework.ExpectNoError(err)
  1414  	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName))
  1415  
  1416  	ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name))
  1417  	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod2))
  1418  
  1419  	err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, pvc2.Namespace)
  1420  	framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name)
  1421  
  1422  	err = e2epv.DeletePersistentVolume(ctx, client, pv2.Name)
  1423  	framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name)
  1424  }