k8s.io/kubernetes@v1.29.3/test/e2e/storage/csi_mock/base.go

     1  /*
     2  Copyright 2022 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package csi_mock
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"reflect"
    24  	"strconv"
    25  	"strings"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
    30  	"github.com/onsi/ginkgo/v2"
    31  	"google.golang.org/grpc/codes"
    32  	v1 "k8s.io/api/core/v1"
    33  	storagev1 "k8s.io/api/storage/v1"
    34  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    35  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    37  	"k8s.io/apimachinery/pkg/fields"
    38  	utilerrors "k8s.io/apimachinery/pkg/util/errors"
    39  	"k8s.io/apimachinery/pkg/util/sets"
    40  	"k8s.io/apimachinery/pkg/util/wait"
    41  	clientset "k8s.io/client-go/kubernetes"
    42  	"k8s.io/kubernetes/test/e2e/framework"
    43  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    44  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    45  	"k8s.io/kubernetes/test/e2e/storage/drivers"
    46  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    47  	"k8s.io/kubernetes/test/e2e/storage/testsuites"
    48  	"k8s.io/kubernetes/test/e2e/storage/utils"
    49  	imageutils "k8s.io/kubernetes/test/utils/image"
    50  )
    51  
    52  const (
    53  	csiNodeLimitUpdateTimeout  = 5 * time.Minute
    54  	csiPodUnschedulableTimeout = 5 * time.Minute
    55  	csiResizeWaitPeriod        = 5 * time.Minute
    56  	csiVolumeAttachmentTimeout = 7 * time.Minute
     57  	// How long to wait for the Resizing condition on the PVC to appear
    58  	csiResizingConditionWait = 2 * time.Minute
    59  
    60  	// Time for starting a pod with a volume.
    61  	csiPodRunningTimeout = 5 * time.Minute
    62  
     63  	// How long to wait for kubelet to unstage a volume after a pod is deleted
    64  	csiUnstageWaitTimeout = 1 * time.Minute
    65  )
    66  
    67  // csiCall represents an expected call from Kubernetes to CSI mock driver and
    68  // expected return value.
    69  // When matching expected csiCall with a real CSI mock driver output, one csiCall
    70  // matches *one or more* calls with the same method and error code.
    71  // This is due to exponential backoff in Kubernetes, where the test cannot expect
     72  // an exact number of call repetitions.
    73  type csiCall struct {
    74  	expectedMethod string
    75  	expectedError  codes.Code
    76  	expectedSecret map[string]string
    77  	// This is a mark for the test itself to delete the tested pod *after*
    78  	// this csiCall is received.
    79  	deletePod bool
    80  }
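
         // exampleExpectedCallSequence is a hypothetical illustration (it is not
         // referenced by any test in this file) of how such a sequence could look
         // for a pod whose volume is provisioned, staged and published, and which
         // gets deleted right after publishing. Because of the matching rule above,
         // each entry may cover several retried calls with the same method and code.
         var exampleExpectedCallSequence = []csiCall{
         	{expectedMethod: "CreateVolume", expectedError: codes.OK},
         	{expectedMethod: "NodeStageVolume", expectedError: codes.OK},
         	{expectedMethod: "NodePublishVolume", expectedError: codes.OK, deletePod: true},
         	{expectedMethod: "NodeUnpublishVolume", expectedError: codes.OK},
         	{expectedMethod: "NodeUnstageVolume", expectedError: codes.OK},
         	{expectedMethod: "DeleteVolume", expectedError: codes.OK},
         }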
    81  
    82  type testParameters struct {
    83  	disableAttach       bool
    84  	attachLimit         int
    85  	registerDriver      bool
    86  	lateBinding         bool
    87  	enableTopology      bool
    88  	podInfo             *bool
    89  	storageCapacity     *bool
    90  	scName              string // pre-selected storage class name; must be unique in the cluster
    91  	enableResizing      bool   // enable resizing for both CSI mock driver and storageClass.
    92  	enableNodeExpansion bool   // enable node expansion for CSI mock driver
     93  	// just disables resizing on the driver; it overrides the enableResizing flag for the CSI mock driver
    94  	disableResizingOnDriver       bool
    95  	enableSnapshot                bool
    96  	enableVolumeMountGroup        bool // enable the VOLUME_MOUNT_GROUP node capability in the CSI mock driver.
    97  	hooks                         *drivers.Hooks
    98  	tokenRequests                 []storagev1.TokenRequest
    99  	requiresRepublish             *bool
   100  	fsGroupPolicy                 *storagev1.FSGroupPolicy
   101  	enableSELinuxMount            *bool
   102  	enableRecoverExpansionFailure bool
   103  	enableCSINodeExpandSecret     bool
   104  }
   105  
   106  type mockDriverSetup struct {
   107  	cs          clientset.Interface
   108  	config      *storageframework.PerTestConfig
   109  	pods        []*v1.Pod
   110  	pvcs        []*v1.PersistentVolumeClaim
   111  	sc          map[string]*storagev1.StorageClass
   112  	vsc         map[string]*unstructured.Unstructured
   113  	driver      drivers.MockCSITestDriver
   114  	provisioner string
   115  	tp          testParameters
   116  	f           *framework.Framework
   117  }
   118  
   119  type volumeType string
   120  
   121  var (
   122  	csiEphemeral     = volumeType("CSI")
   123  	genericEphemeral = volumeType("Ephemeral")
   124  	pvcReference     = volumeType("PVC")
   125  )
   126  
   127  const (
   128  	poll                           = 2 * time.Second
   129  	pvcAsSourceProtectionFinalizer = "snapshot.storage.kubernetes.io/pvc-as-source-protection"
   130  	volumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection"
   131  	volumeSnapshotBoundFinalizer   = "snapshot.storage.kubernetes.io/volumesnapshot-bound-protection"
   132  	errReasonNotEnoughSpace        = "node(s) did not have enough free storage"
   133  
   134  	csiNodeExpandSecretKey          = "csi.storage.k8s.io/node-expand-secret-name"
   135  	csiNodeExpandSecretNamespaceKey = "csi.storage.k8s.io/node-expand-secret-namespace"
   136  )
   137  
   138  var (
   139  	errPodCompleted   = fmt.Errorf("pod ran to completion")
   140  	errNotEnoughSpace = errors.New(errReasonNotEnoughSpace)
   141  )
   142  
   143  func newMockDriverSetup(f *framework.Framework) *mockDriverSetup {
   144  	return &mockDriverSetup{
   145  		cs:  f.ClientSet,
   146  		sc:  make(map[string]*storagev1.StorageClass),
   147  		vsc: make(map[string]*unstructured.Unstructured),
   148  		f:   f,
   149  	}
   150  }
   151  
   152  func (m *mockDriverSetup) init(ctx context.Context, tp testParameters) {
   153  	m.cs = m.f.ClientSet
   154  	m.tp = tp
   155  
   156  	var err error
   157  	driverOpts := drivers.CSIMockDriverOpts{
   158  		RegisterDriver:                tp.registerDriver,
   159  		PodInfo:                       tp.podInfo,
   160  		StorageCapacity:               tp.storageCapacity,
   161  		EnableTopology:                tp.enableTopology,
   162  		AttachLimit:                   tp.attachLimit,
   163  		DisableAttach:                 tp.disableAttach,
   164  		EnableResizing:                tp.enableResizing,
   165  		EnableNodeExpansion:           tp.enableNodeExpansion,
   166  		EnableSnapshot:                tp.enableSnapshot,
   167  		EnableVolumeMountGroup:        tp.enableVolumeMountGroup,
   168  		TokenRequests:                 tp.tokenRequests,
   169  		RequiresRepublish:             tp.requiresRepublish,
   170  		FSGroupPolicy:                 tp.fsGroupPolicy,
   171  		EnableSELinuxMount:            tp.enableSELinuxMount,
   172  		EnableRecoverExpansionFailure: tp.enableRecoverExpansionFailure,
   173  	}
   174  
   175  	// At the moment, only tests which need hooks are
   176  	// using the embedded CSI mock driver. The rest run
    177  	// the driver inside the cluster, although they could be
    178  	// changed to use embedding merely by setting
    179  	// driverOpts.Embedded to true.
   180  	//
   181  	// Not enabling it for all tests minimizes
   182  	// the risk that the introduction of embedded breaks
    183  	// some existing tests and avoids a dependency
   184  	// on port forwarding, which is important if some of
   185  	// these tests are supposed to become part of
   186  	// conformance testing (port forwarding isn't
   187  	// currently required).
   188  	if tp.hooks != nil {
   189  		driverOpts.Embedded = true
   190  		driverOpts.Hooks = *tp.hooks
   191  	}
   192  
    193  	// this just disables resizing on the driver, keeping resizing on the SC enabled.
   194  	if tp.disableResizingOnDriver {
   195  		driverOpts.EnableResizing = false
   196  	}
   197  
   198  	m.driver = drivers.InitMockCSIDriver(driverOpts)
   199  	config := m.driver.PrepareTest(ctx, m.f)
   200  	m.config = config
   201  	m.provisioner = config.GetUniqueDriverName()
   202  
   203  	if tp.registerDriver {
   204  		err = waitForCSIDriver(m.cs, m.config.GetUniqueDriverName())
   205  		framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
   206  		ginkgo.DeferCleanup(destroyCSIDriver, m.cs, m.config.GetUniqueDriverName())
   207  	}
   208  
    209  	// Wait for the CSIDriver to actually get deployed and the CSINode object to be generated.
   210  	// This indicates the mock CSI driver pod is up and running healthy.
   211  	err = drivers.WaitForCSIDriverRegistrationOnNode(ctx, m.config.ClientNodeSelection.Name, m.config.GetUniqueDriverName(), m.cs)
   212  	framework.ExpectNoError(err, "Failed to register CSIDriver %v", m.config.GetUniqueDriverName())
   213  }
   214  
   215  func (m *mockDriverSetup) cleanup(ctx context.Context) {
   216  	cs := m.f.ClientSet
   217  	var errs []error
   218  
   219  	for _, pod := range m.pods {
   220  		ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   221  		errs = append(errs, e2epod.DeletePodWithWait(ctx, cs, pod))
   222  	}
   223  
   224  	for _, claim := range m.pvcs {
   225  		ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
   226  		claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
   227  		if err == nil {
   228  			if err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}); err != nil {
   229  				errs = append(errs, err)
   230  			}
   231  			if claim.Spec.VolumeName != "" {
   232  				errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(ctx, cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute))
   233  			}
   234  		}
   235  	}
   236  
   237  	for _, sc := range m.sc {
   238  		ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
   239  		cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
   240  	}
   241  
   242  	for _, vsc := range m.vsc {
   243  		ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
   244  		m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
   245  	}
   246  
   247  	err := utilerrors.NewAggregate(errs)
   248  	framework.ExpectNoError(err, "while cleaning up after test")
   249  }
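
         // Hypothetical lifecycle sketch (not an actual test in this file): the
         // helpers above are typically combined like this inside a ginkgo.It block,
         // with cleanup registered right after init so that it also runs on failure:
         //
         //	m := newMockDriverSetup(f)
         //	m.init(ctx, testParameters{registerDriver: true, disableAttach: true})
         //	ginkgo.DeferCleanup(m.cleanup)
         //	_, _, pod := m.createPod(ctx, pvcReference)
         //	err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
         //	framework.ExpectNoError(err, "failed to start pod")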
   250  
   251  func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
   252  	ginkgo.By("Creating pod")
   253  	f := m.f
   254  
   255  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   256  	if m.tp.enableCSINodeExpandSecret {
   257  		if sc.Parameters == nil {
   258  			parameters := map[string]string{
   259  				csiNodeExpandSecretKey:          "test-secret",
   260  				csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
   261  			}
   262  			sc.Parameters = parameters
   263  		} else {
   264  			sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
   265  			sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
   266  		}
   267  	}
   268  	scTest := testsuites.StorageClassTest{
   269  		Name:                 m.driver.GetDriverInfo().Name,
   270  		Timeouts:             f.Timeouts,
   271  		Provisioner:          sc.Provisioner,
   272  		Parameters:           sc.Parameters,
   273  		ClaimSize:            "1Gi",
   274  		ExpectedSize:         "1Gi",
   275  		DelayBinding:         m.tp.lateBinding,
   276  		AllowVolumeExpansion: m.tp.enableResizing,
   277  	}
   278  
   279  	// The mock driver only works when everything runs on a single node.
   280  	nodeSelection := m.config.ClientNodeSelection
   281  	switch withVolume {
   282  	case csiEphemeral:
   283  		pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
   284  	case genericEphemeral:
   285  		class, pod = startPausePodGenericEphemeral(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name)
   286  		if class != nil {
   287  			m.sc[class.Name] = class
   288  		}
   289  		claim = &v1.PersistentVolumeClaim{
   290  			ObjectMeta: metav1.ObjectMeta{
   291  				Name:      pod.Name + "-" + pod.Spec.Volumes[0].Name,
   292  				Namespace: f.Namespace.Name,
   293  			},
   294  		}
   295  	case pvcReference:
   296  		class, claim, pod = startPausePod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name)
   297  		if class != nil {
   298  			m.sc[class.Name] = class
   299  		}
   300  		if claim != nil {
   301  			m.pvcs = append(m.pvcs, claim)
   302  		}
   303  	}
   304  	if pod != nil {
   305  		m.pods = append(m.pods, pod)
   306  	}
   307  	return // result variables set above
   308  }
   309  
   310  func (m *mockDriverSetup) createPodWithPVC(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
   311  	f := m.f
   312  
   313  	nodeSelection := m.config.ClientNodeSelection
   314  	pod, err := startPausePodWithClaim(m.cs, pvc, nodeSelection, f.Namespace.Name)
   315  	if pod != nil {
   316  		m.pods = append(m.pods, pod)
   317  	}
   318  	return pod, err
   319  }
   320  
   321  func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   322  	f := m.f
   323  
   324  	ginkgo.By("Creating pod with fsGroup")
   325  	nodeSelection := m.config.ClientNodeSelection
   326  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   327  	scTest := testsuites.StorageClassTest{
   328  		Name:                 m.driver.GetDriverInfo().Name,
   329  		Provisioner:          sc.Provisioner,
   330  		Parameters:           sc.Parameters,
   331  		ClaimSize:            "1Gi",
   332  		ExpectedSize:         "1Gi",
   333  		DelayBinding:         m.tp.lateBinding,
   334  		AllowVolumeExpansion: m.tp.enableResizing,
   335  	}
   336  	class, claim, pod := startBusyBoxPod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)
   337  
   338  	if class != nil {
   339  		m.sc[class.Name] = class
   340  	}
   341  	if claim != nil {
   342  		m.pvcs = append(m.pvcs, claim)
   343  	}
   344  
   345  	if pod != nil {
   346  		m.pods = append(m.pods, pod)
   347  	}
   348  
   349  	return class, claim, pod
   350  }
   351  
   352  func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   353  	ginkgo.By("Creating pod with SELinux context")
   354  	f := m.f
   355  	nodeSelection := m.config.ClientNodeSelection
   356  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   357  	scTest := testsuites.StorageClassTest{
   358  		Name:                 m.driver.GetDriverInfo().Name,
   359  		Provisioner:          sc.Provisioner,
   360  		Parameters:           sc.Parameters,
   361  		ClaimSize:            "1Gi",
   362  		ExpectedSize:         "1Gi",
   363  		DelayBinding:         m.tp.lateBinding,
   364  		AllowVolumeExpansion: m.tp.enableResizing,
   365  		MountOptions:         mountOptions,
   366  	}
   367  	class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
   368  	pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts)
   369  	framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err)
   370  
   371  	if class != nil {
   372  		m.sc[class.Name] = class
   373  	}
   374  	if claim != nil {
   375  		m.pvcs = append(m.pvcs, claim)
   376  	}
   377  
   378  	if pod != nil {
   379  		m.pods = append(m.pods, pod)
   380  	}
   381  
   382  	return class, claim, pod
   383  }
   384  
   385  func waitForCSIDriver(cs clientset.Interface, driverName string) error {
   386  	timeout := 4 * time.Minute
   387  
   388  	framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
   389  	for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
   390  		_, err := cs.StorageV1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
   391  		if !apierrors.IsNotFound(err) {
   392  			return err
   393  		}
   394  	}
   395  	return fmt.Errorf("gave up after waiting %v for CSIDriver %q", timeout, driverName)
   396  }
   397  
   398  func destroyCSIDriver(cs clientset.Interface, driverName string) {
   399  	driverGet, err := cs.StorageV1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
   400  	if err == nil {
   401  		framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
    402  		// Uncomment the following line to get a full dump of the CSIDriver object
   403  		// framework.Logf("%s", framework.PrettyPrint(driverGet))
   404  		cs.StorageV1().CSIDrivers().Delete(context.TODO(), driverName, metav1.DeleteOptions{})
   405  	}
   406  }
   407  
   408  func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *storagev1.StorageClass {
   409  	pluginName := t.Provisioner
   410  	if pluginName == "" {
   411  		pluginName = getDefaultPluginName()
   412  	}
   413  	if prefix == "" {
   414  		prefix = "sc"
   415  	}
   416  	bindingMode := storagev1.VolumeBindingImmediate
   417  	if t.DelayBinding {
   418  		bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
   419  	}
   420  	if t.Parameters == nil {
   421  		t.Parameters = make(map[string]string)
   422  	}
   423  
   424  	if framework.NodeOSDistroIs("windows") {
   425  		// fstype might be forced from outside, in that case skip setting a default
   426  		if _, exists := t.Parameters["fstype"]; !exists {
   427  			t.Parameters["fstype"] = e2epv.GetDefaultFSType()
    428  			framework.Logf("setting a default fsType=%s in the storage class", t.Parameters["fstype"])
   429  		}
   430  	}
   431  
   432  	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
   433  	if t.AllowVolumeExpansion {
   434  		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
   435  	}
   436  	return sc
   437  }
   438  
   439  func getStorageClass(
   440  	provisioner string,
   441  	parameters map[string]string,
   442  	bindingMode *storagev1.VolumeBindingMode,
   443  	mountOptions []string,
   444  	ns string,
   445  	prefix string,
   446  ) *storagev1.StorageClass {
   447  	if bindingMode == nil {
   448  		defaultBindingMode := storagev1.VolumeBindingImmediate
   449  		bindingMode = &defaultBindingMode
   450  	}
   451  	return &storagev1.StorageClass{
   452  		TypeMeta: metav1.TypeMeta{
   453  			Kind: "StorageClass",
   454  		},
   455  		ObjectMeta: metav1.ObjectMeta{
   456  			// Name must be unique, so let's base it on namespace name and the prefix (the prefix is test specific)
   457  			GenerateName: ns + "-" + prefix,
   458  		},
   459  		Provisioner:       provisioner,
   460  		Parameters:        parameters,
   461  		VolumeBindingMode: bindingMode,
   462  		MountOptions:      mountOptions,
   463  	}
   464  }
   465  
   466  func getDefaultPluginName() string {
   467  	switch {
   468  	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
   469  		return "kubernetes.io/gce-pd"
   470  	case framework.ProviderIs("aws"):
   471  		return "kubernetes.io/aws-ebs"
   472  	case framework.ProviderIs("vsphere"):
   473  		return "kubernetes.io/vsphere-volume"
   474  	case framework.ProviderIs("azure"):
   475  		return "kubernetes.io/azure-disk"
   476  	}
   477  	return ""
   478  }
   479  
   480  func createSC(cs clientset.Interface, t testsuites.StorageClassTest, scName, ns string) *storagev1.StorageClass {
   481  	class := newStorageClass(t, ns, "")
   482  	if scName != "" {
   483  		class.Name = scName
   484  	}
   485  	var err error
   486  	_, err = cs.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
   487  	if err != nil {
   488  		class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
   489  		framework.ExpectNoError(err, "Failed to create class: %v", err)
   490  	}
   491  
   492  	return class
   493  }
   494  
   495  func createClaim(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
   496  	class := createSC(cs, t, scName, ns)
   497  	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   498  		ClaimSize:        t.ClaimSize,
   499  		StorageClassName: &(class.Name),
   500  		VolumeMode:       &t.VolumeMode,
   501  		AccessModes:      accessModes,
   502  	}, ns)
   503  	claim, err := cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
   504  	framework.ExpectNoError(err, "Failed to create claim: %v", err)
   505  
   506  	if !t.DelayBinding {
   507  		pvcClaims := []*v1.PersistentVolumeClaim{claim}
   508  		_, err = e2epv.WaitForPVClaimBoundPhase(ctx, cs, pvcClaims, framework.ClaimProvisionTimeout)
   509  		framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err)
   510  	}
   511  	return class, claim
   512  }
   513  
   514  func startPausePod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   515  	class, claim := createClaim(ctx, cs, t, node, scName, ns, nil)
   516  
   517  	pod, err := startPausePodWithClaim(cs, claim, node, ns)
   518  	framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
   519  	return class, claim, pod
   520  }
   521  
   522  func startBusyBoxPod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   523  	class, claim := createClaim(ctx, cs, t, node, scName, ns, nil)
   524  	pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
   525  	framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
   526  	return class, claim, pod
   527  }
   528  
   529  func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) *v1.Pod {
   530  	pod, err := startPausePodWithInlineVolume(cs,
   531  		&v1.CSIVolumeSource{
   532  			Driver: t.Provisioner,
   533  		},
   534  		node, ns)
   535  	framework.ExpectNoError(err, "Failed to create pod: %v", err)
   536  	return pod
   537  }
   538  
   539  func startPausePodGenericEphemeral(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.Pod) {
   540  	class := createSC(cs, t, scName, ns)
   541  	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   542  		ClaimSize:        t.ClaimSize,
   543  		StorageClassName: &(class.Name),
   544  		VolumeMode:       &t.VolumeMode,
   545  	}, ns)
   546  	pod, err := startPausePodWithVolumeSource(cs, v1.VolumeSource{
   547  		Ephemeral: &v1.EphemeralVolumeSource{
   548  			VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{Spec: claim.Spec}},
   549  	}, node, ns)
   550  	framework.ExpectNoError(err, "Failed to create pod: %v", err)
   551  	return class, pod
   552  }
   553  
   554  func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   555  	return startPausePodWithVolumeSource(cs,
   556  		v1.VolumeSource{
   557  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   558  				ClaimName: pvc.Name,
   559  				ReadOnly:  false,
   560  			},
   561  		},
   562  		node, ns)
   563  }
   564  
   565  func startBusyBoxPodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
   566  	return startBusyBoxPodWithVolumeSource(cs,
   567  		v1.VolumeSource{
   568  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   569  				ClaimName: pvc.Name,
   570  				ReadOnly:  false,
   571  			},
   572  		},
   573  		node, ns, fsGroup)
   574  }
   575  
   576  func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   577  	return startPausePodWithVolumeSource(cs,
   578  		v1.VolumeSource{
   579  			CSI: inlineVolume,
   580  		},
   581  		node, ns)
   582  }
   583  
   584  func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   585  	pod := &v1.Pod{
   586  		ObjectMeta: metav1.ObjectMeta{
   587  			GenerateName: "pvc-volume-tester-",
   588  		},
   589  		Spec: v1.PodSpec{
   590  			Containers: []v1.Container{
   591  				{
   592  					Name:  "volume-tester",
   593  					Image: imageutils.GetE2EImage(imageutils.Pause),
   594  					VolumeMounts: []v1.VolumeMount{
   595  						{
   596  							Name:      "my-volume",
   597  							MountPath: "/mnt/test",
   598  						},
   599  					},
   600  				},
   601  			},
   602  			RestartPolicy: v1.RestartPolicyNever,
   603  			Volumes: []v1.Volume{
   604  				{
   605  					Name:         "my-volume",
   606  					VolumeSource: volumeSource,
   607  				},
   608  			},
   609  		},
   610  	}
   611  	e2epod.SetNodeSelection(&pod.Spec, node)
   612  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   613  }
   614  
   615  func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
   616  	pod := &v1.Pod{
   617  		ObjectMeta: metav1.ObjectMeta{
   618  			GenerateName: "pvc-volume-tester-",
   619  		},
   620  		Spec: v1.PodSpec{
   621  			Containers: []v1.Container{
   622  				{
   623  					Name:  "volume-tester",
   624  					Image: framework.BusyBoxImage,
   625  					VolumeMounts: []v1.VolumeMount{
   626  						{
   627  							Name:      "my-volume",
   628  							MountPath: "/mnt/test",
   629  						},
   630  					},
   631  					Command: e2epod.GenerateScriptCmd("while true ; do sleep 2; done"),
   632  				},
   633  			},
   634  			SecurityContext: &v1.PodSecurityContext{
   635  				FSGroup: fsGroup,
   636  			},
   637  			RestartPolicy: v1.RestartPolicyNever,
   638  			Volumes: []v1.Volume{
   639  				{
   640  					Name:         "my-volume",
   641  					VolumeSource: volumeSource,
   642  				},
   643  			},
   644  		},
   645  	}
   646  	e2epod.SetNodeSelection(&pod.Spec, node)
   647  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   648  }
   649  
   650  func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions) (*v1.Pod, error) {
   651  	pod := &v1.Pod{
   652  		ObjectMeta: metav1.ObjectMeta{
   653  			GenerateName: "pvc-volume-tester-",
   654  		},
   655  		Spec: v1.PodSpec{
   656  			SecurityContext: &v1.PodSecurityContext{
   657  				SELinuxOptions: seLinuxOpts,
   658  			},
   659  			Containers: []v1.Container{
   660  				{
   661  					Name:  "volume-tester",
   662  					Image: imageutils.GetE2EImage(imageutils.Pause),
   663  					VolumeMounts: []v1.VolumeMount{
   664  						{
   665  							Name:      "my-volume",
   666  							MountPath: "/mnt/test",
   667  						},
   668  					},
   669  				},
   670  			},
   671  			RestartPolicy: v1.RestartPolicyNever,
   672  			Volumes: []v1.Volume{
   673  				{
   674  					Name: "my-volume",
   675  					VolumeSource: v1.VolumeSource{
   676  						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   677  							ClaimName: pvc.Name,
   678  							ReadOnly:  false,
   679  						},
   680  					},
   681  				},
   682  			},
   683  		},
   684  	}
   685  	if node.Name != "" {
   686  		// Force schedule the pod to skip scheduler RWOP checks
   687  		framework.Logf("Forcing node name %s", node.Name)
   688  		pod.Spec.NodeName = node.Name
   689  	}
   690  	e2epod.SetNodeSelection(&pod.Spec, node)
   691  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   692  }
   693  
   694  func checkPodLogs(ctx context.Context, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
   695  	expectedAttributes := map[string]string{}
   696  	if expectPodInfo {
   697  		expectedAttributes["csi.storage.k8s.io/pod.name"] = pod.Name
   698  		expectedAttributes["csi.storage.k8s.io/pod.namespace"] = pod.Namespace
   699  		expectedAttributes["csi.storage.k8s.io/pod.uid"] = string(pod.UID)
   700  		expectedAttributes["csi.storage.k8s.io/serviceAccount.name"] = "default"
   701  
   702  	}
   703  	if csiInlineVolumesEnabled {
   704  		// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
   705  		expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
   706  	}
   707  
   708  	if csiServiceAccountTokenEnabled {
   709  		expectedAttributes["csi.storage.k8s.io/serviceAccount.tokens"] = "<nonempty>"
   710  	}
   711  
   712  	// Find NodePublish in the GRPC calls.
   713  	foundAttributes := sets.NewString()
   714  	numNodePublishVolume := 0
   715  	numNodeUnpublishVolume := 0
   716  	calls, err := getCalls(ctx)
   717  	if err != nil {
   718  		return err
   719  	}
   720  
   721  	for _, call := range calls {
   722  		switch call.Method {
   723  		case "NodePublishVolume":
   724  			numNodePublishVolume++
   725  			if numNodePublishVolume == 1 {
    726  				// Check that the first NodePublishVolume call had the expected attributes
   727  				for k, v := range expectedAttributes {
   728  					vv, found := call.Request.VolumeContext[k]
   729  					if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {
   730  						foundAttributes.Insert(k)
   731  						framework.Logf("Found volume attribute %s: %s", k, vv)
   732  					}
   733  				}
   734  			}
   735  		case "NodeUnpublishVolume":
   736  			framework.Logf("Found NodeUnpublishVolume: %+v", call)
   737  			numNodeUnpublishVolume++
   738  		}
   739  	}
   740  	if numNodePublishVolume < expectedNumNodePublish {
    741  		return fmt.Errorf("NodePublish should be called at least %d times", expectedNumNodePublish)
   742  	}
   743  
   744  	if numNodeUnpublishVolume == 0 {
   745  		return fmt.Errorf("NodeUnpublish was never called")
   746  	}
   747  	if foundAttributes.Len() != len(expectedAttributes) {
   748  		return fmt.Errorf("number of found volume attributes does not match, expected %d, got %d", len(expectedAttributes), foundAttributes.Len())
   749  	}
   750  	return nil
   751  }
   752  
   753  // createFSGroupRequestPreHook creates a hook that records the fsGroup passed in
   754  // through NodeStageVolume and NodePublishVolume calls.
   755  func createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup *string) *drivers.Hooks {
   756  	return &drivers.Hooks{
   757  		Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   758  			nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
   759  			if ok {
   760  				mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
   761  				if mountVolume != nil {
   762  					*nodeStageFsGroup = mountVolume.VolumeMountGroup
   763  				}
   764  			}
   765  			nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
   766  			if ok {
   767  				mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
   768  				if mountVolume != nil {
   769  					*nodePublishFsGroup = mountVolume.VolumeMountGroup
   770  				}
   771  			}
   772  			return nil, nil
   773  		},
   774  	}
   775  }
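
         // exampleRecordFsGroupParams is a hypothetical sketch (not used by the tests
         // in this file) of how the hook is wired up: the returned parameters are meant
         // to be passed to init(), which starts the embedded mock driver with the hooks
         // installed so that a test can later assert on the recorded fsGroup values.
         func exampleRecordFsGroupParams(nodeStageFsGroup, nodePublishFsGroup *string) testParameters {
         	return testParameters{
         		disableAttach:  true,
         		registerDriver: true,
         		hooks:          createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup),
         	}
         }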
   776  
   777  // createPreHook counts invocations of a certain method (identified by a substring in the full gRPC method name).
   778  func createPreHook(method string, callback func(counter int64) error) *drivers.Hooks {
   779  	var counter int64
   780  
   781  	return &drivers.Hooks{
   782  		Pre: func() func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   783  			return func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   784  				if strings.Contains(fullMethod, method) {
    785  				count := atomic.AddInt64(&counter, 1)
    786  				return nil, callback(count)
   787  				}
   788  				return nil, nil
   789  			}
   790  		}(),
   791  	}
   792  }
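
         // exampleCountNodeStageCalls is a hypothetical sketch (not used by the tests in
         // this file) of how createPreHook can be used: the callback receives the running
         // invocation counter for the matched method, which is published through an atomic
         // here so that a test could assert how often NodeStageVolume was attempted.
         func exampleCountNodeStageCalls(observed *int64) *drivers.Hooks {
         	return createPreHook("NodeStageVolume", func(counter int64) error {
         		// counter is 1 for the first matching call; returning a non-nil
         		// error here would make the mock driver fail that call.
         		atomic.StoreInt64(observed, counter)
         		return nil
         	})
         }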
   793  
   794  // compareCSICalls compares expectedCalls with logs of the mock driver.
    795  // It returns the index of the first expectedCall that was *not* received
    796  // yet, or an error when the calls do not match.
    797  // All repeated calls to the CSI mock driver (e.g. due to exponential backoff)
    798  // are squashed and checked against a single expectedCallSequence item.
   799  //
   800  // Only permanent errors are returned. Other errors are logged and no
   801  // calls are returned. The caller is expected to retry.
   802  func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSequence []csiCall, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error)) ([]drivers.MockCSICall, int, error) {
   803  	allCalls, err := getCalls(ctx)
   804  	if err != nil {
   805  		framework.Logf("intermittent (?) log retrieval error, proceeding without output: %v", err)
   806  		return nil, 0, nil
   807  	}
   808  
   809  	// Remove all repeated and ignored calls
   810  	tracked := sets.NewString(trackedCalls...)
   811  	var calls []drivers.MockCSICall
   812  	var last drivers.MockCSICall
   813  	for _, c := range allCalls {
   814  		if !tracked.Has(c.Method) {
   815  			continue
   816  		}
   817  		if c.Method != last.Method || c.FullError.Code != last.FullError.Code {
   818  			last = c
   819  			calls = append(calls, c)
   820  		}
   821  		// This call is the same as the last one, ignore it.
   822  	}
   823  
   824  	for i, c := range calls {
   825  		if i >= len(expectedCallSequence) {
   826  			// Log all unexpected calls first, return error below outside the loop.
   827  			framework.Logf("Unexpected CSI driver call: %s (%d)", c.Method, c.FullError)
   828  			continue
   829  		}
   830  
   831  		// Compare current call with expected call
   832  		expectedCall := expectedCallSequence[i]
   833  		if c.Method != expectedCall.expectedMethod || c.FullError.Code != expectedCall.expectedError {
   834  			return allCalls, i, fmt.Errorf("Unexpected CSI call %d: expected %s (%d), got %s (%d)", i, expectedCall.expectedMethod, expectedCall.expectedError, c.Method, c.FullError.Code)
   835  		}
   836  
   837  		// if the secret is not nil, compare it
   838  		if expectedCall.expectedSecret != nil {
   839  			if !reflect.DeepEqual(expectedCall.expectedSecret, c.Request.Secrets) {
   840  				return allCalls, i, fmt.Errorf("Unexpected secret: expected %v, got %v", expectedCall.expectedSecret, c.Request.Secrets)
   841  			}
   842  		}
   843  
   844  	}
   845  	if len(calls) > len(expectedCallSequence) {
   846  		return allCalls, len(expectedCallSequence), fmt.Errorf("Received %d unexpected CSI driver calls", len(calls)-len(expectedCallSequence))
   847  	}
   848  	// All calls were correct
   849  	return allCalls, len(calls), nil
   850  
   851  }
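
         // exampleWaitForCSICalls is a hypothetical helper (a sketch, not used by the
         // tests in this file) showing the intended use of compareCSICalls: keep polling
         // the mock driver log until the whole expected sequence has been observed,
         // treating any error from compareCSICalls as permanent.
         func exampleWaitForCSICalls(ctx context.Context, trackedCalls []string, expected []csiCall, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error)) error {
         	return wait.PollImmediate(poll, csiPodRunningTimeout, func() (bool, error) {
         		_, index, err := compareCSICalls(ctx, trackedCalls, expected, getCalls)
         		if err != nil {
         			// Permanent mismatch, stop waiting.
         			return false, err
         		}
         		return index == len(expected), nil
         	})
         }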
   852  
   853  // createSELinuxMountPreHook creates a hook that records the mountOptions passed in
   854  // through NodeStageVolume and NodePublishVolume calls.
   855  func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks {
   856  	return &drivers.Hooks{
   857  		Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   858  			switch req := request.(type) {
   859  			case *csipbv1.NodeStageVolumeRequest:
   860  				stageCalls.Add(1)
   861  				mountVolume := req.GetVolumeCapability().GetMount()
   862  				if mountVolume != nil {
   863  					*nodeStageMountOpts = mountVolume.MountFlags
   864  				}
   865  			case *csipbv1.NodePublishVolumeRequest:
   866  				publishCalls.Add(1)
   867  				mountVolume := req.GetVolumeCapability().GetMount()
   868  				if mountVolume != nil {
   869  					*nodePublishMountOpts = mountVolume.MountFlags
   870  				}
   871  			case *csipbv1.NodeUnstageVolumeRequest:
   872  				unstageCalls.Add(1)
   873  			case *csipbv1.NodeUnpublishVolumeRequest:
   874  				unpublishCalls.Add(1)
   875  			}
   876  			return nil, nil
   877  		},
   878  	}
   879  }
   880  
   881  // A lot of this code was copied from e2e/framework. It would be nicer
   882  // if it could be reused - see https://github.com/kubernetes/kubernetes/issues/92754
   883  func podRunning(ctx context.Context, c clientset.Interface, podName, namespace string) wait.ConditionFunc {
   884  	return func() (bool, error) {
   885  		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
   886  		if err != nil {
   887  			return false, err
   888  		}
   889  		switch pod.Status.Phase {
   890  		case v1.PodRunning:
   891  			return true, nil
   892  		case v1.PodFailed, v1.PodSucceeded:
   893  			return false, errPodCompleted
   894  		}
   895  		return false, nil
   896  	}
   897  }
   898  
   899  func podHasStorage(ctx context.Context, c clientset.Interface, podName, namespace string, when time.Time) wait.ConditionFunc {
   900  	// Check for events of this pod. Copied from test/e2e/common/container_probe.go.
   901  	expectedEvent := fields.Set{
   902  		"involvedObject.kind":      "Pod",
   903  		"involvedObject.name":      podName,
   904  		"involvedObject.namespace": namespace,
   905  		"reason":                   "FailedScheduling",
   906  	}.AsSelector().String()
   907  	options := metav1.ListOptions{
   908  		FieldSelector: expectedEvent,
   909  	}
   910  	// copied from test/e2e/framework/events/events.go
   911  	return func() (bool, error) {
    912  		// We cannot tell here whether the pod has enough storage; we only learn
    913  		// when it doesn't, and in that case we abort the wait with a special error.
   914  		events, err := c.CoreV1().Events(namespace).List(ctx, options)
   915  		if err != nil {
   916  			return false, fmt.Errorf("got error while getting events: %w", err)
   917  		}
   918  		for _, event := range events.Items {
   919  			if /* event.CreationTimestamp.After(when) &&
   920  			 */strings.Contains(event.Message, errReasonNotEnoughSpace) {
   921  				return false, errNotEnoughSpace
   922  			}
   923  		}
   924  		return false, nil
   925  	}
   926  }
   927  
   928  func anyOf(conditions ...wait.ConditionFunc) wait.ConditionFunc {
   929  	return func() (bool, error) {
   930  		for _, condition := range conditions {
   931  			done, err := condition()
   932  			if err != nil {
   933  				return false, err
   934  			}
   935  			if done {
   936  				return true, nil
   937  			}
   938  		}
   939  		return false, nil
   940  	}
   941  }
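
         // exampleWaitForPodRunningOrOutOfSpace is a hypothetical usage sketch (not
         // called by the tests in this file): it combines the condition functions above
         // so that the wait finishes as soon as the pod runs and aborts early with
         // errPodCompleted or errNotEnoughSpace once the outcome is already final.
         func exampleWaitForPodRunningOrOutOfSpace(ctx context.Context, c clientset.Interface, podName, namespace string) error {
         	return wait.PollImmediate(poll, csiPodRunningTimeout, anyOf(
         		podRunning(ctx, c, podName, namespace),
         		podHasStorage(ctx, c, podName, namespace, time.Now()),
         	))
         }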
   942  
   943  func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
   944  	waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
   945  		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
   946  		if err != nil {
   947  			return false, err
   948  		}
   949  		for _, c := range pod.Status.Conditions {
   950  			// Conformance tests cannot rely on specific output of optional fields (e.g., Reason
    951  			// and Message) because these fields are not subject to the deprecation policy.
   952  			if c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse && c.Reason != "" && c.Message != "" {
   953  				return true, nil
   954  			}
   955  		}
   956  		return false, nil
   957  	})
   958  	if waitErr != nil {
   959  		return fmt.Errorf("error waiting for pod %s/%s to have max volume condition: %v", pod.Namespace, pod.Name, waitErr)
   960  	}
   961  	return nil
   962  }