k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/storage/csimock/base.go

     1  /*
     2  Copyright 2022 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package csimock
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"reflect"
    24  	"strconv"
    25  	"strings"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	csipbv1 "github.com/container-storage-interface/spec/lib/go/csi"
    30  	"github.com/onsi/ginkgo/v2"
    31  	"google.golang.org/grpc/codes"
    32  	v1 "k8s.io/api/core/v1"
    33  	storagev1 "k8s.io/api/storage/v1"
    34  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    35  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    37  	"k8s.io/apimachinery/pkg/fields"
    38  	utilerrors "k8s.io/apimachinery/pkg/util/errors"
    39  	"k8s.io/apimachinery/pkg/util/sets"
    40  	"k8s.io/apimachinery/pkg/util/wait"
    41  	clientset "k8s.io/client-go/kubernetes"
    42  	"k8s.io/kubernetes/test/e2e/framework"
    43  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    44  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    45  	"k8s.io/kubernetes/test/e2e/storage/drivers"
    46  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    47  	"k8s.io/kubernetes/test/e2e/storage/testsuites"
    48  	"k8s.io/kubernetes/test/e2e/storage/utils"
    49  	"k8s.io/kubernetes/test/utils/format"
    50  	imageutils "k8s.io/kubernetes/test/utils/image"
    51  	"k8s.io/utils/ptr"
    52  )
    53  
    54  const (
    55  	csiNodeLimitUpdateTimeout  = 5 * time.Minute
    56  	csiPodUnschedulableTimeout = 5 * time.Minute
    57  	csiResizeWaitPeriod        = 5 * time.Minute
    58  	csiVolumeAttachmentTimeout = 7 * time.Minute
    59  	// how long to wait for Resizing Condition on PVC to appear
    60  	csiResizingConditionWait = 2 * time.Minute
    61  
    62  	// Time for starting a pod with a volume.
    63  	csiPodRunningTimeout = 5 * time.Minute
    64  
    65  	// How long to wait for kubelet to unstage a volume after a pod is deleted
    66  	csiUnstageWaitTimeout = 1 * time.Minute
    67  )
    68  
    69  // csiCall represents an expected call from Kubernetes to the CSI mock driver and
    70  // its expected return value.
    71  // When matching an expected csiCall against the real CSI mock driver output, one csiCall
    72  // matches *one or more* calls with the same method and error code.
    73  // This is due to exponential backoff in Kubernetes, where the test cannot expect an
    74  // exact number of call repetitions.
    75  type csiCall struct {
    76  	expectedMethod string
    77  	expectedError  codes.Code
    78  	expectedSecret map[string]string
    79  	// This is a mark for the test itself to delete the tested pod *after*
    80  	// this csiCall is received.
    81  	deletePod bool
    82  }
    83  
    84  type testParameters struct {
    85  	disableAttach       bool
    86  	attachLimit         int
    87  	registerDriver      bool
    88  	lateBinding         bool
    89  	enableTopology      bool
    90  	podInfo             *bool
    91  	storageCapacity     *bool
    92  	scName              string // pre-selected storage class name; must be unique in the cluster
    93  	enableResizing      bool   // enable resizing for both CSI mock driver and storageClass.
    94  	enableNodeExpansion bool   // enable node expansion for CSI mock driver
    95  	// just disables resizing on the driver; overrides the enableResizing flag for the CSI mock driver
    96  	disableResizingOnDriver       bool
    97  	enableSnapshot                bool
    98  	enableVolumeMountGroup        bool // enable the VOLUME_MOUNT_GROUP node capability in the CSI mock driver.
    99  	hooks                         *drivers.Hooks
   100  	tokenRequests                 []storagev1.TokenRequest
   101  	requiresRepublish             *bool
   102  	fsGroupPolicy                 *storagev1.FSGroupPolicy
   103  	enableSELinuxMount            *bool
   104  	enableRecoverExpansionFailure bool
   105  	enableHonorPVReclaimPolicy    bool
   106  	enableCSINodeExpandSecret     bool
   107  	reclaimPolicy                 *v1.PersistentVolumeReclaimPolicy
   108  }
   109  
   110  type mockDriverSetup struct {
   111  	cs          clientset.Interface
   112  	config      *storageframework.PerTestConfig
   113  	pods        []*v1.Pod
   114  	pvcs        []*v1.PersistentVolumeClaim
   115  	pvs         []*v1.PersistentVolume
   116  	sc          map[string]*storagev1.StorageClass
   117  	vsc         map[string]*unstructured.Unstructured
   118  	driver      drivers.MockCSITestDriver
   119  	provisioner string
   120  	tp          testParameters
   121  	f           *framework.Framework
   122  }
   123  
   124  type volumeType string
   125  
   126  var (
   127  	csiEphemeral     = volumeType("CSI")
   128  	genericEphemeral = volumeType("Ephemeral")
   129  	pvcReference     = volumeType("PVC")
   130  )
   131  
   132  const (
   133  	poll                           = 2 * time.Second
   134  	pvcAsSourceProtectionFinalizer = "snapshot.storage.kubernetes.io/pvc-as-source-protection"
   135  	volumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection"
   136  	volumeSnapshotBoundFinalizer   = "snapshot.storage.kubernetes.io/volumesnapshot-bound-protection"
   137  	errReasonNotEnoughSpace        = "node(s) did not have enough free storage"
   138  
   139  	csiNodeExpandSecretKey          = "csi.storage.k8s.io/node-expand-secret-name"
   140  	csiNodeExpandSecretNamespaceKey = "csi.storage.k8s.io/node-expand-secret-namespace"
   141  )
   142  
   143  var (
   144  	errPodCompleted   = fmt.Errorf("pod ran to completion")
   145  	errNotEnoughSpace = errors.New(errReasonNotEnoughSpace)
   146  )
   147  
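        // newMockDriverSetup returns a mockDriverSetup tied to the given framework, with
        // empty maps for the storage classes and volume snapshot classes it will track for cleanup.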
   148  func newMockDriverSetup(f *framework.Framework) *mockDriverSetup {
   149  	return &mockDriverSetup{
   150  		cs:  f.ClientSet,
   151  		sc:  make(map[string]*storagev1.StorageClass),
   152  		vsc: make(map[string]*unstructured.Unstructured),
   153  		f:   f,
   154  	}
   155  }
   156  
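        // init deploys the CSI mock driver with the given test parameters and waits until
        // the driver is registered on the selected test node.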
   157  func (m *mockDriverSetup) init(ctx context.Context, tp testParameters) {
   158  	m.cs = m.f.ClientSet
   159  	m.tp = tp
   160  
   161  	var err error
   162  	driverOpts := drivers.CSIMockDriverOpts{
   163  		RegisterDriver:                tp.registerDriver,
   164  		PodInfo:                       tp.podInfo,
   165  		StorageCapacity:               tp.storageCapacity,
   166  		EnableTopology:                tp.enableTopology,
   167  		AttachLimit:                   tp.attachLimit,
   168  		DisableAttach:                 tp.disableAttach,
   169  		EnableResizing:                tp.enableResizing,
   170  		EnableNodeExpansion:           tp.enableNodeExpansion,
   171  		EnableSnapshot:                tp.enableSnapshot,
   172  		EnableVolumeMountGroup:        tp.enableVolumeMountGroup,
   173  		TokenRequests:                 tp.tokenRequests,
   174  		RequiresRepublish:             tp.requiresRepublish,
   175  		FSGroupPolicy:                 tp.fsGroupPolicy,
   176  		EnableSELinuxMount:            tp.enableSELinuxMount,
   177  		EnableRecoverExpansionFailure: tp.enableRecoverExpansionFailure,
   178  		EnableHonorPVReclaimPolicy:    tp.enableHonorPVReclaimPolicy,
   179  	}
   180  
   181  	// At the moment, only tests which need hooks are
   182  	// using the embedded CSI mock driver. The rest run
   183  	// the driver inside the cluster, although they could
   184  	// be changed to use embedding merely by setting
   185  	// driverOpts.Embedded to true.
   186  	//
   187  	// Not enabling it for all tests minimizes
   188  	// the risk that the introduction of embedding breaks
   189  	// some existing tests and avoids a dependency
   190  	// on port forwarding, which is important if some of
   191  	// these tests are supposed to become part of
   192  	// conformance testing (port forwarding isn't
   193  	// currently required).
   194  	if tp.hooks != nil {
   195  		driverOpts.Embedded = true
   196  		driverOpts.Hooks = *tp.hooks
   197  	}
   198  
   199  	// this just disables resizing on the driver, keeping resizing on the SC enabled.
   200  	if tp.disableResizingOnDriver {
   201  		driverOpts.EnableResizing = false
   202  	}
   203  
   204  	m.driver = drivers.InitMockCSIDriver(driverOpts)
   205  	config := m.driver.PrepareTest(ctx, m.f)
   206  	m.config = config
   207  	m.provisioner = config.GetUniqueDriverName()
   208  
   209  	if tp.registerDriver {
   210  		err = waitForCSIDriver(m.cs, m.config.GetUniqueDriverName())
   211  		framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
   212  		ginkgo.DeferCleanup(destroyCSIDriver, m.cs, m.config.GetUniqueDriverName())
   213  	}
   214  
   215  	// Wait for the CSIDriver to actually get deployed and the CSINode object to be generated.
   216  	// This indicates that the mock CSI driver pod is up and running and healthy.
   217  	err = drivers.WaitForCSIDriverRegistrationOnNode(ctx, m.config.ClientNodeSelection.Name, m.config.GetUniqueDriverName(), m.cs)
   218  	framework.ExpectNoError(err, "Failed to register CSIDriver %v", m.config.GetUniqueDriverName())
   219  }
   220  
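        // cleanup deletes all pods, claims, volumes, storage classes and volume snapshot
        // classes created during the test and aggregates any deletion errors.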
   221  func (m *mockDriverSetup) cleanup(ctx context.Context) {
   222  	cs := m.f.ClientSet
   223  	var errs []error
   224  
   225  	for _, pod := range m.pods {
   226  		ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   227  		errs = append(errs, e2epod.DeletePodWithWait(ctx, cs, pod))
   228  	}
   229  
   230  	for _, claim := range m.pvcs {
   231  		ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
   232  		claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
   233  		if err == nil {
   234  			if err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}); err != nil {
   235  				errs = append(errs, err)
   236  			}
   237  			if claim.Spec.VolumeName != "" {
   238  				errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(ctx, cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute))
   239  			}
   240  		}
   241  	}
   242  
   243  	for _, pv := range m.pvs {
   244  		ginkgo.By(fmt.Sprintf("Deleting pv %s", pv.Name))
   245  		errs = append(errs, e2epv.DeletePersistentVolume(ctx, cs, pv.Name))
   246  	}
   247  
   248  	for _, sc := range m.sc {
   249  		ginkgo.By(fmt.Sprintf("Deleting storageclass %s", sc.Name))
   250  		cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
   251  	}
   252  
   253  	for _, vsc := range m.vsc {
   254  		ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
   255  		m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
   256  	}
   257  
   258  	err := utilerrors.NewAggregate(errs)
   259  	framework.ExpectNoError(err, "while cleaning up after test")
   260  }
   261  
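        // update patches the CSIDriver object of the mock driver with the given options.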
   262  func (m *mockDriverSetup) update(o utils.PatchCSIOptions) {
   263  	item, err := m.cs.StorageV1().CSIDrivers().Get(context.TODO(), m.config.GetUniqueDriverName(), metav1.GetOptions{})
   264  	framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
   265  
   266  	err = utils.PatchCSIDeployment(nil, o, item)
   267  	framework.ExpectNoError(err, "Failed to apply %v to CSIDriver object %v", o, m.config.GetUniqueDriverName())
   268  
   269  	_, err = m.cs.StorageV1().CSIDrivers().Update(context.TODO(), item, metav1.UpdateOptions{})
   270  	framework.ExpectNoError(err, "Failed to update CSIDriver %v", m.config.GetUniqueDriverName())
   271  }
   272  
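        // createPod starts a pause pod that consumes the mock driver through a CSI inline
        // volume, a generic ephemeral volume, or a dynamically provisioned PVC, depending on
        // withVolume, and registers the created objects for cleanup.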
   273  func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
   274  	ginkgo.By("Creating pod")
   275  	f := m.f
   276  
   277  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   278  	if m.tp.enableCSINodeExpandSecret {
   279  		if sc.Parameters == nil {
   280  			parameters := map[string]string{
   281  				csiNodeExpandSecretKey:          "test-secret",
   282  				csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
   283  			}
   284  			sc.Parameters = parameters
   285  		} else {
   286  			sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
   287  			sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
   288  		}
   289  	}
   290  	scTest := testsuites.StorageClassTest{
   291  		Name:                 m.driver.GetDriverInfo().Name,
   292  		Timeouts:             f.Timeouts,
   293  		Provisioner:          sc.Provisioner,
   294  		Parameters:           sc.Parameters,
   295  		ClaimSize:            "1Gi",
   296  		ExpectedSize:         "1Gi",
   297  		DelayBinding:         m.tp.lateBinding,
   298  		AllowVolumeExpansion: m.tp.enableResizing,
   299  		ReclaimPolicy:        m.tp.reclaimPolicy,
   300  	}
   301  
   302  	// The mock driver only works when everything runs on a single node.
   303  	nodeSelection := m.config.ClientNodeSelection
   304  	switch withVolume {
   305  	case csiEphemeral:
   306  		pod = startPausePodInline(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
   307  	case genericEphemeral:
   308  		class, pod = startPausePodGenericEphemeral(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name)
   309  		if class != nil {
   310  			m.sc[class.Name] = class
   311  		}
   312  		claim = &v1.PersistentVolumeClaim{
   313  			ObjectMeta: metav1.ObjectMeta{
   314  				Name:      pod.Name + "-" + pod.Spec.Volumes[0].Name,
   315  				Namespace: f.Namespace.Name,
   316  			},
   317  		}
   318  	case pvcReference:
   319  		class, claim, pod = startPausePod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name)
   320  		if class != nil {
   321  			m.sc[class.Name] = class
   322  		}
   323  		if claim != nil {
   324  			m.pvcs = append(m.pvcs, claim)
   325  		}
   326  	}
   327  	if pod != nil {
   328  		m.pods = append(m.pods, pod)
   329  	}
   330  	return // result variables set above
   331  }
   332  
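        // createPVC creates (or reuses) the storage class for the mock driver and a PVC
        // using it, and registers both for cleanup.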
   333  func (m *mockDriverSetup) createPVC(ctx context.Context) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim) {
   334  	ginkgo.By("Creating pvc")
   335  	f := m.f
   336  
   337  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   338  	if m.tp.enableCSINodeExpandSecret {
   339  		if sc.Parameters == nil {
   340  			parameters := map[string]string{
   341  				csiNodeExpandSecretKey:          "test-secret",
   342  				csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
   343  			}
   344  			sc.Parameters = parameters
   345  		} else {
   346  			sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
   347  			sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
   348  		}
   349  	}
   350  	scTest := testsuites.StorageClassTest{
   351  		Name:                 m.driver.GetDriverInfo().Name,
   352  		Timeouts:             f.Timeouts,
   353  		Provisioner:          sc.Provisioner,
   354  		Parameters:           sc.Parameters,
   355  		ClaimSize:            "1Gi",
   356  		ExpectedSize:         "1Gi",
   357  		DelayBinding:         m.tp.lateBinding,
   358  		AllowVolumeExpansion: m.tp.enableResizing,
   359  		ReclaimPolicy:        m.tp.reclaimPolicy,
   360  	}
   361  
   362  	// The mock driver only works when everything runs on a single node.
   363  	nodeSelection := m.config.ClientNodeSelection
   364  	class, claim = createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, nil)
   365  	if class != nil {
   366  		m.sc[class.Name] = class
   367  	}
   368  	if claim != nil {
   369  		m.pvcs = append(m.pvcs, claim)
   370  	}
   371  
   372  	return class, claim
   373  }
   374  
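        // createPVPVC statically provisions a PV backed by the mock driver together with a
        // PVC bound to it, and registers the created objects for cleanup.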
   375  func (m *mockDriverSetup) createPVPVC(ctx context.Context) (class *storagev1.StorageClass, volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) {
   376  	ginkgo.By("Creating the PV and PVC manually")
   377  	f := m.f
   378  
   379  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   380  	if m.tp.enableCSINodeExpandSecret {
   381  		if sc.Parameters == nil {
   382  			parameters := map[string]string{
   383  				csiNodeExpandSecretKey:          "test-secret",
   384  				csiNodeExpandSecretNamespaceKey: f.Namespace.Name,
   385  			}
   386  			sc.Parameters = parameters
   387  		} else {
   388  			sc.Parameters[csiNodeExpandSecretKey] = "test-secret"
   389  			sc.Parameters[csiNodeExpandSecretNamespaceKey] = f.Namespace.Name
   390  		}
   391  	}
   392  	scTest := testsuites.StorageClassTest{
   393  		Name:                 m.driver.GetDriverInfo().Name,
   394  		Timeouts:             f.Timeouts,
   395  		Provisioner:          sc.Provisioner,
   396  		Parameters:           sc.Parameters,
   397  		ClaimSize:            "1Gi",
   398  		ExpectedSize:         "1Gi",
   399  		DelayBinding:         m.tp.lateBinding,
   400  		AllowVolumeExpansion: m.tp.enableResizing,
   401  		ReclaimPolicy:        m.tp.reclaimPolicy,
   402  	}
   403  
   404  	// The mock driver only works when everything runs on a single node.
   405  	nodeSelection := m.config.ClientNodeSelection
   406  	class, volume, claim = createVolumeAndClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, nil)
   407  	if class != nil {
   408  		m.sc[class.Name] = class
   409  	}
   410  	if volume != nil {
   411  		m.pvs = append(m.pvs, volume)
   412  	}
   413  	if claim != nil {
   414  		m.pvcs = append(m.pvcs, claim)
   415  	}
   416  	return class, volume, claim
   417  }
   418  
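        // createPodWithPVC starts a pause pod that mounts the given PVC and registers the
        // pod for cleanup.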
   419  func (m *mockDriverSetup) createPodWithPVC(pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
   420  	f := m.f
   421  
   422  	nodeSelection := m.config.ClientNodeSelection
   423  	pod, err := startPausePodWithClaim(m.cs, pvc, nodeSelection, f.Namespace.Name)
   424  	if pod != nil {
   425  		m.pods = append(m.pods, pod)
   426  	}
   427  	return pod, err
   428  }
   429  
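        // createPodWithFSGroup provisions a PVC and starts a busybox pod whose pod-level
        // fsGroup is set to the given value.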
   430  func (m *mockDriverSetup) createPodWithFSGroup(ctx context.Context, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   431  	f := m.f
   432  
   433  	ginkgo.By("Creating pod with fsGroup")
   434  	nodeSelection := m.config.ClientNodeSelection
   435  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   436  	scTest := testsuites.StorageClassTest{
   437  		Name:                 m.driver.GetDriverInfo().Name,
   438  		Provisioner:          sc.Provisioner,
   439  		Parameters:           sc.Parameters,
   440  		ClaimSize:            "1Gi",
   441  		ExpectedSize:         "1Gi",
   442  		DelayBinding:         m.tp.lateBinding,
   443  		AllowVolumeExpansion: m.tp.enableResizing,
   444  		ReclaimPolicy:        m.tp.reclaimPolicy,
   445  	}
   446  	class, claim, pod := startBusyBoxPod(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)
   447  
   448  	if class != nil {
   449  		m.sc[class.Name] = class
   450  	}
   451  	if claim != nil {
   452  		m.pvcs = append(m.pvcs, claim)
   453  	}
   454  
   455  	if pod != nil {
   456  		m.pods = append(m.pods, pod)
   457  	}
   458  
   459  	return class, claim, pod
   460  }
   461  
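        // createPodWithSELinux provisions a PVC with the given access modes and mount
        // options and starts a pause pod that mounts it with the given SELinux context.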
   462  func (m *mockDriverSetup) createPodWithSELinux(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   463  	ginkgo.By("Creating pod with SELinux context")
   464  	f := m.f
   465  	nodeSelection := m.config.ClientNodeSelection
   466  	sc := m.driver.GetDynamicProvisionStorageClass(ctx, m.config, "")
   467  	scTest := testsuites.StorageClassTest{
   468  		Name:                 m.driver.GetDriverInfo().Name,
   469  		Provisioner:          sc.Provisioner,
   470  		Parameters:           sc.Parameters,
   471  		ClaimSize:            "1Gi",
   472  		ExpectedSize:         "1Gi",
   473  		DelayBinding:         m.tp.lateBinding,
   474  		AllowVolumeExpansion: m.tp.enableResizing,
   475  		MountOptions:         mountOptions,
   476  		ReclaimPolicy:        m.tp.reclaimPolicy,
   477  	}
   478  	class, claim := createClaim(ctx, f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
   479  	pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts)
   480  	framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err)
   481  
   482  	if class != nil {
   483  		m.sc[class.Name] = class
   484  	}
   485  	if claim != nil {
   486  		m.pvcs = append(m.pvcs, claim)
   487  	}
   488  
   489  	if pod != nil {
   490  		m.pods = append(m.pods, pod)
   491  	}
   492  
   493  	return class, claim, pod
   494  }
   495  
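        // waitForCSIDriver polls until the CSIDriver object with the given name exists, or
        // fails once the timeout expires.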
   496  func waitForCSIDriver(cs clientset.Interface, driverName string) error {
   497  	timeout := 4 * time.Minute
   498  
   499  	framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName)
   500  	for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) {
   501  		_, err := cs.StorageV1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
   502  		if !apierrors.IsNotFound(err) {
   503  			return err
   504  		}
   505  	}
   506  	return fmt.Errorf("gave up after waiting %v for CSIDriver %q", timeout, driverName)
   507  }
   508  
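        // destroyCSIDriver deletes the CSIDriver object with the given name, if it still exists.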
   509  func destroyCSIDriver(cs clientset.Interface, driverName string) {
   510  	driverGet, err := cs.StorageV1().CSIDrivers().Get(context.TODO(), driverName, metav1.GetOptions{})
   511  	if err == nil {
   512  		framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name)
   513  		// Uncomment the following line to get full dump of CSIDriver object
   514  		// framework.Logf("%s", framework.PrettyPrint(driverGet))
   515  		cs.StorageV1().CSIDrivers().Delete(context.TODO(), driverName, metav1.DeleteOptions{})
   516  	}
   517  }
   518  
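        // newStorageClass constructs a StorageClass for the given test, deriving the
        // provisioner, binding mode, parameters and expansion settings from t.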
   519  func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *storagev1.StorageClass {
   520  	pluginName := t.Provisioner
   521  	if pluginName == "" {
   522  		pluginName = getDefaultPluginName()
   523  	}
   524  	if prefix == "" {
   525  		prefix = "sc"
   526  	}
   527  	bindingMode := storagev1.VolumeBindingImmediate
   528  	if t.DelayBinding {
   529  		bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
   530  	}
   531  	if t.Parameters == nil {
   532  		t.Parameters = make(map[string]string)
   533  	}
   534  
   535  	if framework.NodeOSDistroIs("windows") {
   536  		// fstype might be forced from outside; in that case, skip setting a default
   537  		if _, exists := t.Parameters["fstype"]; !exists {
   538  			t.Parameters["fstype"] = e2epv.GetDefaultFSType()
   539  			framework.Logf("setting a default fsType=%s in the storage class", t.Parameters["fstype"])
   540  		}
   541  	}
   542  
   543  	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, t.ReclaimPolicy, ns, prefix)
   544  	if t.AllowVolumeExpansion {
   545  		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
   546  	}
   547  	return sc
   548  }
   549  
   550  func getStorageClass(
   551  	provisioner string,
   552  	parameters map[string]string,
   553  	bindingMode *storagev1.VolumeBindingMode,
   554  	mountOptions []string,
   555  	reclaimPolicy *v1.PersistentVolumeReclaimPolicy,
   556  	ns string,
   557  	prefix string,
   558  ) *storagev1.StorageClass {
   559  	if bindingMode == nil {
   560  		defaultBindingMode := storagev1.VolumeBindingImmediate
   561  		bindingMode = &defaultBindingMode
   562  	}
   563  	return &storagev1.StorageClass{
   564  		TypeMeta: metav1.TypeMeta{
   565  			Kind: "StorageClass",
   566  		},
   567  		ObjectMeta: metav1.ObjectMeta{
   568  			// Name must be unique, so let's base it on namespace name and the prefix (the prefix is test specific)
   569  			GenerateName: ns + "-" + prefix,
   570  		},
   571  		Provisioner:       provisioner,
   572  		Parameters:        parameters,
   573  		VolumeBindingMode: bindingMode,
   574  		MountOptions:      mountOptions,
   575  		ReclaimPolicy:     reclaimPolicy,
   576  	}
   577  }
   578  
   579  func getDefaultPluginName() string {
   580  	switch {
   581  	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
   582  		return "kubernetes.io/gce-pd"
   583  	case framework.ProviderIs("aws"):
   584  		return "kubernetes.io/aws-ebs"
   585  	case framework.ProviderIs("vsphere"):
   586  		return "kubernetes.io/vsphere-volume"
   587  	case framework.ProviderIs("azure"):
   588  		return "kubernetes.io/azure-disk"
   589  	}
   590  	return ""
   591  }
   592  
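        // createSC creates the StorageClass for the test unless a class with the same name
        // already exists in the cluster.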
   593  func createSC(cs clientset.Interface, t testsuites.StorageClassTest, scName, ns string) *storagev1.StorageClass {
   594  	class := newStorageClass(t, ns, "")
   595  	if scName != "" {
   596  		class.Name = scName
   597  	}
   598  	var err error
   599  	_, err = cs.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
   600  	if err != nil {
   601  		class, err = cs.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
   602  		framework.ExpectNoError(err, "Failed to create class: %v", err)
   603  	}
   604  
   605  	return class
   606  }
   607  
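        // createClaim creates the storage class and a PVC that uses it; with immediate
        // binding it also waits for the claim to become bound.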
   608  func createClaim(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
   609  	class := createSC(cs, t, scName, ns)
   610  	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   611  		ClaimSize:        t.ClaimSize,
   612  		StorageClassName: &(class.Name),
   613  		VolumeMode:       &t.VolumeMode,
   614  		AccessModes:      accessModes,
   615  	}, ns)
   616  	claim, err := cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
   617  	framework.ExpectNoError(err, "Failed to create claim: %v", err)
   618  
   619  	if !t.DelayBinding {
   620  		pvcClaims := []*v1.PersistentVolumeClaim{claim}
   621  		_, err = e2epv.WaitForPVClaimBoundPhase(ctx, cs, pvcClaims, framework.ClaimProvisionTimeout)
   622  		framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err)
   623  	}
   624  	return class, claim
   625  }
   626  
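        // createVolumeAndClaim statically provisions a PV with a fixed volume handle and a
        // matching PVC, and waits for the two to bind to each other.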
   627  func createVolumeAndClaim(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
   628  	class := createSC(cs, t, scName, ns)
   629  
   630  	volumeMode := v1.PersistentVolumeFilesystem
   631  	if t.VolumeMode != "" {
   632  		volumeMode = t.VolumeMode
   633  	}
   634  
   635  	pvConfig := e2epv.PersistentVolumeConfig{
   636  		Capacity:         t.ClaimSize,
   637  		StorageClassName: class.Name,
   638  		VolumeMode:       &volumeMode,
   639  		AccessModes:      accessModes,
   640  		ReclaimPolicy:    ptr.Deref(class.ReclaimPolicy, v1.PersistentVolumeReclaimDelete),
   641  		PVSource: v1.PersistentVolumeSource{
   642  			CSI: &v1.CSIPersistentVolumeSource{
   643  				Driver:       class.Provisioner,
   644  				VolumeHandle: "test-volume-handle",
   645  			},
   646  		},
   647  	}
   648  
   649  	pvcConfig := e2epv.PersistentVolumeClaimConfig{
   650  		ClaimSize:        t.ClaimSize,
   651  		StorageClassName: &(class.Name),
   652  		VolumeMode:       &volumeMode,
   653  		AccessModes:      accessModes,
   654  	}
   655  
   656  	volume, claim, err := e2epv.CreatePVPVC(ctx, cs, t.Timeouts, pvConfig, pvcConfig, ns, true)
   657  	framework.ExpectNoError(err, "Failed to create PV and PVC")
   658  
   659  	err = e2epv.WaitOnPVandPVC(ctx, cs, t.Timeouts, ns, volume, claim)
   660  	framework.ExpectNoError(err, "Failed waiting for PV and PVC to be bound to each other")
   661  
   662  	return class, volume, claim
   663  }
   664  
   665  func startPausePod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   666  	class, claim := createClaim(ctx, cs, t, node, scName, ns, nil)
   667  
   668  	pod, err := startPausePodWithClaim(cs, claim, node, ns)
   669  	framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
   670  	return class, claim, pod
   671  }
   672  
   673  func startBusyBoxPod(ctx context.Context, cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
   674  	class, claim := createClaim(ctx, cs, t, node, scName, ns, nil)
   675  	pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
   676  	framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
   677  	return class, claim, pod
   678  }
   679  
   680  func startPausePodInline(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, ns string) *v1.Pod {
   681  	pod, err := startPausePodWithInlineVolume(cs,
   682  		&v1.CSIVolumeSource{
   683  			Driver: t.Provisioner,
   684  		},
   685  		node, ns)
   686  	framework.ExpectNoError(err, "Failed to create pod: %v", err)
   687  	return pod
   688  }
   689  
   690  func startPausePodGenericEphemeral(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.Pod) {
   691  	class := createSC(cs, t, scName, ns)
   692  	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
   693  		ClaimSize:        t.ClaimSize,
   694  		StorageClassName: &(class.Name),
   695  		VolumeMode:       &t.VolumeMode,
   696  	}, ns)
   697  	pod, err := startPausePodWithVolumeSource(cs, v1.VolumeSource{
   698  		Ephemeral: &v1.EphemeralVolumeSource{
   699  			VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{Spec: claim.Spec}},
   700  	}, node, ns)
   701  	framework.ExpectNoError(err, "Failed to create pod: %v", err)
   702  	return class, pod
   703  }
   704  
   705  func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   706  	return startPausePodWithVolumeSource(cs,
   707  		v1.VolumeSource{
   708  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   709  				ClaimName: pvc.Name,
   710  				ReadOnly:  false,
   711  			},
   712  		},
   713  		node, ns)
   714  }
   715  
   716  func startBusyBoxPodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
   717  	return startBusyBoxPodWithVolumeSource(cs,
   718  		v1.VolumeSource{
   719  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   720  				ClaimName: pvc.Name,
   721  				ReadOnly:  false,
   722  			},
   723  		},
   724  		node, ns, fsGroup)
   725  }
   726  
   727  func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   728  	return startPausePodWithVolumeSource(cs,
   729  		v1.VolumeSource{
   730  			CSI: inlineVolume,
   731  		},
   732  		node, ns)
   733  }
   734  
   735  func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
   736  	pod := &v1.Pod{
   737  		ObjectMeta: metav1.ObjectMeta{
   738  			GenerateName: "pvc-volume-tester-",
   739  		},
   740  		Spec: v1.PodSpec{
   741  			Containers: []v1.Container{
   742  				{
   743  					Name:  "volume-tester",
   744  					Image: imageutils.GetE2EImage(imageutils.Pause),
   745  					VolumeMounts: []v1.VolumeMount{
   746  						{
   747  							Name:      "my-volume",
   748  							MountPath: "/mnt/test",
   749  						},
   750  					},
   751  				},
   752  			},
   753  			RestartPolicy: v1.RestartPolicyNever,
   754  			Volumes: []v1.Volume{
   755  				{
   756  					Name:         "my-volume",
   757  					VolumeSource: volumeSource,
   758  				},
   759  			},
   760  		},
   761  	}
   762  	e2epod.SetNodeSelection(&pod.Spec, node)
   763  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   764  }
   765  
   766  func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
   767  	pod := &v1.Pod{
   768  		ObjectMeta: metav1.ObjectMeta{
   769  			GenerateName: "pvc-volume-tester-",
   770  		},
   771  		Spec: v1.PodSpec{
   772  			Containers: []v1.Container{
   773  				{
   774  					Name:  "volume-tester",
   775  					Image: imageutils.GetE2EImage(imageutils.BusyBox),
   776  					VolumeMounts: []v1.VolumeMount{
   777  						{
   778  							Name:      "my-volume",
   779  							MountPath: "/mnt/test",
   780  						},
   781  					},
   782  					Command: e2epod.GenerateScriptCmd("while true ; do sleep 2; done"),
   783  				},
   784  			},
   785  			SecurityContext: &v1.PodSecurityContext{
   786  				FSGroup: fsGroup,
   787  			},
   788  			RestartPolicy: v1.RestartPolicyNever,
   789  			Volumes: []v1.Volume{
   790  				{
   791  					Name:         "my-volume",
   792  					VolumeSource: volumeSource,
   793  				},
   794  			},
   795  		},
   796  	}
   797  	e2epod.SetNodeSelection(&pod.Spec, node)
   798  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   799  }
   800  
   801  func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions) (*v1.Pod, error) {
   802  	pod := &v1.Pod{
   803  		ObjectMeta: metav1.ObjectMeta{
   804  			GenerateName: "pvc-volume-tester-",
   805  		},
   806  		Spec: v1.PodSpec{
   807  			SecurityContext: &v1.PodSecurityContext{
   808  				SELinuxOptions: seLinuxOpts,
   809  			},
   810  			Containers: []v1.Container{
   811  				{
   812  					Name:  "volume-tester",
   813  					Image: imageutils.GetE2EImage(imageutils.Pause),
   814  					VolumeMounts: []v1.VolumeMount{
   815  						{
   816  							Name:      "my-volume",
   817  							MountPath: "/mnt/test",
   818  						},
   819  					},
   820  				},
   821  			},
   822  			RestartPolicy: v1.RestartPolicyNever,
   823  			Volumes: []v1.Volume{
   824  				{
   825  					Name: "my-volume",
   826  					VolumeSource: v1.VolumeSource{
   827  						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   828  							ClaimName: pvc.Name,
   829  							ReadOnly:  false,
   830  						},
   831  					},
   832  				},
   833  			},
   834  		},
   835  	}
   836  	if node.Name != "" {
   837  		// Force schedule the pod to skip scheduler RWOP checks
   838  		framework.Logf("Forcing node name %s", node.Name)
   839  		pod.Spec.NodeName = node.Name
   840  	}
   841  	e2epod.SetNodeSelection(&pod.Spec, node)
   842  	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
   843  }
   844  
   845  // checkNodePublishVolume goes through all calls to the mock driver and checks that at least one NodePublishVolume call had the expected attributes.
   846  // If a call matches but also contains unexpected attributes, checkNodePublishVolume skips it and continues searching.
   847  func checkNodePublishVolume(ctx context.Context, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool) error {
   848  	expectedAttributes := map[string]string{}
   849  	unexpectedAttributeKeys := sets.New[string]()
   850  	if expectPodInfo {
   851  		expectedAttributes["csi.storage.k8s.io/pod.name"] = pod.Name
   852  		expectedAttributes["csi.storage.k8s.io/pod.namespace"] = pod.Namespace
   853  		expectedAttributes["csi.storage.k8s.io/pod.uid"] = string(pod.UID)
   854  		expectedAttributes["csi.storage.k8s.io/serviceAccount.name"] = "default"
   855  	} else {
   856  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.name")
   857  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.namespace")
   858  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/pod.uid")
   859  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/serviceAccount.name")
   860  	}
   861  	if csiInlineVolumesEnabled {
   862  		// This is only passed in 1.15 when the CSIInlineVolume feature gate is set.
   863  		expectedAttributes["csi.storage.k8s.io/ephemeral"] = strconv.FormatBool(ephemeralVolume)
   864  	} else {
   865  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/ephemeral")
   866  	}
   867  
   868  	if csiServiceAccountTokenEnabled {
   869  		expectedAttributes["csi.storage.k8s.io/serviceAccount.tokens"] = "<nonempty>"
   870  	} else {
   871  		unexpectedAttributeKeys.Insert("csi.storage.k8s.io/serviceAccount.tokens")
   872  	}
   873  
   874  	calls, err := getCalls(ctx)
   875  	if err != nil {
   876  		return err
   877  	}
   878  
   879  	var volumeContexts []map[string]string
   880  	for _, call := range calls {
   881  		if call.Method != "NodePublishVolume" {
   882  			continue
   883  		}
   884  
   885  		volumeCtx := call.Request.VolumeContext
        		// Record every NodePublishVolume call so the error messages below can report how many were seen.
        		volumeContexts = append(volumeContexts, volumeCtx)
   886  
   887  		// Check that NodePublish had expected attributes
   888  		foundAttributes := sets.NewString()
   889  		for k, v := range expectedAttributes {
   890  			vv, found := volumeCtx[k]
   891  			if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {
   892  				foundAttributes.Insert(k)
   893  			}
   894  		}
   895  		if foundAttributes.Len() != len(expectedAttributes) {
   896  			framework.Logf("Skipping the NodePublishVolume call: expected attribute %+v, got %+v", format.Object(expectedAttributes, 1), format.Object(volumeCtx, 1))
   897  			continue
   898  		}
   899  
   900  		// Check that NodePublish had no unexpected attributes
   901  		unexpectedAttributes := make(map[string]string)
   902  		for k := range volumeCtx {
   903  			if unexpectedAttributeKeys.Has(k) {
   904  				unexpectedAttributes[k] = volumeCtx[k]
   905  			}
   906  		}
   907  		if len(unexpectedAttributes) != 0 {
   908  			framework.Logf("Skipping the NodePublishVolume call because it contains unexpected attributes %+v", format.Object(unexpectedAttributes, 1))
   909  			continue
   910  		}
   911  
   912  		return nil
   913  	}
   914  
   915  	if len(volumeContexts) == 0 {
   916  		return fmt.Errorf("NodePublishVolume was never called")
   917  	}
   918  
   919  	return fmt.Errorf("NodePublishVolume was called %d times, but no call had expected attributes %s or calls have unwanted attributes key %+v", len(volumeContexts), format.Object(expectedAttributes, 1), unexpectedAttributeKeys.UnsortedList())
   920  }
   921  
   922  // createFSGroupRequestPreHook creates a hook that records the fsGroup passed in
   923  // through NodeStageVolume and NodePublishVolume calls.
   924  func createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup *string) *drivers.Hooks {
   925  	return &drivers.Hooks{
   926  		Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   927  			nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
   928  			if ok {
   929  				mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
   930  				if mountVolume != nil {
   931  					*nodeStageFsGroup = mountVolume.VolumeMountGroup
   932  				}
   933  			}
   934  			nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
   935  			if ok {
   936  				mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
   937  				if mountVolume != nil {
   938  					*nodePublishFsGroup = mountVolume.VolumeMountGroup
   939  				}
   940  			}
   941  			return nil, nil
   942  		},
   943  	}
   944  }
   945  
   946  // createPreHook counts invocations of a certain method (identified by a substring in the full gRPC method name).
   947  func createPreHook(method string, callback func(counter int64) error) *drivers.Hooks {
   948  	var counter int64
   949  
   950  	return &drivers.Hooks{
   951  		Pre: func() func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   952  			return func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
   953  				if strings.Contains(fullMethod, method) {
   954  					counter := atomic.AddInt64(&counter, 1)
   955  					return nil, callback(counter)
   956  				}
   957  				return nil, nil
   958  			}
   959  		}(),
   960  	}
   961  }
   962  
   963  // compareCSICalls compares expectedCallSequence with the logs of the mock driver.
   964  // It returns the index of the first expected call that was *not* received
   965  // yet, or an error when the calls do not match.
   966  // All repeated calls to the CSI mock driver (e.g. due to exponential backoff)
   967  // are squashed and checked against a single expectedCallSequence item.
   968  //
   969  // Only permanent errors are returned. Other errors are logged and no
   970  // calls are returned. The caller is expected to retry.
   971  func compareCSICalls(ctx context.Context, trackedCalls []string, expectedCallSequence []csiCall, getCalls func(ctx context.Context) ([]drivers.MockCSICall, error)) ([]drivers.MockCSICall, int, error) {
   972  	allCalls, err := getCalls(ctx)
   973  	if err != nil {
   974  		framework.Logf("intermittent (?) log retrieval error, proceeding without output: %v", err)
   975  		return nil, 0, nil
   976  	}
   977  
   978  	// Remove all repeated and ignored calls
   979  	tracked := sets.NewString(trackedCalls...)
   980  	var calls []drivers.MockCSICall
   981  	var last drivers.MockCSICall
   982  	for _, c := range allCalls {
   983  		if !tracked.Has(c.Method) {
   984  			continue
   985  		}
   986  		if c.Method != last.Method || c.FullError.Code != last.FullError.Code {
   987  			last = c
   988  			calls = append(calls, c)
   989  		}
   990  		// This call is the same as the last one, ignore it.
   991  	}
   992  
   993  	for i, c := range calls {
   994  		if i >= len(expectedCallSequence) {
   995  			// Log all unexpected calls first, return error below outside the loop.
   996  			framework.Logf("Unexpected CSI driver call: %s (%v)", c.Method, c.FullError)
   997  			continue
   998  		}
   999  
  1000  		// Compare current call with expected call
  1001  		expectedCall := expectedCallSequence[i]
  1002  		if c.Method != expectedCall.expectedMethod || c.FullError.Code != expectedCall.expectedError {
  1003  			return allCalls, i, fmt.Errorf("Unexpected CSI call %d: expected %s (%d), got %s (%d)", i, expectedCall.expectedMethod, expectedCall.expectedError, c.Method, c.FullError.Code)
  1004  		}
  1005  
  1006  		// if the secret is not nil, compare it
  1007  		if expectedCall.expectedSecret != nil {
  1008  			if !reflect.DeepEqual(expectedCall.expectedSecret, c.Request.Secrets) {
  1009  				return allCalls, i, fmt.Errorf("Unexpected secret: expected %v, got %v", expectedCall.expectedSecret, c.Request.Secrets)
  1010  			}
  1011  		}
  1012  
  1013  	}
  1014  	if len(calls) > len(expectedCallSequence) {
  1015  		return allCalls, len(expectedCallSequence), fmt.Errorf("Received %d unexpected CSI driver calls", len(calls)-len(expectedCallSequence))
  1016  	}
  1017  	// All calls were correct
  1018  	return allCalls, len(calls), nil
  1019  
  1020  }
  1021  
  1022  // createSELinuxMountPreHook creates a hook that records the mountOptions passed in
  1023  // through NodeStageVolume and NodePublishVolume calls.
  1024  func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string, stageCalls, unstageCalls, publishCalls, unpublishCalls *atomic.Int32) *drivers.Hooks {
  1025  	return &drivers.Hooks{
  1026  		Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
  1027  			switch req := request.(type) {
  1028  			case *csipbv1.NodeStageVolumeRequest:
  1029  				stageCalls.Add(1)
  1030  				mountVolume := req.GetVolumeCapability().GetMount()
  1031  				if mountVolume != nil {
  1032  					*nodeStageMountOpts = mountVolume.MountFlags
  1033  				}
  1034  			case *csipbv1.NodePublishVolumeRequest:
  1035  				publishCalls.Add(1)
  1036  				mountVolume := req.GetVolumeCapability().GetMount()
  1037  				if mountVolume != nil {
  1038  					*nodePublishMountOpts = mountVolume.MountFlags
  1039  				}
  1040  			case *csipbv1.NodeUnstageVolumeRequest:
  1041  				unstageCalls.Add(1)
  1042  			case *csipbv1.NodeUnpublishVolumeRequest:
  1043  				unpublishCalls.Add(1)
  1044  			}
  1045  			return nil, nil
  1046  		},
  1047  	}
  1048  }
  1049  
  1050  // A lot of this code was copied from e2e/framework. It would be nicer
  1051  // if it could be reused - see https://github.com/kubernetes/kubernetes/issues/92754
  1052  func podRunning(ctx context.Context, c clientset.Interface, podName, namespace string) wait.ConditionFunc {
  1053  	return func() (bool, error) {
  1054  		pod, err := c.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
  1055  		if err != nil {
  1056  			return false, err
  1057  		}
  1058  		switch pod.Status.Phase {
  1059  		case v1.PodRunning:
  1060  			return true, nil
  1061  		case v1.PodFailed, v1.PodSucceeded:
  1062  			return false, errPodCompleted
  1063  		}
  1064  		return false, nil
  1065  	}
  1066  }
  1067  
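        // podHasStorage returns a condition function that fails with errNotEnoughSpace as
        // soon as a FailedScheduling event for the pod reports insufficient free storage.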
  1068  func podHasStorage(ctx context.Context, c clientset.Interface, podName, namespace string, when time.Time) wait.ConditionFunc {
  1069  	// Check for events of this pod. Copied from test/e2e/common/container_probe.go.
  1070  	expectedEvent := fields.Set{
  1071  		"involvedObject.kind":      "Pod",
  1072  		"involvedObject.name":      podName,
  1073  		"involvedObject.namespace": namespace,
  1074  		"reason":                   "FailedScheduling",
  1075  	}.AsSelector().String()
  1076  	options := metav1.ListOptions{
  1077  		FieldSelector: expectedEvent,
  1078  	}
  1079  	// copied from test/e2e/framework/events/events.go
  1080  	return func() (bool, error) {
  1081  		// We cannot be sure here whether it has enough storage, only when
  1082  		// it hasn't. In that case we abort waiting with a special error.
  1083  		events, err := c.CoreV1().Events(namespace).List(ctx, options)
  1084  		if err != nil {
  1085  			return false, fmt.Errorf("got error while getting events: %w", err)
  1086  		}
  1087  		for _, event := range events.Items {
  1088  		if /* event.CreationTimestamp.After(when) && */
  1089  			strings.Contains(event.Message, errReasonNotEnoughSpace) {
  1090  				return false, errNotEnoughSpace
  1091  			}
  1092  		}
  1093  		return false, nil
  1094  	}
  1095  }
  1096  
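        // anyOf combines several wait conditions: it is done as soon as any condition is
        // done and fails on the first error returned by any of them.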
  1097  func anyOf(conditions ...wait.ConditionFunc) wait.ConditionFunc {
  1098  	return func() (bool, error) {
  1099  		for _, condition := range conditions {
  1100  			done, err := condition()
  1101  			if err != nil {
  1102  				return false, err
  1103  			}
  1104  			if done {
  1105  				return true, nil
  1106  			}
  1107  		}
  1108  		return false, nil
  1109  	}
  1110  }
  1111  
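        // waitForMaxVolumeCondition waits until the pod carries a PodScheduled=False
        // condition with a non-empty Reason and Message, the expected outcome when the
        // node's volume limit is exceeded.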
  1112  func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
  1113  	waitErr := wait.PollImmediate(10*time.Second, csiPodUnschedulableTimeout, func() (bool, error) {
  1114  		pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
  1115  		if err != nil {
  1116  			return false, err
  1117  		}
  1118  		for _, c := range pod.Status.Conditions {
  1119  			// Conformance tests cannot rely on specific output of optional fields (e.g., Reason
  1120  			// and Message) because these fields are not subject to the deprecation policy.
  1121  			if c.Type == v1.PodScheduled && c.Status == v1.ConditionFalse && c.Reason != "" && c.Message != "" {
  1122  				return true, nil
  1123  			}
  1124  		}
  1125  		return false, nil
  1126  	})
  1127  	if waitErr != nil {
  1128  		return fmt.Errorf("error waiting for pod %s/%s to have max volume condition: %v", pod.Namespace, pod.Name, waitErr)
  1129  	}
  1130  	return nil
  1131  }