k8s.io/kubernetes@v1.29.3/test/e2e/storage/drivers/in_tree.go (about)

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  /*
    18   * This file defines various in-tree volume test drivers for TestSuites.
    19   *
    20   * There are two ways, how to prepare test drivers:
    21   * 1) With containerized server (NFS, Ceph, iSCSI, ...)
    22   * It creates a server pod which defines one volume for the tests.
    23   * These tests work only when privileged containers are allowed, exporting
    24   * various filesystems (like NFS) usually needs some mounting or
    25   * other privileged magic in the server pod.
    26   *
    27   * Note that the server containers are for testing purposes only and should not
    28   * be used in production.
    29   *
    30   * 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
    31   * Appropriate server or cloud provider must exist somewhere outside
    32   * the tested Kubernetes cluster. CreateVolume will create a new volume to be
    33   * used in the TestSuites for inlineVolume or DynamicPV tests.
    34   */
    35  
    36  package drivers
    37  
    38  import (
    39  	"context"
    40  	"fmt"
    41  	"strconv"
    42  	"strings"
    43  	"time"
    44  
    45  	"github.com/onsi/ginkgo/v2"
    46  	v1 "k8s.io/api/core/v1"
    47  	rbacv1 "k8s.io/api/rbac/v1"
    48  	storagev1 "k8s.io/api/storage/v1"
    49  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    50  	"k8s.io/apimachinery/pkg/runtime/schema"
    51  	"k8s.io/apimachinery/pkg/util/sets"
    52  	"k8s.io/apiserver/pkg/authentication/serviceaccount"
    53  	clientset "k8s.io/client-go/kubernetes"
    54  	"k8s.io/kubernetes/test/e2e/feature"
    55  	"k8s.io/kubernetes/test/e2e/framework"
    56  	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
    57  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    58  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    59  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    60  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    61  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    62  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    63  	"k8s.io/kubernetes/test/e2e/storage/utils"
    64  	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
    65  	imageutils "k8s.io/kubernetes/test/utils/image"
    66  )
    67  
const (
	// iSCSIIQNTemplate is the format for cluster-wide unique iSCSI Qualified
	// Names; %s is filled with the test namespace name (see newISCSIServer).
	iSCSIIQNTemplate = "iqn.2003-01.io.k8s:e2e.%s"
)
    72  
// NFS
// nfsDriver is the in-tree NFS plugin test driver. It supports inline
// volumes, pre-provisioned PVs, and dynamic provisioning (the latter through
// an external provisioner pod started in PrepareTest).
type nfsDriver struct {
	externalProvisionerPod *v1.Pod // provisioner pod started by PrepareTest
	externalPluginName     string  // provisioner name, unique per test namespace

	driverInfo storageframework.DriverInfo
}

// nfsVolume tracks one NFS server pod created for a test.
type nfsVolume struct {
	serverHost string // address at which the NFS server is reachable
	serverPod  *v1.Pod
	f          *framework.Framework
}

// Compile-time checks that nfsDriver implements the expected test interfaces.
var _ storageframework.TestDriver = &nfsDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &nfsDriver{}
var _ storageframework.InlineVolumeTestDriver = &nfsDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &nfsDriver{}
var _ storageframework.DynamicPVTestDriver = &nfsDriver{}
    92  
    93  // InitNFSDriver returns nfsDriver that implements TestDriver interface
    94  func InitNFSDriver() storageframework.TestDriver {
    95  	return &nfsDriver{
    96  		driverInfo: storageframework.DriverInfo{
    97  			Name:             "nfs",
    98  			InTreePluginName: "kubernetes.io/nfs",
    99  			MaxFileSize:      storageframework.FileSizeLarge,
   100  			SupportedSizeRange: e2evolume.SizeRange{
   101  				Min: "1Gi",
   102  			},
   103  			SupportedFsType: sets.NewString(
   104  				"", // Default fsType
   105  			),
   106  			SupportedMountOption: sets.NewString("relatime"),
   107  			RequiredMountOption:  sets.NewString("vers=4.1"),
   108  			Capabilities: map[storageframework.Capability]bool{
   109  				storageframework.CapPersistence:       true,
   110  				storageframework.CapExec:              true,
   111  				storageframework.CapRWX:               true,
   112  				storageframework.CapMultiPODs:         true,
   113  				storageframework.CapMultiplePVsSameID: true,
   114  			},
   115  		},
   116  	}
   117  }
   118  
// GetDriverInfo returns the static metadata describing the NFS driver.
func (n *nfsDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &n.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (n *nfsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   125  
   126  func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   127  	nv, ok := e2evolume.(*nfsVolume)
   128  	if !ok {
   129  		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
   130  	}
   131  	return &v1.VolumeSource{
   132  		NFS: &v1.NFSVolumeSource{
   133  			Server:   nv.serverHost,
   134  			Path:     "/",
   135  			ReadOnly: readOnly,
   136  		},
   137  	}
   138  }
   139  
   140  func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
   141  	nv, ok := e2evolume.(*nfsVolume)
   142  	if !ok {
   143  		framework.Failf("Failed to cast test volume of type %T to the NFS test volume", e2evolume)
   144  	}
   145  	return &v1.PersistentVolumeSource{
   146  		NFS: &v1.NFSVolumeSource{
   147  			Server:   nv.serverHost,
   148  			Path:     "/",
   149  			ReadOnly: readOnly,
   150  		},
   151  	}, nil
   152  }
   153  
   154  func (n *nfsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
   155  	provisioner := n.externalPluginName
   156  	parameters := map[string]string{"mountOptions": "vers=4.1"}
   157  	ns := config.Framework.Namespace.Name
   158  
   159  	return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
   160  }
   161  
// PrepareTest sets up dynamic provisioning for NFS: it grants the namespace's
// default service account cluster-admin, waits for the authorization cache to
// catch up, then starts an external provisioner pod. Cleanup of both the
// binding and the pod is registered via ginkgo.DeferCleanup.
func (n *nfsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
	cs := f.ClientSet
	ns := f.Namespace
	// The provisioner name embeds the namespace so concurrent tests don't collide.
	n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)

	// TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner
	// is not enough. We should create new clusterrole for testing.
	err := e2eauth.BindClusterRole(ctx, cs.RbacV1(), "cluster-admin", ns.Name,
		rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
	framework.ExpectNoError(err)
	ginkgo.DeferCleanup(cs.RbacV1().ClusterRoleBindings().Delete, ns.Name+"--"+"cluster-admin", *metav1.NewDeleteOptions(0))

	// Wait until the service account can actually read StorageClasses before
	// launching the provisioner, so it does not fail on startup.
	err = e2eauth.WaitForAuthorizationUpdate(ctx, cs.AuthorizationV1(),
		serviceaccount.MakeUsername(ns.Name, "default"),
		"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
	framework.ExpectNoError(err, "Failed to update authorization: %v", err)

	ginkgo.By("creating an external dynamic provisioner pod")
	n.externalProvisionerPod = utils.StartExternalProvisioner(ctx, cs, ns.Name, n.externalPluginName)
	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod)

	return &storageframework.PerTestConfig{
		Driver:    n,
		Prefix:    "nfs",
		Framework: f,
	}
}
   189  
   190  func (n *nfsDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
   191  	f := config.Framework
   192  	cs := f.ClientSet
   193  	ns := f.Namespace
   194  
   195  	// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
   196  	// and startExternalProvisioner creates a pod for DynamicPV.
   197  	// Therefore, we need a different PrepareTest logic for volType.
   198  	switch volType {
   199  	case storageframework.InlineVolume:
   200  		fallthrough
   201  	case storageframework.PreprovisionedPV:
   202  		c, serverPod, serverHost := e2evolume.NewNFSServer(ctx, cs, ns.Name, []string{})
   203  		config.ServerConfig = &c
   204  		return &nfsVolume{
   205  			serverHost: serverHost,
   206  			serverPod:  serverPod,
   207  			f:          f,
   208  		}
   209  	case storageframework.DynamicPV:
   210  		// Do nothing
   211  	default:
   212  		framework.Failf("Unsupported volType:%v is specified", volType)
   213  	}
   214  	return nil
   215  }
   216  
// DeleteVolume tears down the NFS server pod created by CreateVolume.
func (v *nfsVolume) DeleteVolume(ctx context.Context) {
	cleanUpVolumeServer(ctx, v.f, v.serverPod)
}
   220  
// iSCSI
// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes.
// iSCSIDriver is the in-tree iSCSI plugin test driver.
type iSCSIDriver struct {
	driverInfo storageframework.DriverInfo
}

// iSCSIVolume tracks one iSCSI target pod created for a test.
type iSCSIVolume struct {
	serverPod *v1.Pod
	serverIP  string
	f         *framework.Framework
	iqn       string // iSCSI Qualified Name exported by the server pod
}

// Compile-time interface conformance checks.
var _ storageframework.TestDriver = &iSCSIDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &iSCSIDriver{}
var _ storageframework.InlineVolumeTestDriver = &iSCSIDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &iSCSIDriver{}
   237  
   238  // InitISCSIDriver returns iSCSIDriver that implements TestDriver interface
   239  func InitISCSIDriver() storageframework.TestDriver {
   240  	return &iSCSIDriver{
   241  		driverInfo: storageframework.DriverInfo{
   242  			Name:             "iscsi",
   243  			InTreePluginName: "kubernetes.io/iscsi",
   244  			TestTags:         []interface{}{feature.Volumes},
   245  			MaxFileSize:      storageframework.FileSizeMedium,
   246  			SupportedFsType: sets.NewString(
   247  				"", // Default fsType
   248  				"ext4",
   249  			),
   250  			TopologyKeys: []string{v1.LabelHostname},
   251  			Capabilities: map[storageframework.Capability]bool{
   252  				storageframework.CapPersistence:       true,
   253  				storageframework.CapFsGroup:           true,
   254  				storageframework.CapBlock:             true,
   255  				storageframework.CapExec:              true,
   256  				storageframework.CapMultiPODs:         true,
   257  				storageframework.CapTopology:          true,
   258  				storageframework.CapMultiplePVsSameID: true,
   259  			},
   260  		},
   261  	}
   262  }
   263  
// GetDriverInfo returns the static metadata describing the iSCSI driver.
func (i *iSCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &i.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (i *iSCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   270  
   271  func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   272  	iv, ok := e2evolume.(*iSCSIVolume)
   273  	if !ok {
   274  		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
   275  	}
   276  
   277  	volSource := v1.VolumeSource{
   278  		ISCSI: &v1.ISCSIVolumeSource{
   279  			TargetPortal: "127.0.0.1:3260",
   280  			IQN:          iv.iqn,
   281  			Lun:          0,
   282  			ReadOnly:     readOnly,
   283  		},
   284  	}
   285  	if fsType != "" {
   286  		volSource.ISCSI.FSType = fsType
   287  	}
   288  	return &volSource
   289  }
   290  
   291  func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
   292  	iv, ok := e2evolume.(*iSCSIVolume)
   293  	if !ok {
   294  		framework.Failf("failed to cast test volume of type %T to the iSCSI test volume", e2evolume)
   295  	}
   296  
   297  	pvSource := v1.PersistentVolumeSource{
   298  		ISCSI: &v1.ISCSIPersistentVolumeSource{
   299  			TargetPortal: "127.0.0.1:3260",
   300  			IQN:          iv.iqn,
   301  			Lun:          0,
   302  			ReadOnly:     readOnly,
   303  		},
   304  	}
   305  	if fsType != "" {
   306  		pvSource.ISCSI.FSType = fsType
   307  	}
   308  	return &pvSource, nil
   309  }
   310  
   311  func (i *iSCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   312  	return &storageframework.PerTestConfig{
   313  		Driver:    i,
   314  		Prefix:    "iscsi",
   315  		Framework: f,
   316  	}
   317  }
   318  
   319  func (i *iSCSIDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
   320  	f := config.Framework
   321  	cs := f.ClientSet
   322  	ns := f.Namespace
   323  
   324  	c, serverPod, serverIP, iqn := newISCSIServer(ctx, cs, ns.Name)
   325  	config.ServerConfig = &c
   326  	config.ClientNodeSelection = c.ClientNodeSelection
   327  	return &iSCSIVolume{
   328  		serverPod: serverPod,
   329  		serverIP:  serverIP,
   330  		iqn:       iqn,
   331  		f:         f,
   332  	}
   333  }
   334  
// newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer. It
// starts a privileged iSCSI target pod on the host network and returns the
// volume test config, the server pod, its IP, and the exported IQN. The
// returned config pins clients to the server's node.
func newISCSIServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) {
	// Generate cluster-wide unique IQN
	iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
	config = e2evolume.TestConfig{
		Namespace:   namespace,
		Prefix:      "iscsi",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeISCSIServer),
		ServerArgs:  []string{iqn},
		ServerVolumes: map[string]string{
			// iSCSI container needs to insert modules from the host
			"/lib/modules": "/lib/modules",
			// iSCSI container needs to configure kernel
			"/sys/kernel": "/sys/kernel",
			// iSCSI source "block devices" must be available on the host
			"/srv/iscsi": "/srv/iscsi",
			// targetcli uses dbus
			"/run/dbus": "/run/dbus",
		},
		ServerReadyMessage: "iscsi target started",
		ServerHostNetwork:  true,
	}
	pod, ip = e2evolume.CreateStorageServer(ctx, cs, config)
	// Make sure the client runs on the same node as server so we don't need to open any firewalls.
	config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
	return config, pod, ip, iqn
}
   362  
// newRBDServer is a CephRBD-specific wrapper for CreateStorageServer. It
// starts a Ceph server pod (also reused by the CephFS driver) and creates the
// Secret holding the client keyring in the test namespace. The test fails if
// the Secret cannot be created.
func newRBDServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
	config = e2evolume.TestConfig{
		Namespace:   namespace,
		Prefix:      "rbd",
		ServerImage: imageutils.GetE2EImage(imageutils.VolumeRBDServer),
		ServerPorts: []int{6789},
		ServerVolumes: map[string]string{
			"/lib/modules": "/lib/modules",
		},
		ServerReadyMessage: "Ceph is ready",
	}
	pod, ip = e2evolume.CreateStorageServer(ctx, cs, config)
	// create secrets for the server
	secret = &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-secret",
		},
		Data: map[string][]byte{
			// from test/images/volumes-tester/rbd/keyring
			"key": []byte("AQDRrKNVbEevChAAEmRC+pW/KBVHxa0w/POILA=="),
		},
		Type: "kubernetes.io/rbd",
	}

	// Note: the returned secret is the server-side object (with UID etc.),
	// not the literal spec built above.
	secret, err := cs.CoreV1().Secrets(config.Namespace).Create(ctx, secret, metav1.CreateOptions{})
	if err != nil {
		framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
	}

	return config, pod, secret, ip
}
   399  
// DeleteVolume tears down the iSCSI target server pod created by CreateVolume.
func (v *iSCSIVolume) DeleteVolume(ctx context.Context) {
	cleanUpVolumeServer(ctx, v.f, v.serverPod)
}
   403  
// Ceph RBD
// rbdDriver is the in-tree Ceph RBD plugin test driver.
type rbdDriver struct {
	driverInfo storageframework.DriverInfo
}

// rbdVolume tracks one Ceph server pod and its keyring Secret.
type rbdVolume struct {
	serverPod *v1.Pod
	serverIP  string
	secret    *v1.Secret // keyring secret created by newRBDServer
	f         *framework.Framework
}

// Compile-time interface conformance checks.
var _ storageframework.TestDriver = &rbdDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &rbdDriver{}
var _ storageframework.InlineVolumeTestDriver = &rbdDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &rbdDriver{}
   420  
   421  // InitRbdDriver returns rbdDriver that implements TestDriver interface
   422  func InitRbdDriver() storageframework.TestDriver {
   423  	return &rbdDriver{
   424  		driverInfo: storageframework.DriverInfo{
   425  			Name:             "rbd",
   426  			InTreePluginName: "kubernetes.io/rbd",
   427  			TestTags:         []interface{}{feature.Volumes, framework.WithSerial()},
   428  			MaxFileSize:      storageframework.FileSizeMedium,
   429  			SupportedSizeRange: e2evolume.SizeRange{
   430  				Min: "1Gi",
   431  			},
   432  			SupportedFsType: sets.NewString(
   433  				"", // Default fsType
   434  				"ext4",
   435  			),
   436  			Capabilities: map[storageframework.Capability]bool{
   437  				storageframework.CapPersistence:       true,
   438  				storageframework.CapFsGroup:           true,
   439  				storageframework.CapBlock:             true,
   440  				storageframework.CapExec:              true,
   441  				storageframework.CapMultiPODs:         true,
   442  				storageframework.CapMultiplePVsSameID: true,
   443  			},
   444  		},
   445  	}
   446  }
   447  
// GetDriverInfo returns the static metadata describing the RBD driver.
func (r *rbdDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &r.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (r *rbdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   454  
   455  func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   456  	rv, ok := e2evolume.(*rbdVolume)
   457  	if !ok {
   458  		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
   459  	}
   460  
   461  	volSource := v1.VolumeSource{
   462  		RBD: &v1.RBDVolumeSource{
   463  			CephMonitors: []string{rv.serverIP},
   464  			RBDPool:      "rbd",
   465  			RBDImage:     "foo",
   466  			RadosUser:    "admin",
   467  			SecretRef: &v1.LocalObjectReference{
   468  				Name: rv.secret.Name,
   469  			},
   470  			ReadOnly: readOnly,
   471  		},
   472  	}
   473  	if fsType != "" {
   474  		volSource.RBD.FSType = fsType
   475  	}
   476  	return &volSource
   477  }
   478  
   479  func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
   480  	rv, ok := e2evolume.(*rbdVolume)
   481  	if !ok {
   482  		framework.Failf("failed to cast test volume of type %T to the RBD test volume", e2evolume)
   483  	}
   484  
   485  	f := rv.f
   486  	ns := f.Namespace
   487  
   488  	pvSource := v1.PersistentVolumeSource{
   489  		RBD: &v1.RBDPersistentVolumeSource{
   490  			CephMonitors: []string{rv.serverIP},
   491  			RBDPool:      "rbd",
   492  			RBDImage:     "foo",
   493  			RadosUser:    "admin",
   494  			SecretRef: &v1.SecretReference{
   495  				Name:      rv.secret.Name,
   496  				Namespace: ns.Name,
   497  			},
   498  			ReadOnly: readOnly,
   499  		},
   500  	}
   501  	if fsType != "" {
   502  		pvSource.RBD.FSType = fsType
   503  	}
   504  	return &pvSource, nil
   505  }
   506  
   507  func (r *rbdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   508  	return &storageframework.PerTestConfig{
   509  		Driver:    r,
   510  		Prefix:    "rbd",
   511  		Framework: f,
   512  	}
   513  }
   514  
   515  func (r *rbdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
   516  	f := config.Framework
   517  	cs := f.ClientSet
   518  	ns := f.Namespace
   519  
   520  	c, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name)
   521  	config.ServerConfig = &c
   522  	return &rbdVolume{
   523  		serverPod: serverPod,
   524  		serverIP:  serverIP,
   525  		secret:    secret,
   526  		f:         f,
   527  	}
   528  }
   529  
// DeleteVolume tears down the Ceph server pod and its keyring Secret.
func (v *rbdVolume) DeleteVolume(ctx context.Context) {
	cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret)
}
   533  
// Ceph
// cephFSDriver is the in-tree CephFS plugin test driver. It reuses the Ceph
// server pod started by newRBDServer.
type cephFSDriver struct {
	driverInfo storageframework.DriverInfo
}

// cephVolume tracks one Ceph server pod and its keyring Secret.
type cephVolume struct {
	serverPod *v1.Pod
	serverIP  string
	secret    *v1.Secret // keyring secret created by newRBDServer
	f         *framework.Framework
}

// Compile-time interface conformance checks.
var _ storageframework.TestDriver = &cephFSDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &cephFSDriver{}
var _ storageframework.InlineVolumeTestDriver = &cephFSDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &cephFSDriver{}
   550  
   551  // InitCephFSDriver returns cephFSDriver that implements TestDriver interface
   552  func InitCephFSDriver() storageframework.TestDriver {
   553  	return &cephFSDriver{
   554  		driverInfo: storageframework.DriverInfo{
   555  			Name:             "ceph",
   556  			InTreePluginName: "kubernetes.io/cephfs",
   557  			TestTags:         []interface{}{feature.Volumes, framework.WithSerial()},
   558  			MaxFileSize:      storageframework.FileSizeMedium,
   559  			SupportedSizeRange: e2evolume.SizeRange{
   560  				Min: "1Gi",
   561  			},
   562  			SupportedFsType: sets.NewString(
   563  				"", // Default fsType
   564  			),
   565  			Capabilities: map[storageframework.Capability]bool{
   566  				storageframework.CapPersistence:       true,
   567  				storageframework.CapExec:              true,
   568  				storageframework.CapRWX:               true,
   569  				storageframework.CapMultiPODs:         true,
   570  				storageframework.CapMultiplePVsSameID: true,
   571  			},
   572  		},
   573  	}
   574  }
   575  
// GetDriverInfo returns the static metadata describing the CephFS driver.
func (c *cephFSDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &c.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (c *cephFSDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   582  
   583  func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   584  	cv, ok := e2evolume.(*cephVolume)
   585  	if !ok {
   586  		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
   587  	}
   588  
   589  	return &v1.VolumeSource{
   590  		CephFS: &v1.CephFSVolumeSource{
   591  			Monitors: []string{cv.serverIP + ":6789"},
   592  			User:     "kube",
   593  			SecretRef: &v1.LocalObjectReference{
   594  				Name: cv.secret.Name,
   595  			},
   596  			ReadOnly: readOnly,
   597  		},
   598  	}
   599  }
   600  
   601  func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
   602  	cv, ok := e2evolume.(*cephVolume)
   603  	if !ok {
   604  		framework.Failf("Failed to cast test volume of type %T to the Ceph test volume", e2evolume)
   605  	}
   606  
   607  	ns := cv.f.Namespace
   608  
   609  	return &v1.PersistentVolumeSource{
   610  		CephFS: &v1.CephFSPersistentVolumeSource{
   611  			Monitors: []string{cv.serverIP + ":6789"},
   612  			User:     "kube",
   613  			SecretRef: &v1.SecretReference{
   614  				Name:      cv.secret.Name,
   615  				Namespace: ns.Name,
   616  			},
   617  			ReadOnly: readOnly,
   618  		},
   619  	}, nil
   620  }
   621  
   622  func (c *cephFSDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   623  	return &storageframework.PerTestConfig{
   624  		Driver:    c,
   625  		Prefix:    "cephfs",
   626  		Framework: f,
   627  	}
   628  }
   629  
   630  func (c *cephFSDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
   631  	f := config.Framework
   632  	cs := f.ClientSet
   633  	ns := f.Namespace
   634  
   635  	cfg, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name)
   636  	config.ServerConfig = &cfg
   637  	return &cephVolume{
   638  		serverPod: serverPod,
   639  		serverIP:  serverIP,
   640  		secret:    secret,
   641  		f:         f,
   642  	}
   643  }
   644  
// DeleteVolume tears down the Ceph server pod and its keyring Secret.
func (v *cephVolume) DeleteVolume(ctx context.Context) {
	cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret)
}
   648  
// Hostpath
// hostPathDriver is the in-tree hostPath plugin test driver. It only supports
// inline volumes (no pre-provisioned or dynamic PVs).
type hostPathDriver struct {
	driverInfo storageframework.DriverInfo
}

// Compile-time interface conformance checks.
var _ storageframework.TestDriver = &hostPathDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &hostPathDriver{}
var _ storageframework.InlineVolumeTestDriver = &hostPathDriver{}
   657  
   658  // InitHostPathDriver returns hostPathDriver that implements TestDriver interface
   659  func InitHostPathDriver() storageframework.TestDriver {
   660  	return &hostPathDriver{
   661  		driverInfo: storageframework.DriverInfo{
   662  			Name:             "hostPath",
   663  			InTreePluginName: "kubernetes.io/host-path",
   664  			MaxFileSize:      storageframework.FileSizeMedium,
   665  			SupportedFsType: sets.NewString(
   666  				"", // Default fsType
   667  			),
   668  			TopologyKeys: []string{v1.LabelHostname},
   669  			Capabilities: map[storageframework.Capability]bool{
   670  				storageframework.CapPersistence:       true,
   671  				storageframework.CapMultiPODs:         true,
   672  				storageframework.CapSingleNodeVolume:  true,
   673  				storageframework.CapTopology:          true,
   674  				storageframework.CapMultiplePVsSameID: true,
   675  			},
   676  		},
   677  	}
   678  }
   679  
// GetDriverInfo returns the static metadata describing the hostPath driver.
func (h *hostPathDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &h.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (h *hostPathDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   686  
   687  func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   688  	// hostPath doesn't support readOnly volume
   689  	if readOnly {
   690  		return nil
   691  	}
   692  	return &v1.VolumeSource{
   693  		HostPath: &v1.HostPathVolumeSource{
   694  			Path: "/tmp",
   695  		},
   696  	}
   697  }
   698  
   699  func (h *hostPathDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   700  	return &storageframework.PerTestConfig{
   701  		Driver:    h,
   702  		Prefix:    "hostpath",
   703  		Framework: f,
   704  	}
   705  }
   706  
   707  func (h *hostPathDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
   708  	f := config.Framework
   709  	cs := f.ClientSet
   710  
   711  	// pods should be scheduled on the node
   712  	node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
   713  	framework.ExpectNoError(err)
   714  	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
   715  	return nil
   716  }
   717  
// HostPathSymlink
// hostPathSymlinkDriver is the hostPath test driver variant that mounts a
// symlink to a directory instead of the directory itself.
type hostPathSymlinkDriver struct {
	driverInfo storageframework.DriverInfo
}

// hostPathSymlinkVolume tracks the directory/symlink pair created on a node.
type hostPathSymlinkVolume struct {
	targetPath string // symlink that the volume source points at
	sourcePath string // real directory the symlink resolves to
	prepPod    *v1.Pod // pod spec used to create (and later clean up) the paths
	f          *framework.Framework
}

// Compile-time interface conformance checks.
var _ storageframework.TestDriver = &hostPathSymlinkDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{}
var _ storageframework.InlineVolumeTestDriver = &hostPathSymlinkDriver{}
   733  
   734  // InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface
   735  func InitHostPathSymlinkDriver() storageframework.TestDriver {
   736  	return &hostPathSymlinkDriver{
   737  		driverInfo: storageframework.DriverInfo{
   738  			Name:             "hostPathSymlink",
   739  			InTreePluginName: "kubernetes.io/host-path",
   740  			MaxFileSize:      storageframework.FileSizeMedium,
   741  			SupportedFsType: sets.NewString(
   742  				"", // Default fsType
   743  			),
   744  			TopologyKeys: []string{v1.LabelHostname},
   745  			Capabilities: map[storageframework.Capability]bool{
   746  				storageframework.CapPersistence:       true,
   747  				storageframework.CapMultiPODs:         true,
   748  				storageframework.CapSingleNodeVolume:  true,
   749  				storageframework.CapTopology:          true,
   750  				storageframework.CapMultiplePVsSameID: true,
   751  			},
   752  		},
   753  	}
   754  }
   755  
// GetDriverInfo returns the static metadata describing the hostPathSymlink driver.
func (h *hostPathSymlinkDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &h.driverInfo
}

// SkipUnsupportedTest is a no-op; no patterns are skipped beyond what
// DriverInfo already excludes.
func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   762  
   763  func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   764  	hv, ok := e2evolume.(*hostPathSymlinkVolume)
   765  	if !ok {
   766  		framework.Failf("Failed to cast test volume of type %T to the Hostpath Symlink test volume", e2evolume)
   767  	}
   768  
   769  	// hostPathSymlink doesn't support readOnly volume
   770  	if readOnly {
   771  		return nil
   772  	}
   773  	return &v1.VolumeSource{
   774  		HostPath: &v1.HostPathVolumeSource{
   775  			Path: hv.targetPath,
   776  		},
   777  	}
   778  }
   779  
   780  func (h *hostPathSymlinkDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   781  	return &storageframework.PerTestConfig{
   782  		Driver:    h,
   783  		Prefix:    "hostpathsymlink",
   784  		Framework: f,
   785  	}
   786  }
   787  
// CreateVolume picks a schedulable node, pins clients to it, then runs a
// short-lived privileged pod on that node that creates /tmp/<ns> and the
// symlink /tmp/<ns>-link pointing at it. The prep pod is awaited, deleted,
// and its spec kept in the returned volume so cleanup can reuse it.
func (h *hostPathSymlinkDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	f := config.Framework
	cs := f.ClientSet

	sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name)
	targetPath := fmt.Sprintf("/tmp/%v-link", f.Namespace.Name)
	volumeName := "test-volume"

	// pods should be scheduled on the node
	node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
	framework.ExpectNoError(err)
	config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}

	cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
	privileged := true

	// Launch pod to initialize hostPath directory and symlink
	prepPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("hostpath-symlink-prep-%s", f.Namespace.Name),
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    fmt.Sprintf("init-volume-%s", f.Namespace.Name),
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh", "-ec", cmd},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: "/tmp",
						},
					},
					// Privileged so the container may create paths on the host's /tmp.
					SecurityContext: &v1.SecurityContext{
						Privileged: &privileged,
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{
							Path: "/tmp",
						},
					},
				},
			},
			// Run on the same node the client pods were pinned to above.
			NodeName: node.Name,
		},
	}
	// The prepPod spec is kept in the returned volume so cleanup can reuse it.
	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, prepPod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating hostPath init pod")

	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
	framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")

	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
	framework.ExpectNoError(err, "while deleting hostPath init pod")
	return &hostPathSymlinkVolume{
		sourcePath: sourcePath,
		targetPath: targetPath,
		prepPod:    prepPod,
		f:          f,
	}
}
   856  
// DeleteVolume removes the symlink and its backing directory by reusing the
// prep pod spec from CreateVolume with a cleanup command, running it to
// completion on the same node.
func (v *hostPathSymlinkVolume) DeleteVolume(ctx context.Context) {
	f := v.f

	// Remove the symlink first, then the directory it pointed at.
	cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath)
	v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}

	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, v.prepPod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "while creating hostPath teardown pod")

	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
	framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")

	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
	framework.ExpectNoError(err, "while deleting hostPath teardown pod")
}
   872  
// emptydir
// emptydirDriver exercises the in-tree emptyDir volume plugin.
type emptydirDriver struct {
	driverInfo storageframework.DriverInfo
}

// Compile-time checks that emptydirDriver implements the required
// test-driver interfaces.
var _ storageframework.TestDriver = &emptydirDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &emptydirDriver{}
var _ storageframework.InlineVolumeTestDriver = &emptydirDriver{}
   881  
// InitEmptydirDriver returns emptydirDriver that implements TestDriver interface
func InitEmptydirDriver() storageframework.TestDriver {
	return &emptydirDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "emptydir",
			InTreePluginName: "kubernetes.io/empty-dir",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedFsType: sets.NewString(
				"", // Default fsType
			),
			// emptyDir lives and dies with a single pod on a single node,
			// hence only exec and single-node capabilities are advertised.
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapExec:             true,
				storageframework.CapSingleNodeVolume: true,
			},
		},
	}
}
   899  
// GetDriverInfo returns the static DriverInfo describing the emptyDir driver.
func (e *emptydirDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &e.driverInfo
}
   903  
// SkipUnsupportedTest is a no-op: the emptyDir driver does not skip any of
// the test patterns it is registered for.
func (e *emptydirDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
   906  
   907  func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
   908  	// emptydir doesn't support readOnly volume
   909  	if readOnly {
   910  		return nil
   911  	}
   912  	return &v1.VolumeSource{
   913  		EmptyDir: &v1.EmptyDirVolumeSource{},
   914  	}
   915  }
   916  
// CreateVolume returns nil: emptyDir volumes require no pre-provisioning,
// the kubelet creates them on demand when the pod starts.
func (e *emptydirDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	return nil
}
   920  
   921  func (e *emptydirDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   922  	return &storageframework.PerTestConfig{
   923  		Driver:    e,
   924  		Prefix:    "emptydir",
   925  		Framework: f,
   926  	}
   927  }
   928  
// Cinder
// This tests only CSI migration with dynamically provisioned volumes.
type cinderDriver struct {
	driverInfo storageframework.DriverInfo
}

// Compile-time checks that cinderDriver implements the required
// test-driver interfaces (dynamic provisioning only, see above).
var _ storageframework.TestDriver = &cinderDriver{}
var _ storageframework.DynamicPVTestDriver = &cinderDriver{}
   937  
// InitCinderDriver returns cinderDriver that implements TestDriver interface
func InitCinderDriver() storageframework.TestDriver {
	return &cinderDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "cinder",
			InTreePluginName: "kubernetes.io/cinder",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
			),
			TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence: true,
				storageframework.CapFsGroup:     true,
				storageframework.CapExec:        true,
				storageframework.CapBlock:       true,
				// Cinder supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits: false,
				storageframework.CapTopology:     true,
			},
		},
	}
}
   965  
// GetDriverInfo returns the static DriverInfo describing the Cinder driver.
func (c *cinderDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &c.driverInfo
}
   969  
// SkipUnsupportedTest skips all Cinder tests unless running against the
// openstack cloud provider.
func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("openstack")
}
   973  
   974  func (c *cinderDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
   975  	provisioner := "kubernetes.io/cinder"
   976  	parameters := map[string]string{}
   977  	if fsType != "" {
   978  		parameters["fsType"] = fsType
   979  	}
   980  	ns := config.Framework.Namespace.Name
   981  
   982  	return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
   983  }
   984  
   985  func (c *cinderDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
   986  	return &storageframework.PerTestConfig{
   987  		Driver:    c,
   988  		Prefix:    "cinder",
   989  		Framework: f,
   990  	}
   991  }
   992  
// GCE
// gcePdDriver exercises the in-tree GCE Persistent Disk volume plugin.
type gcePdDriver struct {
	driverInfo storageframework.DriverInfo
}

// gcePdVolume tracks a provisioned GCE PD by its disk name.
type gcePdVolume struct {
	volumeName string
}

// Compile-time checks that gcePdDriver implements the required
// test-driver interfaces.
var _ storageframework.TestDriver = &gcePdDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &gcePdDriver{}
var _ storageframework.InlineVolumeTestDriver = &gcePdDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &gcePdDriver{}
var _ storageframework.DynamicPVTestDriver = &gcePdDriver{}
  1007  
// InitGcePdDriver returns gcePdDriver that implements TestDriver interface
func InitGcePdDriver() storageframework.TestDriver {
	supportedTypes := sets.NewString(
		"", // Default fsType
		"ext2",
		"ext3",
		"ext4",
		"xfs",
	)
	return &gcePdDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "gcepd",
			InTreePluginName: "kubernetes.io/gce-pd",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType:      supportedTypes,
			SupportedMountOption: sets.NewString("debug", "nouid32"),
			TopologyKeys:         []string{v1.LabelTopologyZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence:         true,
				storageframework.CapFsGroup:             true,
				storageframework.CapBlock:               true,
				storageframework.CapExec:                true,
				storageframework.CapMultiPODs:           true,
				storageframework.CapControllerExpansion: true,
				storageframework.CapOfflineExpansion:    true,
				storageframework.CapOnlineExpansion:     true,
				storageframework.CapNodeExpansion:       true,
				// GCE supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits:      false,
				storageframework.CapTopology:          true,
				storageframework.CapMultiplePVsSameID: true,
			},
		},
	}
}
  1047  
// InitWindowsGcePdDriver returns gcePdDriver running on Windows cluster that implements TestDriver interface
// In current test structure, it first initialize the driver and then set up
// the new framework, so we cannot get the correct OS here and select which file system is supported.
// So here uses a separate Windows in-tree gce pd driver
func InitWindowsGcePdDriver() storageframework.TestDriver {
	supportedTypes := sets.NewString(
		"ntfs",
	)
	return &gcePdDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "windows-gcepd",
			InTreePluginName: "kubernetes.io/gce-pd",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: supportedTypes,
			// NOTE(review): this uses the deprecated LabelZoneFailureDomain
			// while InitGcePdDriver uses LabelTopologyZone — confirm whether
			// the difference is intentional for Windows clusters.
			TopologyKeys: []string{v1.LabelZoneFailureDomain},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapControllerExpansion: false,
				storageframework.CapPersistence:         true,
				storageframework.CapExec:                true,
				storageframework.CapMultiPODs:           true,
				// GCE supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits:      false,
				storageframework.CapTopology:          true,
				storageframework.CapMultiplePVsSameID: true,
			},
		},
	}
}
  1080  
// GetDriverInfo returns the static DriverInfo describing the GCE PD driver.
func (g *gcePdDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &g.driverInfo
}
  1084  
  1085  func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
  1086  	e2eskipper.SkipUnlessProviderIs("gce", "gke")
  1087  	for _, tag := range pattern.TestTags {
  1088  		if tag == feature.Windows {
  1089  			e2eskipper.SkipUnlessNodeOSDistroIs("windows")
  1090  		}
  1091  	}
  1092  }
  1093  
  1094  func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
  1095  	gv, ok := e2evolume.(*gcePdVolume)
  1096  	if !ok {
  1097  		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
  1098  	}
  1099  	volSource := v1.VolumeSource{
  1100  		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
  1101  			PDName:   gv.volumeName,
  1102  			ReadOnly: readOnly,
  1103  		},
  1104  	}
  1105  	if fsType != "" {
  1106  		volSource.GCEPersistentDisk.FSType = fsType
  1107  	}
  1108  	return &volSource
  1109  }
  1110  
  1111  func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
  1112  	gv, ok := e2evolume.(*gcePdVolume)
  1113  	if !ok {
  1114  		framework.Failf("Failed to cast test volume of type %T to the GCE PD test volume", e2evolume)
  1115  	}
  1116  	pvSource := v1.PersistentVolumeSource{
  1117  		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
  1118  			PDName:   gv.volumeName,
  1119  			ReadOnly: readOnly,
  1120  		},
  1121  	}
  1122  	if fsType != "" {
  1123  		pvSource.GCEPersistentDisk.FSType = fsType
  1124  	}
  1125  	return &pvSource, nil
  1126  }
  1127  
  1128  func (g *gcePdDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
  1129  	provisioner := "kubernetes.io/gce-pd"
  1130  	parameters := map[string]string{}
  1131  	if fsType != "" {
  1132  		parameters["fsType"] = fsType
  1133  	}
  1134  	ns := config.Framework.Namespace.Name
  1135  	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
  1136  
  1137  	return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
  1138  }
  1139  
  1140  func (g *gcePdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
  1141  	config := &storageframework.PerTestConfig{
  1142  		Driver:    g,
  1143  		Prefix:    "gcepd",
  1144  		Framework: f,
  1145  	}
  1146  
  1147  	if framework.NodeOSDistroIs("windows") {
  1148  		config.ClientNodeSelection = e2epod.NodeSelection{
  1149  			Selector: map[string]string{
  1150  				"kubernetes.io/os": "windows",
  1151  			},
  1152  		}
  1153  	}
  1154  	return config
  1155  
  1156  }
  1157  
// CreateVolume provisions a GCE PD in the cloud-config zone and returns a
// test volume wrapping its name. For inline volumes it also constrains the
// client pods to that zone, since the disk only exists there.
func (g *gcePdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	zone := getInlineVolumeZone(ctx, config.Framework)
	if volType == storageframework.InlineVolume {
		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
		// so pods should be also scheduled there.
		config.ClientNodeSelection = e2epod.NodeSelection{
			Selector: map[string]string{
				v1.LabelTopologyZone: zone,
			},
		}
	}
	ginkgo.By("creating a test gce pd volume")
	vname, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
	framework.ExpectNoError(err)
	return &gcePdVolume{
		volumeName: vname,
	}
}
  1176  
// DeleteVolume deletes the GCE PD. The error is deliberately ignored:
// cleanup is best-effort and must not fail the test.
func (v *gcePdVolume) DeleteVolume(ctx context.Context) {
	_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
  1180  
// vSphere
// vSphereDriver exercises the in-tree vSphere volume plugin.
type vSphereDriver struct {
	driverInfo storageframework.DriverInfo
}

// vSphereVolume tracks a provisioned vSphere disk by its volume path and
// the node info used to create (and later delete) it.
type vSphereVolume struct {
	volumePath string
	nodeInfo   *vspheretest.NodeInfo
}

// Compile-time checks that vSphereDriver implements the required
// test-driver interfaces.
var _ storageframework.TestDriver = &vSphereDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &vSphereDriver{}
var _ storageframework.InlineVolumeTestDriver = &vSphereDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &vSphereDriver{}
var _ storageframework.DynamicPVTestDriver = &vSphereDriver{}
  1196  
// InitVSphereDriver returns vSphereDriver that implements TestDriver interface
func InitVSphereDriver() storageframework.TestDriver {
	return &vSphereDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "vsphere",
			InTreePluginName: "kubernetes.io/vsphere-volume",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
				"ext4",
				"ntfs",
			),
			TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence:       true,
				storageframework.CapFsGroup:           true,
				storageframework.CapExec:              true,
				storageframework.CapMultiPODs:         true,
				storageframework.CapTopology:          true,
				storageframework.CapBlock:             true,
				storageframework.CapMultiplePVsSameID: false,
			},
		},
	}
}
// GetDriverInfo returns the static DriverInfo describing the vSphere driver.
func (v *vSphereDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &v.driverInfo
}
  1228  
// SkipUnsupportedTest skips all vSphere tests unless running against the
// vsphere cloud provider.
func (v *vSphereDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("vsphere")
}
  1232  
  1233  func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
  1234  	vsv, ok := e2evolume.(*vSphereVolume)
  1235  	if !ok {
  1236  		framework.Failf("Failed to cast test volume of type %T to the cSphere test volume", e2evolume)
  1237  	}
  1238  
  1239  	// vSphere driver doesn't seem to support readOnly volume
  1240  	// TODO: check if it is correct
  1241  	if readOnly {
  1242  		return nil
  1243  	}
  1244  	volSource := v1.VolumeSource{
  1245  		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
  1246  			VolumePath: vsv.volumePath,
  1247  		},
  1248  	}
  1249  	if fsType != "" {
  1250  		volSource.VsphereVolume.FSType = fsType
  1251  	}
  1252  	return &volSource
  1253  }
  1254  
  1255  func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
  1256  	vsv, ok := e2evolume.(*vSphereVolume)
  1257  	if !ok {
  1258  		framework.Failf("Failed to cast test volume of type %T to the vSphere test volume", e2evolume)
  1259  	}
  1260  
  1261  	// vSphere driver doesn't seem to support readOnly volume
  1262  	// TODO: check if it is correct
  1263  	if readOnly {
  1264  		return nil, nil
  1265  	}
  1266  	pvSource := v1.PersistentVolumeSource{
  1267  		VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
  1268  			VolumePath: vsv.volumePath,
  1269  		},
  1270  	}
  1271  	if fsType != "" {
  1272  		pvSource.VsphereVolume.FSType = fsType
  1273  	}
  1274  	return &pvSource, nil
  1275  }
  1276  
  1277  func (v *vSphereDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
  1278  	provisioner := "kubernetes.io/vsphere-volume"
  1279  	parameters := map[string]string{}
  1280  	if fsType != "" {
  1281  		parameters["fsType"] = fsType
  1282  	}
  1283  	ns := config.Framework.Namespace.Name
  1284  
  1285  	return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
  1286  }
  1287  
// PrepareTest bootstraps the vSphere test environment for the framework and
// registers a cleanup hook that logs out every node's vSphere client so
// sessions are not leaked, then returns the per-test configuration.
func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
	vspheretest.Bootstrap(f)
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// Driver Cleanup function
		// Logout each vSphere client connection to prevent session leakage
		nodes := vspheretest.GetReadySchedulableNodeInfos(ctx, f.ClientSet)
		for _, node := range nodes {
			if node.VSphere.Client != nil {
				// Best-effort logout; errors are intentionally ignored.
				_ = node.VSphere.Client.Logout(ctx)
			}
		}
	})
	return &storageframework.PerTestConfig{
		Driver:    v,
		Prefix:    "vsphere",
		Framework: f,
	}
}
  1306  
// CreateVolume provisions a vSphere disk via a randomly chosen ready node's
// vSphere connection and returns a test volume wrapping the volume path and
// that node's info (needed later for deletion).
func (v *vSphereDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	f := config.Framework
	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx, f.ClientSet)
	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
	framework.ExpectNoError(err)
	return &vSphereVolume{
		volumePath: volumePath,
		nodeInfo:   nodeInfo,
	}
}
  1317  
// DeleteVolume deletes the vSphere disk through the same node connection
// that created it.
func (v *vSphereVolume) DeleteVolume(ctx context.Context) {
	v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
}
  1321  
// Azure Disk
// azureDiskDriver exercises the in-tree Azure Disk volume plugin.
type azureDiskDriver struct {
	driverInfo storageframework.DriverInfo
}

// azureDiskVolume tracks a provisioned Azure disk by its data-disk URI.
type azureDiskVolume struct {
	volumeName string
}

// Compile-time checks that azureDiskDriver implements the required
// test-driver interfaces.
var _ storageframework.TestDriver = &azureDiskDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
var _ storageframework.InlineVolumeTestDriver = &azureDiskDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &azureDiskDriver{}
var _ storageframework.DynamicPVTestDriver = &azureDiskDriver{}
var _ storageframework.CustomTimeoutsTestDriver = &azureDiskDriver{}
  1337  
// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
func InitAzureDiskDriver() storageframework.TestDriver {
	return &azureDiskDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "azure-disk",
			InTreePluginName: "kubernetes.io/azure-disk",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
				"ext4",
				"xfs",
			),
			TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence: true,
				storageframework.CapFsGroup:     true,
				storageframework.CapBlock:       true,
				storageframework.CapExec:        true,
				storageframework.CapMultiPODs:   true,
				// Azure supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits:      false,
				storageframework.CapTopology:          true,
				storageframework.CapMultiplePVsSameID: true,
			},
		},
	}
}
  1369  
// GetDriverInfo returns the static DriverInfo describing the Azure Disk driver.
func (a *azureDiskDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &a.driverInfo
}
  1373  
// SkipUnsupportedTest skips all Azure Disk tests unless running against the
// azure cloud provider.
func (a *azureDiskDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("azure")
}
  1377  
  1378  func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
  1379  	av, ok := e2evolume.(*azureDiskVolume)
  1380  	if !ok {
  1381  		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
  1382  	}
  1383  	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
  1384  
  1385  	kind := v1.AzureManagedDisk
  1386  	volSource := v1.VolumeSource{
  1387  		AzureDisk: &v1.AzureDiskVolumeSource{
  1388  			DiskName:    diskName,
  1389  			DataDiskURI: av.volumeName,
  1390  			Kind:        &kind,
  1391  			ReadOnly:    &readOnly,
  1392  		},
  1393  	}
  1394  	if fsType != "" {
  1395  		volSource.AzureDisk.FSType = &fsType
  1396  	}
  1397  	return &volSource
  1398  }
  1399  
  1400  func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
  1401  	av, ok := e2evolume.(*azureDiskVolume)
  1402  	if !ok {
  1403  		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
  1404  	}
  1405  
  1406  	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]
  1407  
  1408  	kind := v1.AzureManagedDisk
  1409  	pvSource := v1.PersistentVolumeSource{
  1410  		AzureDisk: &v1.AzureDiskVolumeSource{
  1411  			DiskName:    diskName,
  1412  			DataDiskURI: av.volumeName,
  1413  			Kind:        &kind,
  1414  			ReadOnly:    &readOnly,
  1415  		},
  1416  	}
  1417  	if fsType != "" {
  1418  		pvSource.AzureDisk.FSType = &fsType
  1419  	}
  1420  	return &pvSource, nil
  1421  }
  1422  
  1423  func (a *azureDiskDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
  1424  	provisioner := "kubernetes.io/azure-disk"
  1425  	parameters := map[string]string{}
  1426  	if fsType != "" {
  1427  		parameters["fsType"] = fsType
  1428  	}
  1429  	ns := config.Framework.Namespace.Name
  1430  	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
  1431  
  1432  	return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
  1433  }
  1434  
  1435  func (a *azureDiskDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
  1436  	return &storageframework.PerTestConfig{
  1437  		Driver:    a,
  1438  		Prefix:    "azure",
  1439  		Framework: f,
  1440  	}
  1441  }
  1442  
// CreateVolume provisions an Azure disk in the cloud-config zone and returns
// a test volume wrapping its name. For inline volumes it also constrains the
// client pods to that zone, since the disk only exists there.
func (a *azureDiskDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	ginkgo.By("creating a test azure disk volume")
	zone := getInlineVolumeZone(ctx, config.Framework)
	if volType == storageframework.InlineVolume {
		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
		// so pods should be also scheduled there.
		config.ClientNodeSelection = e2epod.NodeSelection{
			Selector: map[string]string{
				v1.LabelTopologyZone: zone,
			},
		}
	}
	volumeName, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
	framework.ExpectNoError(err)
	return &azureDiskVolume{
		volumeName: volumeName,
	}
}
  1461  
// DeleteVolume deletes the Azure disk. The error is deliberately ignored:
// cleanup is best-effort and must not fail the test.
func (v *azureDiskVolume) DeleteVolume(ctx context.Context) {
	_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
  1465  
// AWS
// awsDriver exercises the in-tree AWS EBS volume plugin (dynamic PVs only).
type awsDriver struct {
	driverInfo storageframework.DriverInfo
}

// Compile-time checks that awsDriver implements the required
// test-driver interfaces.
var _ storageframework.TestDriver = &awsDriver{}
var _ storageframework.DynamicPVTestDriver = &awsDriver{}
  1473  
// InitAwsDriver returns awsDriver that implements TestDriver interface
func InitAwsDriver() storageframework.TestDriver {
	return &awsDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "aws",
			InTreePluginName: "kubernetes.io/aws-ebs",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
				"ext4",
				"xfs",
				"ntfs",
			),
			SupportedMountOption: sets.NewString("debug", "nouid32"),
			TopologyKeys:         []string{v1.LabelTopologyZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence:         true,
				storageframework.CapFsGroup:             true,
				storageframework.CapBlock:               true,
				storageframework.CapExec:                true,
				storageframework.CapMultiPODs:           true,
				storageframework.CapControllerExpansion: true,
				storageframework.CapNodeExpansion:       true,
				storageframework.CapOfflineExpansion:    true,
				storageframework.CapOnlineExpansion:     true,
				// AWS supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits:      false,
				storageframework.CapTopology:          true,
				storageframework.CapMultiplePVsSameID: true,
			},
		},
	}
}
  1511  
// GetDriverInfo returns the static DriverInfo describing the AWS EBS driver.
func (a *awsDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &a.driverInfo
}
  1515  
// SkipUnsupportedTest skips all AWS EBS tests unless running against the
// aws cloud provider.
func (a *awsDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("aws")
}
  1519  
  1520  func (a *awsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
  1521  	provisioner := "kubernetes.io/aws-ebs"
  1522  	parameters := map[string]string{}
  1523  	if fsType != "" {
  1524  		parameters["fsType"] = fsType
  1525  	}
  1526  	ns := config.Framework.Namespace.Name
  1527  	delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer
  1528  
  1529  	return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
  1530  }
  1531  
  1532  func (a *awsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
  1533  	config := &storageframework.PerTestConfig{
  1534  		Driver:    a,
  1535  		Prefix:    "aws",
  1536  		Framework: f,
  1537  	}
  1538  
  1539  	if framework.NodeOSDistroIs("windows") {
  1540  		config.ClientNodeSelection = e2epod.NodeSelection{
  1541  			Selector: map[string]string{
  1542  				"kubernetes.io/os": "windows",
  1543  			},
  1544  		}
  1545  	}
  1546  	return config
  1547  }
  1548  
// local
// localDriver exercises in-tree local volumes of a configurable type.
type localDriver struct {
	driverInfo storageframework.DriverInfo
	// node the local resources are created on; set during PrepareTest.
	node     *v1.Node
	hostExec utils.HostExec
	// volumeType represents local volume type we are testing, e.g.  tmpfs,
	// directory, block device.
	volumeType utils.LocalVolumeType
	ltrMgr     utils.LocalTestResourceManager
}

// localVolume pairs a created local test resource with the manager that
// knows how to remove it.
type localVolume struct {
	ltrMgr utils.LocalTestResourceManager
	ltr    *utils.LocalTestResource
}
  1564  
var (
	// capabilities
	// Defaults apply to every local volume type unless overridden below.
	defaultLocalVolumeCapabilities = map[storageframework.Capability]bool{
		storageframework.CapPersistence:       true,
		storageframework.CapFsGroup:           true,
		storageframework.CapBlock:             false,
		storageframework.CapExec:              true,
		storageframework.CapMultiPODs:         true,
		storageframework.CapSingleNodeVolume:  true,
		storageframework.CapMultiplePVsSameID: true,
	}
	// Per-type capability overrides; only the block type differs (it
	// additionally supports raw block mode).
	// NOTE(review): name is misspelled ("Capabitilies"); it is referenced
	// elsewhere in this file, so renaming would need a coordinated change.
	localVolumeCapabitilies = map[utils.LocalVolumeType]map[storageframework.Capability]bool{
		utils.LocalVolumeBlock: {
			storageframework.CapPersistence:       true,
			storageframework.CapFsGroup:           true,
			storageframework.CapBlock:             true,
			storageframework.CapExec:              true,
			storageframework.CapMultiPODs:         true,
			storageframework.CapSingleNodeVolume:  true,
			storageframework.CapMultiplePVsSameID: true,
		},
	}
	// fstype
	// Default is the empty string (filesystem chosen by the plugin).
	defaultLocalVolumeSupportedFsTypes = sets.NewString("")
	localVolumeSupportedFsTypes        = map[utils.LocalVolumeType]sets.String{
		utils.LocalVolumeBlock: sets.NewString(
			"", // Default fsType
			"ext4",
			//"xfs", disabled see issue https://github.com/kubernetes/kubernetes/issues/74095
		),
	}
	// max file size
	defaultLocalVolumeMaxFileSize = storageframework.FileSizeSmall
	// No per-type overrides currently; the map exists for future use.
	localVolumeMaxFileSizes = map[utils.LocalVolumeType]int64{}
)
  1600  
// Compile-time checks that localDriver implements the required test driver
// interfaces.
var _ storageframework.TestDriver = &localDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &localDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &localDriver{}
  1604  
  1605  // InitLocalDriverWithVolumeType initializes the local driver based on the volume type.
  1606  func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() storageframework.TestDriver {
  1607  	maxFileSize := defaultLocalVolumeMaxFileSize
  1608  	if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok {
  1609  		maxFileSize = maxFileSizeByVolType
  1610  	}
  1611  	supportedFsTypes := defaultLocalVolumeSupportedFsTypes
  1612  	if supportedFsTypesByType, ok := localVolumeSupportedFsTypes[volumeType]; ok {
  1613  		supportedFsTypes = supportedFsTypesByType
  1614  	}
  1615  	capabilities := defaultLocalVolumeCapabilities
  1616  	if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok {
  1617  		capabilities = capabilitiesByType
  1618  	}
  1619  	return func() storageframework.TestDriver {
  1620  		// custom tag to distinguish from tests of other volume types
  1621  		testTags := []interface{}{fmt.Sprintf("[LocalVolumeType: %s]", volumeType)}
  1622  		// For GCE Local SSD volumes, we must run serially
  1623  		if volumeType == utils.LocalVolumeGCELocalSSD {
  1624  			testTags = append(testTags, framework.WithSerial())
  1625  		}
  1626  		return &localDriver{
  1627  			driverInfo: storageframework.DriverInfo{
  1628  				Name:             "local",
  1629  				InTreePluginName: "kubernetes.io/local-volume",
  1630  				TestTags:         testTags,
  1631  				MaxFileSize:      maxFileSize,
  1632  				SupportedFsType:  supportedFsTypes,
  1633  				Capabilities:     capabilities,
  1634  			},
  1635  			volumeType: volumeType,
  1636  		}
  1637  	}
  1638  }
  1639  
// GetDriverInfo returns the static driver information for the local driver.
func (l *localDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &l.driverInfo
}
  1643  
// SkipUnsupportedTest is a no-op: pattern-based skips are handled via the
// driver capabilities, and the GCE local SSD availability check happens in
// PrepareTest because the framework is not initialized at skip time.
func (l *localDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
  1646  
  1647  func (l *localDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
  1648  	var err error
  1649  	l.node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
  1650  	framework.ExpectNoError(err)
  1651  
  1652  	l.hostExec = utils.NewHostExec(f)
  1653  	l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
  1654  
  1655  	// This can't be done in SkipUnsupportedTest because the test framework is not initialized yet
  1656  	if l.volumeType == utils.LocalVolumeGCELocalSSD {
  1657  		ssdInterface := "scsi"
  1658  		filesystemType := "fs"
  1659  		ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
  1660  		res, err := l.hostExec.IssueCommandWithResult(ctx, ssdCmd, l.node)
  1661  		framework.ExpectNoError(err)
  1662  		num, err := strconv.Atoi(strings.TrimSpace(res))
  1663  		framework.ExpectNoError(err)
  1664  		if num < 1 {
  1665  			e2eskipper.Skipf("Requires at least 1 %s %s localSSD ", ssdInterface, filesystemType)
  1666  		}
  1667  	}
  1668  
  1669  	ginkgo.DeferCleanup(l.hostExec.Cleanup)
  1670  	return &storageframework.PerTestConfig{
  1671  		Driver:              l,
  1672  		Prefix:              "local",
  1673  		Framework:           f,
  1674  		ClientNodeSelection: e2epod.NodeSelection{Name: l.node.Name},
  1675  	}
  1676  }
  1677  
  1678  func (l *localDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
  1679  	switch volType {
  1680  	case storageframework.PreprovisionedPV:
  1681  		node := l.node
  1682  		// assign this to schedule pod on this node
  1683  		config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
  1684  		return &localVolume{
  1685  			ltrMgr: l.ltrMgr,
  1686  			ltr:    l.ltrMgr.Create(ctx, node, l.volumeType, nil),
  1687  		}
  1688  	default:
  1689  		framework.Failf("Unsupported volType: %v is specified", volType)
  1690  	}
  1691  	return nil
  1692  }
  1693  
// DeleteVolume removes the local test resource created by CreateVolume.
func (v *localVolume) DeleteVolume(ctx context.Context) {
	v.ltrMgr.Remove(ctx, v.ltr)
}
  1697  
  1698  func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
  1699  	nodeKey := "kubernetes.io/hostname"
  1700  	if node.Labels == nil {
  1701  		framework.Failf("Node does not have labels")
  1702  	}
  1703  	nodeValue, found := node.Labels[nodeKey]
  1704  	if !found {
  1705  		framework.Failf("Node does not have required label %q", nodeKey)
  1706  	}
  1707  	return &v1.VolumeNodeAffinity{
  1708  		Required: &v1.NodeSelector{
  1709  			NodeSelectorTerms: []v1.NodeSelectorTerm{
  1710  				{
  1711  					MatchExpressions: []v1.NodeSelectorRequirement{
  1712  						{
  1713  							Key:      nodeKey,
  1714  							Operator: v1.NodeSelectorOpIn,
  1715  							Values:   []string{nodeValue},
  1716  						},
  1717  					},
  1718  				},
  1719  			},
  1720  		},
  1721  	}
  1722  }
  1723  
  1724  func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
  1725  	lv, ok := e2evolume.(*localVolume)
  1726  	if !ok {
  1727  		framework.Failf("Failed to cast test volume of type %T to the local test volume", e2evolume)
  1728  	}
  1729  	return &v1.PersistentVolumeSource{
  1730  		Local: &v1.LocalVolumeSource{
  1731  			Path:   lv.ltr.Path,
  1732  			FSType: &fsType,
  1733  		},
  1734  	}, l.nodeAffinityForNode(lv.ltr.Node)
  1735  }
  1736  
// cleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
// It delegates to cleanUpVolumeServerWithSecret with a nil secret.
func cleanUpVolumeServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
	cleanUpVolumeServerWithSecret(ctx, f, serverPod, nil)
}
  1741  
  1742  func getInlineVolumeZone(ctx context.Context, f *framework.Framework) string {
  1743  	if framework.TestContext.CloudConfig.Zone != "" {
  1744  		return framework.TestContext.CloudConfig.Zone
  1745  	}
  1746  	// if zone is not specified we will randomly pick a zone from schedulable nodes for inline tests
  1747  	node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
  1748  	framework.ExpectNoError(err)
  1749  	zone, ok := node.Labels[v1.LabelFailureDomainBetaZone]
  1750  	if ok {
  1751  		return zone
  1752  	}
  1753  	topologyZone, ok := node.Labels[v1.LabelTopologyZone]
  1754  	if ok {
  1755  		return topologyZone
  1756  	}
  1757  	return ""
  1758  }
  1759  
  1760  // cleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
  1761  func cleanUpVolumeServerWithSecret(ctx context.Context, f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) {
  1762  	cs := f.ClientSet
  1763  	ns := f.Namespace
  1764  
  1765  	if secret != nil {
  1766  		framework.Logf("Deleting server secret %q...", secret.Name)
  1767  		err := cs.CoreV1().Secrets(ns.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{})
  1768  		if err != nil {
  1769  			framework.Logf("Delete secret failed: %v", err)
  1770  		}
  1771  	}
  1772  
  1773  	framework.Logf("Deleting server pod %q...", serverPod.Name)
  1774  	err := e2epod.DeletePodWithWait(ctx, cs, serverPod)
  1775  	if err != nil {
  1776  		framework.Logf("Server pod delete failed: %v", err)
  1777  	}
  1778  }
  1779  
// azureFileDriver implements the test driver for the in-tree Azure File
// volume plugin (kubernetes.io/azure-file).
type azureFileDriver struct {
	driverInfo storageframework.DriverInfo
}
  1784  
// azureFileVolume is the TestVolume produced by azureFileDriver.CreateVolume.
// It records the storage account/share plus the Kubernetes secret holding the
// account credentials so volume/PV sources can reference them.
type azureFileVolume struct {
	accountName     string
	shareName       string
	secretName      string
	secretNamespace string
}
  1791  
// Compile-time checks that azureFileDriver implements the required test
// driver interfaces.
var _ storageframework.TestDriver = &azureFileDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &azureFileDriver{}
var _ storageframework.InlineVolumeTestDriver = &azureFileDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &azureFileDriver{}
var _ storageframework.DynamicPVTestDriver = &azureFileDriver{}
  1797  
  1798  // InitAzureFileDriver returns azureFileDriver that implements TestDriver interface
  1799  func InitAzureFileDriver() storageframework.TestDriver {
  1800  	return &azureFileDriver{
  1801  		driverInfo: storageframework.DriverInfo{
  1802  			Name:             "azure-file",
  1803  			InTreePluginName: "kubernetes.io/azure-file",
  1804  			MaxFileSize:      storageframework.FileSizeMedium,
  1805  			SupportedSizeRange: e2evolume.SizeRange{
  1806  				Min: "1Gi",
  1807  			},
  1808  			SupportedFsType: sets.NewString(
  1809  				"", // Default fsType
  1810  			),
  1811  			Capabilities: map[storageframework.Capability]bool{
  1812  				storageframework.CapPersistence:         true,
  1813  				storageframework.CapExec:                true,
  1814  				storageframework.CapRWX:                 true,
  1815  				storageframework.CapMultiPODs:           true,
  1816  				storageframework.CapControllerExpansion: true,
  1817  				storageframework.CapNodeExpansion:       true,
  1818  				storageframework.CapMultiplePVsSameID:   true,
  1819  			},
  1820  		},
  1821  	}
  1822  }
  1823  
// GetDriverInfo returns the static driver information for the Azure File driver.
func (a *azureFileDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &a.driverInfo
}
  1827  
// SkipUnsupportedTest skips all tests unless running on the Azure provider.
func (a *azureFileDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("azure")
}
  1831  
  1832  func (a *azureFileDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
  1833  	av, ok := e2evolume.(*azureFileVolume)
  1834  	if !ok {
  1835  		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
  1836  	}
  1837  	volSource := v1.VolumeSource{
  1838  		AzureFile: &v1.AzureFileVolumeSource{
  1839  			SecretName: av.secretName,
  1840  			ShareName:  av.shareName,
  1841  			ReadOnly:   readOnly,
  1842  		},
  1843  	}
  1844  	return &volSource
  1845  }
  1846  
  1847  func (a *azureFileDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
  1848  	av, ok := e2evolume.(*azureFileVolume)
  1849  	if !ok {
  1850  		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
  1851  	}
  1852  	pvSource := v1.PersistentVolumeSource{
  1853  		AzureFile: &v1.AzureFilePersistentVolumeSource{
  1854  			SecretName:      av.secretName,
  1855  			ShareName:       av.shareName,
  1856  			SecretNamespace: &av.secretNamespace,
  1857  			ReadOnly:        readOnly,
  1858  		},
  1859  	}
  1860  	return &pvSource, nil
  1861  }
  1862  
  1863  func (a *azureFileDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
  1864  	provisioner := "kubernetes.io/azure-file"
  1865  	parameters := map[string]string{}
  1866  	ns := config.Framework.Namespace.Name
  1867  	immediateBinding := storagev1.VolumeBindingImmediate
  1868  	return storageframework.GetStorageClass(provisioner, parameters, &immediateBinding, ns)
  1869  }
  1870  
  1871  func (a *azureFileDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
  1872  	return &storageframework.PerTestConfig{
  1873  		Driver:    a,
  1874  		Prefix:    "azure-file",
  1875  		Framework: f,
  1876  	}
  1877  }
  1878  
  1879  func (a *azureFileDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
  1880  	ginkgo.By("creating a test azure file volume")
  1881  	accountName, accountKey, shareName, err := e2epv.CreateShare()
  1882  	framework.ExpectNoError(err)
  1883  
  1884  	secretName := "azure-storage-account-" + accountName + "-secret"
  1885  	secret := &v1.Secret{
  1886  		ObjectMeta: metav1.ObjectMeta{
  1887  			Namespace: config.Framework.Namespace.Name,
  1888  			Name:      secretName,
  1889  		},
  1890  
  1891  		Data: map[string][]byte{
  1892  			"azurestorageaccountname": []byte(accountName),
  1893  			"azurestorageaccountkey":  []byte(accountKey),
  1894  		},
  1895  		Type: "Opaque",
  1896  	}
  1897  
  1898  	_, err = config.Framework.ClientSet.CoreV1().Secrets(config.Framework.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
  1899  	framework.ExpectNoError(err)
  1900  	return &azureFileVolume{
  1901  		accountName:     accountName,
  1902  		shareName:       shareName,
  1903  		secretName:      secretName,
  1904  		secretNamespace: config.Framework.Namespace.Name,
  1905  	}
  1906  }
  1907  
  1908  func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
  1909  	err := e2epv.DeleteShare(v.accountName, v.shareName)
  1910  	framework.ExpectNoError(err)
  1911  }
  1912  
  1913  func (a *azureDiskDriver) GetTimeouts() *framework.TimeoutContext {
  1914  	timeouts := framework.NewTimeoutContext()
  1915  	timeouts.PodStart = time.Minute * 15
  1916  	timeouts.PodDelete = time.Minute * 15
  1917  	timeouts.PVDelete = time.Minute * 20
  1918  	return timeouts
  1919  }