k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/storage/testsuites/subpath.go

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"path/filepath"
    23  	"regexp"
    24  	"strings"
    25  	"time"
    26  
    27  	"github.com/onsi/ginkgo/v2"
    28  	"github.com/onsi/gomega"
    29  
    30  	v1 "k8s.io/api/core/v1"
    31  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/util/errors"
    34  	"k8s.io/apimachinery/pkg/util/rand"
    35  	"k8s.io/apimachinery/pkg/util/sets"
    36  	"k8s.io/apimachinery/pkg/util/wait"
    37  	"k8s.io/kubernetes/test/e2e/framework"
    38  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    39  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    40  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    41  	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    42  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    43  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    44  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    45  	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
    46  	imageutils "k8s.io/kubernetes/test/utils/image"
    47  	admissionapi "k8s.io/pod-security-admission/api"
    48  )
    49  
    50  var (
    51  	volumePath      = "/test-volume"
    52  	volumeName      = "test-volume"
    53  	probeVolumePath = "/probe-volume"
    54  	probeFilePath   = probeVolumePath + "/probe-file"
    55  	fileName        = "test-file"
    56  	retryDuration   = 20
    57  )
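// These names describe the in-pod layout shared by all subpath tests: the
// volume under test is mounted at /test-volume, a separate emptyDir used by
// the liveness-probe tests is mounted at /probe-volume, and "test-file" is the
// payload written and read through the subpath. retryDuration is handed to the
// agnhost mounttest program as --retry_time.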
    58  
    59  type subPathTestSuite struct {
    60  	tsInfo storageframework.TestSuiteInfo
    61  }
    62  
     63  // InitCustomSubPathTestSuite returns a subPathTestSuite that implements the TestSuite interface
     64  // using custom test patterns.
    65  func InitCustomSubPathTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
    66  	return &subPathTestSuite{
    67  		tsInfo: storageframework.TestSuiteInfo{
    68  			Name:         "subPath",
    69  			TestPatterns: patterns,
    70  			SupportedSizeRange: e2evolume.SizeRange{
    71  				Min: "1Mi",
    72  			},
    73  		},
    74  	}
    75  }
    76  
     77  // InitSubPathTestSuite returns a subPathTestSuite that implements the TestSuite interface
     78  // using the test suite's default patterns.
    79  func InitSubPathTestSuite() storageframework.TestSuite {
    80  	patterns := []storageframework.TestPattern{
    81  		storageframework.DefaultFsInlineVolume,
    82  		storageframework.DefaultFsPreprovisionedPV,
    83  		storageframework.DefaultFsDynamicPV,
    84  		storageframework.NtfsDynamicPV,
    85  	}
    86  	return InitCustomSubPathTestSuite(patterns)
    87  }
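// A minimal registration sketch, assuming a hypothetical driver value
// "myDriver" that implements storageframework.TestDriver (the real wiring
// lives in the per-driver files such as test/e2e/storage/csi_volumes.go):
//
//	ginkgo.Context(storageframework.GetDriverNameWithFeatureTags(myDriver), func() {
//		storageframework.DefineTestSuites(myDriver, []func() storageframework.TestSuite{
//			InitSubPathTestSuite,
//		})
//	})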
    88  
    89  func (s *subPathTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
    90  	return s.tsInfo
    91  }
    92  
    93  func (s *subPathTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
    94  	skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(
    95  		storageframework.PreprovisionedPV,
    96  		storageframework.InlineVolume))
    97  }
    98  
    99  func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
   100  	type local struct {
   101  		config *storageframework.PerTestConfig
   102  
   103  		hostExec          storageutils.HostExec
   104  		resource          *storageframework.VolumeResource
   105  		roVolSource       *v1.VolumeSource
   106  		pod               *v1.Pod
   107  		formatPod         *v1.Pod
   108  		subPathDir        string
   109  		filePathInSubpath string
   110  		filePathInVolume  string
   111  
   112  		migrationCheck *migrationOpCheck
   113  	}
   114  	var l local
   115  
    116  	// Beware that NewFrameworkWithCustomTimeouts also registers an AfterEach which
    117  	// renders f unusable. Any code using f must run inside an It or Context callback.
   118  	f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))
   119  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   120  
   121  	init := func(ctx context.Context) {
   122  		l = local{}
   123  
   124  		// Now do the more expensive test initialization.
   125  		l.config = driver.PrepareTest(ctx, f)
   126  		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName)
   127  		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
   128  		l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange)
   129  		l.hostExec = storageutils.NewHostExec(f)
   130  
   131  		// Setup subPath test dependent resource
   132  		volType := pattern.VolType
   133  		switch volType {
   134  		case storageframework.InlineVolume:
   135  			if iDriver, ok := driver.(storageframework.InlineVolumeTestDriver); ok {
   136  				l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
   137  			}
    138  	case storageframework.PreprovisionedPV, storageframework.DynamicPV:
    139  		l.roVolSource = &v1.VolumeSource{
    140  			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
    141  				ClaimName: l.resource.Pvc.Name,
    142  				ReadOnly:  true,
    143  			},
    144  		}
   152  		default:
   153  			framework.Failf("SubPath test doesn't support: %s", volType)
   154  		}
   155  
   156  		subPath := f.Namespace.Name
   157  		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, admissionapi.LevelPrivileged)
   158  		e2epod.SetNodeSelection(&l.pod.Spec, l.config.ClientNodeSelection)
   159  
   160  		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
   161  		e2epod.SetNodeSelection(&l.formatPod.Spec, l.config.ClientNodeSelection)
   162  
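	// Note the asymmetry below: filePathInSubpath joins volumePath rather than
	// subPathDir because the subpath container mounts subPathDir at /test-volume,
	// so the same file appears as /test-volume/test-file through the subpath
	// mount and as /test-volume/<namespace>/test-file through the full-volume mount.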
   163  		l.subPathDir = filepath.Join(volumePath, subPath)
   164  		l.filePathInSubpath = filepath.Join(volumePath, fileName)
   165  		l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
   166  	}
   167  
   168  	cleanup := func(ctx context.Context) {
   169  		var errs []error
   170  		if l.pod != nil {
   171  			ginkgo.By("Deleting pod")
   172  			err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod)
   173  			errs = append(errs, err)
   174  			l.pod = nil
   175  		}
   176  
   177  		if l.resource != nil {
   178  			errs = append(errs, l.resource.CleanupResource(ctx))
   179  			l.resource = nil
   180  		}
   181  
   182  		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
   183  
   184  		if l.hostExec != nil {
   185  			l.hostExec.Cleanup(ctx)
   186  		}
   187  
   188  		l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
   189  	}
   190  
   191  	driverName := driver.GetDriverInfo().Name
   192  
   193  	ginkgo.It("should support non-existent path", func(ctx context.Context) {
   194  		init(ctx)
   195  		ginkgo.DeferCleanup(cleanup)
   196  
   197  		// Write the file in the subPath from init container 1
   198  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   199  
   200  		// Read it from outside the subPath from container 1
   201  		testReadFile(ctx, f, l.filePathInVolume, l.pod, 1)
   202  	})
   203  
   204  	ginkgo.It("should support existing directory", func(ctx context.Context) {
   205  		init(ctx)
   206  		ginkgo.DeferCleanup(cleanup)
   207  
   208  		// Create the directory
   209  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   210  
   211  		// Write the file in the subPath from init container 1
   212  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   213  
   214  		// Read it from outside the subPath from container 1
   215  		testReadFile(ctx, f, l.filePathInVolume, l.pod, 1)
   216  	})
   217  
   218  	ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) {
   219  		init(ctx)
   220  		ginkgo.DeferCleanup(cleanup)
   221  
   222  		// Create the file in the init container
   223  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
   224  
   225  		// Read it from inside the subPath from container 0
   226  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   227  	})
   228  
   229  	ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) {
   230  		init(ctx)
   231  		ginkgo.DeferCleanup(cleanup)
   232  
   233  		// Create the file in the init container
   234  		setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
   235  
   236  		TestBasicSubpath(ctx, f, f.Namespace.Name, l.pod)
   237  	})
   238  
   239  	f.It("should fail if subpath directory is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   240  		init(ctx)
   241  		ginkgo.DeferCleanup(cleanup)
   242  
   243  		// Create the subpath outside the volume
   244  		var command string
   245  		if framework.NodeOSDistroIs("windows") {
   246  			command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value \\Windows", l.subPathDir)
   247  		} else {
   248  			command = fmt.Sprintf("ln -s /bin %s", l.subPathDir)
   249  		}
   250  		setInitCommand(l.pod, command)
   251  		// Pod should fail
   252  		testPodFailSubpath(ctx, f, l.pod, false)
   253  	})
   254  
   255  	f.It("should fail if subpath file is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   256  		init(ctx)
   257  		ginkgo.DeferCleanup(cleanup)
   258  
   259  		// Create the subpath outside the volume
   260  		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
   261  
   262  		// Pod should fail
   263  		testPodFailSubpath(ctx, f, l.pod, false)
   264  	})
   265  
   266  	f.It("should fail if non-existent subpath is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   267  		init(ctx)
   268  		ginkgo.DeferCleanup(cleanup)
   269  
   270  		// Create the subpath outside the volume
   271  		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
   272  
   273  		// Pod should fail
   274  		testPodFailSubpath(ctx, f, l.pod, false)
   275  	})
   276  
   277  	f.It("should fail if subpath with backstepping is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   278  		init(ctx)
   279  		ginkgo.DeferCleanup(cleanup)
   280  
   281  		// Create the subpath outside the volume
   282  		var command string
   283  		if framework.NodeOSDistroIs("windows") {
   284  			command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value ..\\", l.subPathDir)
   285  		} else {
   286  			command = fmt.Sprintf("ln -s ../ %s", l.subPathDir)
   287  		}
   288  		setInitCommand(l.pod, command)
   289  		// Pod should fail
   290  		testPodFailSubpath(ctx, f, l.pod, false)
   291  	})
   292  
   293  	f.It("should support creating multiple subpath from same volumes", f.WithSlow(), func(ctx context.Context) {
   294  		init(ctx)
   295  		ginkgo.DeferCleanup(cleanup)
   296  
   297  		subpathDir1 := filepath.Join(volumePath, "subpath1")
   298  		subpathDir2 := filepath.Join(volumePath, "subpath2")
   299  		filepath1 := filepath.Join("/test-subpath1", fileName)
   300  		filepath2 := filepath.Join("/test-subpath2", fileName)
   301  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
   302  
   303  		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
   304  			Name:      volumeName,
   305  			MountPath: "/test-subpath1",
   306  			SubPath:   "subpath1",
   307  		})
   308  		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
   309  			Name:      volumeName,
   310  			MountPath: "/test-subpath2",
   311  			SubPath:   "subpath2",
   312  		})
   313  
    314  		// Write the files from container 0 and immediately read them back
   315  		addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
   316  		testMultipleReads(ctx, f, l.pod, 0, filepath1, filepath2)
   317  	})
   318  
   319  	f.It("should support restarting containers using directory as subpath", f.WithSlow(), func(ctx context.Context) {
   320  		init(ctx)
   321  		ginkgo.DeferCleanup(cleanup)
   322  
   323  		// Create the directory
    324  		command := fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath)
   326  		setInitCommand(l.pod, command)
   327  		testPodContainerRestart(ctx, f, l.pod)
   328  	})
   329  
   330  	f.It("should support restarting containers using file as subpath", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   331  		init(ctx)
   332  		ginkgo.DeferCleanup(cleanup)
   333  
   334  		// Create the file
   335  		setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
   336  
   337  		testPodContainerRestart(ctx, f, l.pod)
   338  	})
   339  
   340  	f.It("should unmount if pod is gracefully deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   341  		e2eskipper.SkipUnlessSSHKeyPresent()
   342  		init(ctx)
   343  		ginkgo.DeferCleanup(cleanup)
   344  
   345  		if strings.HasPrefix(driverName, "hostPath") {
   346  			// TODO: This skip should be removed once #61446 is fixed
   347  			e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName)
   348  		}
   349  
   350  		testSubpathReconstruction(ctx, f, l.hostExec, l.pod, false)
   351  	})
   352  
   353  	f.It("should unmount if pod is force deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   354  		e2eskipper.SkipUnlessSSHKeyPresent()
   355  		init(ctx)
   356  		ginkgo.DeferCleanup(cleanup)
   357  
   358  		if strings.HasPrefix(driverName, "hostPath") {
   359  			// TODO: This skip should be removed once #61446 is fixed
   360  			e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName)
   361  		}
   362  
   363  		testSubpathReconstruction(ctx, f, l.hostExec, l.pod, true)
   364  	})
   365  
   366  	ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) {
   367  		init(ctx)
   368  		ginkgo.DeferCleanup(cleanup)
   369  
   370  		// Create the directory
   371  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   372  
   373  		// Write the file in the volume from init container 2
   374  		setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
   375  
   376  		// Read it from inside the subPath from container 0
   377  		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
   378  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   379  	})
   380  
   381  	ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) {
   382  		init(ctx)
   383  		ginkgo.DeferCleanup(cleanup)
   384  
   385  		// Create the file
   386  		setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
   387  
   388  		// Write the file in the volume from init container 2
   389  		setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
   390  
   391  		// Read it from inside the subPath from container 0
   392  		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
   393  		testReadFile(ctx, f, volumePath, l.pod, 0)
   394  	})
   395  
   396  	ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) {
   397  		init(ctx)
   398  		ginkgo.DeferCleanup(cleanup)
   399  		if l.roVolSource == nil {
   400  			e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
   401  		}
   402  
   403  		origpod := l.pod.DeepCopy()
   404  
   405  		// Create the directory
   406  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   407  
   408  		// Write the file in the subPath from init container 1
   409  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   410  
   411  		// Read it from inside the subPath from container 0
   412  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   413  
   414  		// Reset the pod
   415  		l.pod = origpod
   416  
   417  		// Set volume source to read only
   418  		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
   419  
   420  		// Read it from inside the subPath from container 0
   421  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   422  	})
   423  
   424  	f.It("should verify container cannot write to subpath readonly volumes", f.WithSlow(), func(ctx context.Context) {
   425  		init(ctx)
   426  		ginkgo.DeferCleanup(cleanup)
   427  		if l.roVolSource == nil {
   428  			e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
   429  		}
   430  
   431  		// Format the volume while it's writable
   432  		formatVolume(ctx, f, l.formatPod)
   433  
   434  		// Set volume source to read only
   435  		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
   436  
   437  		// Write the file in the volume from container 0
   438  		setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
   439  
   440  		// Pod should fail
   441  		testPodFailSubpath(ctx, f, l.pod, true)
   442  	})
   443  
    444  	// This test is Linux-only because it fails on Windows when a directory is
    445  	// deleted from one container while another container is still using it.
   446  	ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) {
   447  		init(ctx)
   448  		ginkgo.DeferCleanup(cleanup)
   449  
   450  		// Change volume container to busybox so we can exec later
   451  		l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   452  		l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   453  		l.pod.Spec.Containers[1].Args = nil
   454  
   455  		ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
   456  		removeUnusedContainers(l.pod)
   457  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, l.pod, metav1.CreateOptions{})
   458  		framework.ExpectNoError(err, "while creating pod")
   459  		ginkgo.DeferCleanup(func(ctx context.Context) error {
   460  			ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   461  			return e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   462  		})
   463  
   464  		// Wait for pod to be running
   465  		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
   466  		framework.ExpectNoError(err, "while waiting for pod to be running")
   467  
   468  		// Exec into container that mounted the volume, delete subpath directory
   469  		rmCmd := fmt.Sprintf("rm -r %s", l.subPathDir)
   470  		_, err = podContainerExec(l.pod, 1, rmCmd)
   471  		framework.ExpectNoError(err, "while removing subpath directory")
   472  
    473  		// The pod is deleted by the DeferCleanup above, which waits for it to be successfully deleted.
   474  	})
   475  
   476  	// TODO: add a test case for the same disk with two partitions
   477  }
   478  
   479  // TestBasicSubpath runs basic subpath test
   480  func TestBasicSubpath(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod) {
   481  	TestBasicSubpathFile(ctx, f, contents, pod, volumePath)
   482  }
   483  
   484  // TestBasicSubpathFile runs basic subpath file test
   485  func TestBasicSubpathFile(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
   486  	setReadCommand(filepath, &pod.Spec.Containers[0])
   487  
   488  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   489  	removeUnusedContainers(pod)
   490  	e2eoutput.TestContainerOutput(ctx, f, "atomic-volume-subpath", pod, 0, []string{contents})
   491  
   492  	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   493  	err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   494  	framework.ExpectNoError(err, "while deleting pod")
   495  }
   496  
   497  func generateSuffixForPodName(s string) string {
   498  	// Pod name must:
   499  	//   1. consist of lower case alphanumeric characters or '-',
   500  	//   2. start and end with an alphanumeric character.
    501  	// (e.g. 'my-name' or '123-abc'; the regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
    502  	// Therefore, the suffix is generated by the following steps:
    503  	//   1. every character other than [A-Za-z0-9] is replaced with "-",
    504  	//   2. a random suffix is appended ('-[a-z0-9]{4}'),
    505  	//   3. the entire string is converted to lower case.
   506  	re := regexp.MustCompile("[^A-Za-z0-9]")
   507  	return strings.ToLower(fmt.Sprintf("%s-%s", re.ReplaceAllString(s, "-"), rand.String(4)))
   508  }
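// Illustrative outputs (the 4-character suffix is random):
//
//	generateSuffixForPodName("DynamicPV")    // "dynamicpv-x7k2"
//	generateSuffixForPodName("CSI_inline+v") // "csi-inline-v-9qz4"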
   509  
   510  // SubpathTestPod returns a pod spec for subpath tests
   511  func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, securityLevel admissionapi.Level) *v1.Pod {
   512  	var (
   513  		suffix          = generateSuffixForPodName(volumeType)
   514  		gracePeriod     = int64(1)
   515  		probeVolumeName = "liveness-probe-volume"
   516  		seLinuxOptions  = &v1.SELinuxOptions{Level: "s0:c0,c1"}
   517  	)
   518  
   519  	volumeMount := v1.VolumeMount{Name: volumeName, MountPath: volumePath}
   520  	volumeSubpathMount := v1.VolumeMount{Name: volumeName, MountPath: volumePath, SubPath: subpath}
   521  	probeMount := v1.VolumeMount{Name: probeVolumeName, MountPath: probeVolumePath}
   522  
   523  	initSubpathContainer := e2epod.NewAgnhostContainer(
   524  		fmt.Sprintf("test-init-subpath-%s", suffix),
   525  		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
   526  	initSubpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   527  	initVolumeContainer := e2epod.NewAgnhostContainer(
   528  		fmt.Sprintf("test-init-volume-%s", suffix),
   529  		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
   530  	initVolumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   531  	subpathContainer := e2epod.NewAgnhostContainer(
   532  		fmt.Sprintf("test-container-subpath-%s", suffix),
   533  		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
   534  	subpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   535  	volumeContainer := e2epod.NewAgnhostContainer(
   536  		fmt.Sprintf("test-container-volume-%s", suffix),
   537  		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
   538  	volumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   539  
   540  	return &v1.Pod{
   541  		ObjectMeta: metav1.ObjectMeta{
   542  			Name:      fmt.Sprintf("pod-subpath-test-%s", suffix),
   543  			Namespace: f.Namespace.Name,
   544  		},
   545  		Spec: v1.PodSpec{
   546  			InitContainers: []v1.Container{
   547  				{
   548  					Name:            fmt.Sprintf("init-volume-%s", suffix),
   549  					Image:           e2epod.GetDefaultTestImage(),
   550  					VolumeMounts:    []v1.VolumeMount{volumeMount, probeMount},
   551  					SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel),
   552  				},
   553  				initSubpathContainer,
   554  				initVolumeContainer,
   555  			},
   556  			Containers: []v1.Container{
   557  				subpathContainer,
   558  				volumeContainer,
   559  			},
   560  			RestartPolicy:                 v1.RestartPolicyNever,
   561  			TerminationGracePeriodSeconds: &gracePeriod,
   562  			Volumes: []v1.Volume{
   563  				{
   564  					Name:         volumeName,
   565  					VolumeSource: *source,
   566  				},
   567  				{
   568  					Name: probeVolumeName,
   569  					VolumeSource: v1.VolumeSource{
   570  						EmptyDir: &v1.EmptyDirVolumeSource{},
   571  					},
   572  				},
   573  			},
   574  			SecurityContext: e2epod.GeneratePodSecurityContext(nil, seLinuxOptions),
   575  		},
   576  	}
   577  }
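// The tests above address this pod by index, so the layout matters:
// InitContainers[0] is init-volume (runs the script set by setInitCommand),
// InitContainers[1] is test-init-subpath (writes through the subpath mount),
// InitContainers[2] is test-init-volume (writes through the full-volume mount),
// Containers[0] is test-container-subpath and Containers[1] is test-container-volume.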
   578  
   579  func containerIsUnused(container *v1.Container) bool {
    580  	// The agnhost image with a nil Command and either nil Args or just "mounttest" as Args does nothing; leave everything else alone.
   581  	return container.Image == imageutils.GetE2EImage(imageutils.Agnhost) && container.Command == nil &&
   582  		(container.Args == nil || (len(container.Args) == 1 && container.Args[0] == "mounttest"))
   583  }
   584  
   585  // removeUnusedContainers removes containers from a SubpathTestPod that aren't
    586  // needed for a test, e.g. to test for subpath mount failure only one
    587  // container needs to run and have its status checked.
   588  func removeUnusedContainers(pod *v1.Pod) {
   589  	initContainers := []v1.Container{}
   590  	containers := []v1.Container{}
   591  	if pod.Spec.InitContainers[0].Command != nil {
   592  		initContainers = append(initContainers, pod.Spec.InitContainers[0])
   593  	}
   594  	for _, ic := range pod.Spec.InitContainers[1:] {
   595  		if !containerIsUnused(&ic) {
   596  			initContainers = append(initContainers, ic)
   597  		}
   598  	}
   599  	containers = append(containers, pod.Spec.Containers[0])
   600  	if !containerIsUnused(&pod.Spec.Containers[1]) {
   601  		containers = append(containers, pod.Spec.Containers[1])
   602  	}
   603  	pod.Spec.InitContainers = initContainers
   604  	pod.Spec.Containers = containers
   605  }
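// For example, a test that only runs setInitCommand's script and then reads
// through the subpath container shrinks the pod to
// InitContainers = [init-volume] and Containers = [test-container-subpath];
// the unused agnhost containers would otherwise have to start and exit cleanly.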
   606  
   607  // volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
   608  // on first use
   609  func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
   610  	return &v1.Pod{
   611  		ObjectMeta: metav1.ObjectMeta{
   612  			Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
   613  		},
   614  		Spec: v1.PodSpec{
   615  			Containers: []v1.Container{
   616  				{
   617  					Name:    fmt.Sprintf("init-volume-%s", f.Namespace.Name),
   618  					Image:   e2epod.GetDefaultTestImage(),
   619  					Command: e2epod.GenerateScriptCmd("echo nothing"),
   620  					VolumeMounts: []v1.VolumeMount{
   621  						{
   622  							Name:      volumeName,
   623  							MountPath: "/vol",
   624  						},
   625  					},
   626  				},
   627  			},
   628  			RestartPolicy: v1.RestartPolicyNever,
   629  			Volumes: []v1.Volume{
   630  				{
   631  					Name:         volumeName,
   632  					VolumeSource: *volumeSource,
   633  				},
   634  			},
   635  		},
   636  	}
   637  }
   638  
   639  func setInitCommand(pod *v1.Pod, command string) {
   640  	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(command)
   641  }
   642  
   643  func setWriteCommand(file string, container *v1.Container) {
   644  	container.Args = []string{
   645  		"mounttest",
   646  		fmt.Sprintf("--new_file_0644=%v", file),
   647  	}
   648  	// See issue https://github.com/kubernetes/kubernetes/issues/94237 about file_mode
   649  	// not working well on Windows
   650  	// TODO: remove this check after issue is resolved
   651  	if !framework.NodeOSDistroIs("windows") {
   652  		container.Args = append(container.Args, fmt.Sprintf("--file_mode=%v", file))
   653  	}
   655  }
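// On Linux the resulting agnhost invocation looks like:
//
//	mounttest --new_file_0644=/test-volume/test-file --file_mode=/test-volume/test-file
//
// which writes "mount-tester new file" with mode 0644 and echoes the mode back.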
   656  
   657  func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
   658  	existingMounts := container.VolumeMounts
   659  	container.VolumeMounts = append(existingMounts, volumeMount)
   660  }
   661  
   662  func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
   663  	container.Args = []string{
   664  		"mounttest",
   665  		fmt.Sprintf("--new_file_0644=%v", file1),
   666  		fmt.Sprintf("--new_file_0666=%v", file2),
   667  	}
   668  }
   669  
   670  func testMultipleReads(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
   671  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   672  	removeUnusedContainers(pod)
   673  	e2eoutput.TestContainerOutput(ctx, f, "multi_subpath", pod, containerIndex, []string{
   674  		"content of file \"" + file1 + "\": mount-tester new file",
   675  		"content of file \"" + file2 + "\": mount-tester new file",
   676  	})
   677  }
   678  
   679  func setReadCommand(file string, container *v1.Container) {
   680  	container.Args = []string{
   681  		"mounttest",
   682  		fmt.Sprintf("--file_content_in_loop=%v", file),
   683  		fmt.Sprintf("--retry_time=%d", retryDuration),
   684  	}
   685  }
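// The resulting invocation, e.g.
//
//	mounttest --file_content_in_loop=/test-volume/test-file --retry_time=20
//
// keeps re-reading the file for up to retryDuration seconds, tolerating the
// short window before a write from another container becomes visible.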
   686  
   687  func testReadFile(ctx context.Context, f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
   688  	setReadCommand(file, &pod.Spec.Containers[containerIndex])
   689  
   690  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   691  	removeUnusedContainers(pod)
   692  	e2eoutput.TestContainerOutput(ctx, f, "subpath", pod, containerIndex, []string{
   693  		"content of file \"" + file + "\": mount-tester new file",
   694  	})
   695  
   696  	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   697  	err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   698  	framework.ExpectNoError(err, "while deleting pod")
   699  }
   700  
   701  func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) {
   702  	testPodFailSubpathError(ctx, f, pod, "subPath", allowContainerTerminationError)
   703  }
   704  
   705  func testPodFailSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) {
   706  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   707  	removeUnusedContainers(pod)
   708  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   709  	framework.ExpectNoError(err, "while creating pod")
   710  	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
   711  	ginkgo.By("Checking for subpath error in container status")
   712  	err = waitForPodSubpathError(ctx, f, pod, allowContainerTerminationError)
   713  	framework.ExpectNoError(err, "while waiting for subpath failure")
   714  }
   715  
   716  func findSubpathContainerName(pod *v1.Pod) string {
   717  	for _, container := range pod.Spec.Containers {
   718  		for _, mount := range container.VolumeMounts {
   719  			if mount.SubPath != "" {
   720  				return container.Name
   721  			}
   722  		}
   723  	}
   724  	return ""
   725  }
   726  
   727  func waitForPodSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) error {
   728  	subpathContainerName := findSubpathContainerName(pod)
   729  	if subpathContainerName == "" {
   730  		return fmt.Errorf("failed to find container that uses subpath")
   731  	}
   732  
   733  	waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
   734  		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
   735  		if err != nil {
   736  			return false, err
   737  		}
   738  		for _, status := range pod.Status.ContainerStatuses {
    739  			// This is the container that uses the subpath.
   740  			if status.Name == subpathContainerName {
   741  				switch {
   742  				case status.State.Terminated != nil:
   743  					if status.State.Terminated.ExitCode != 0 && allowContainerTerminationError {
   744  						return true, nil
   745  					}
   746  					return false, fmt.Errorf("subpath container unexpectedly terminated")
   747  				case status.State.Waiting != nil:
   748  					if status.State.Waiting.Reason == "CreateContainerConfigError" &&
   749  						strings.Contains(status.State.Waiting.Message, "subPath") {
   750  						return true, nil
   751  					}
   752  					return false, nil
   753  				default:
   754  					return false, nil
   755  				}
   756  			}
   757  		}
   758  		return false, nil
   759  	})
   760  	if waitErr != nil {
   761  		return fmt.Errorf("error waiting for pod subpath error to occur: %v", waitErr)
   762  	}
   763  	return nil
   764  }
   765  
   766  type podContainerRestartHooks struct {
   767  	AddLivenessProbeFunc  func(pod *v1.Pod, probeFilePath string)
   768  	FailLivenessProbeFunc func(pod *v1.Pod, probeFilePath string)
   769  	FixLivenessProbeFunc  func(pod *v1.Pod, probeFilePath string)
   770  }
   771  
   772  func (h *podContainerRestartHooks) AddLivenessProbe(pod *v1.Pod, probeFilePath string) {
   773  	if h.AddLivenessProbeFunc != nil {
   774  		h.AddLivenessProbeFunc(pod, probeFilePath)
   775  	}
   776  }
   777  
   778  func (h *podContainerRestartHooks) FailLivenessProbe(pod *v1.Pod, probeFilePath string) {
   779  	if h.FailLivenessProbeFunc != nil {
   780  		h.FailLivenessProbeFunc(pod, probeFilePath)
   781  	}
   782  }
   783  
   784  func (h *podContainerRestartHooks) FixLivenessProbe(pod *v1.Pod, probeFilePath string) {
   785  	if h.FixLivenessProbeFunc != nil {
   786  		h.FixLivenessProbeFunc(pod, probeFilePath)
   787  	}
   788  }
   789  
    790  // testPodContainerRestartWithHooks tests that a container restarts and then stabilizes.
    791  // The hooks run between the restart phases: adding, failing, and fixing the liveness probe.
   792  func testPodContainerRestartWithHooks(ctx context.Context, f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) {
   793  	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
   794  
   795  	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
   796  	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
   797  	pod.Spec.Containers[0].Args = nil
   798  	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   799  	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   800  	pod.Spec.Containers[1].Args = nil
   801  	hooks.AddLivenessProbe(pod, probeFilePath)
   802  
   803  	// Start pod
   804  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   805  	removeUnusedContainers(pod)
   806  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   807  	framework.ExpectNoError(err, "while creating pod")
   808  	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
   809  	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
   810  	framework.ExpectNoError(err, "while waiting for pod to be running")
   811  
   812  	ginkgo.By("Failing liveness probe")
   813  	hooks.FailLivenessProbe(pod, probeFilePath)
   814  
   815  	// Check that container has restarted. The time that this
   816  	// might take is estimated to be lower than for "delete pod"
   817  	// and "start pod".
   818  	ginkgo.By("Waiting for container to restart")
   819  	restarts := int32(0)
   820  	err = wait.PollImmediate(10*time.Second, f.Timeouts.PodDelete+f.Timeouts.PodStart, func() (bool, error) {
   821  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   822  		if err != nil {
   823  			return false, err
   824  		}
   825  		for _, status := range pod.Status.ContainerStatuses {
   826  			if status.Name == pod.Spec.Containers[0].Name {
   827  				framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
   828  				restarts = status.RestartCount
   829  				if restarts > 0 {
   830  					framework.Logf("Container has restart count: %v", restarts)
   831  					return true, nil
   832  				}
   833  			}
   834  		}
   835  		return false, nil
   836  	})
   837  	framework.ExpectNoError(err, "while waiting for container to restart")
   838  
   839  	// Fix liveness probe
   840  	ginkgo.By("Fix liveness probe")
   841  	hooks.FixLivenessProbe(pod, probeFilePath)
   842  
   843  	// Wait for container restarts to stabilize. Estimating the
   844  	// time for this is harder. In practice,
   845  	// framework.PodStartTimeout = f.Timeouts.PodStart = 5min
   846  	// turned out to be too low, therefore
   847  	// f.Timeouts.PodStartSlow = 15min is used now.
   848  	ginkgo.By("Waiting for container to stop restarting")
   849  	stableCount := int(0)
   850  	stableThreshold := int(time.Minute / framework.Poll)
   851  	err = wait.PollImmediate(framework.Poll, f.Timeouts.PodStartSlow, func() (bool, error) {
   852  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   853  		if err != nil {
   854  			return false, err
   855  		}
   856  		for _, status := range pod.Status.ContainerStatuses {
   857  			if status.Name == pod.Spec.Containers[0].Name {
   858  				if status.RestartCount == restarts {
   859  					stableCount++
   860  					if stableCount > stableThreshold {
   861  						framework.Logf("Container restart has stabilized")
   862  						return true, nil
   863  					}
   864  				} else {
   865  					restarts = status.RestartCount
   866  					stableCount = 0
   867  					framework.Logf("Container has restart count: %v", restarts)
   868  				}
   869  				break
   870  			}
   871  		}
   872  		return false, nil
   873  	})
   874  	framework.ExpectNoError(err, "while waiting for container to stabilize")
   875  }
   876  
   877  // testPodContainerRestart tests that the existing subpath mount is detected when a container restarts
   878  func testPodContainerRestart(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
   879  	testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{
   880  		AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   881  			p.Spec.Containers[0].LivenessProbe = &v1.Probe{
   882  				ProbeHandler: v1.ProbeHandler{
   883  					Exec: &v1.ExecAction{
   884  						Command: []string{"cat", probeFilePath},
   885  					},
   886  				},
   887  				InitialDelaySeconds: 1,
   888  				FailureThreshold:    1,
   889  				PeriodSeconds:       2,
   890  			}
   891  		},
   892  		FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   893  			out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
   894  			framework.Logf("Pod exec output: %v", out)
   895  			framework.ExpectNoError(err, "while failing liveness probe")
   896  		},
   897  		FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   898  			ginkgo.By("Rewriting the file")
   899  			var writeCmd string
   900  			if framework.NodeOSDistroIs("windows") {
   901  				writeCmd = fmt.Sprintf("echo test-after | Out-File -FilePath %v", probeFilePath)
   902  			} else {
   903  				writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
   904  			}
    905  			out, err := podContainerExec(p, 1, writeCmd)
   906  			framework.Logf("Pod exec output: %v", out)
   907  			framework.ExpectNoError(err, "while rewriting the probe file")
   908  		},
   909  	})
   910  }
   911  
    912  // TestPodContainerRestartWithConfigmapModified tests that a container restarts and stabilizes after the configmap it mounts is modified:
    913  // 1. valid container running
    914  // 2. update configmap
    915  // 3. container restarts
    916  // 4. container becomes stable after the configmap-mounted file has been modified
   917  func TestPodContainerRestartWithConfigmapModified(ctx context.Context, f *framework.Framework, original, modified *v1.ConfigMap) {
   918  	ginkgo.By("Create configmap")
   919  	_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, original, metav1.CreateOptions{})
   920  	if err != nil && !apierrors.IsAlreadyExists(err) {
   921  		framework.ExpectNoError(err, "while creating configmap to modify")
   922  	}
   923  
   924  	var subpath string
   925  	for k := range original.Data {
   926  		subpath = k
   927  		break
   928  	}
   929  	pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, admissionapi.LevelBaseline)
   930  	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))
   931  
   932  	modifiedValue := modified.Data[subpath]
   933  	testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{
   934  		AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   935  			p.Spec.Containers[0].LivenessProbe = &v1.Probe{
   936  				ProbeHandler: v1.ProbeHandler{
   937  					Exec: &v1.ExecAction{
    938  					// Expect the probe file to exist, or the configmap-mounted file to have been modified.
   939  						Command: []string{"sh", "-c", fmt.Sprintf("cat %s || test `cat %s` = '%s'", probeFilePath, volumePath, modifiedValue)},
   940  					},
   941  				},
   942  				InitialDelaySeconds: 1,
   943  				FailureThreshold:    1,
   944  				PeriodSeconds:       2,
   945  			}
   946  		},
   947  		FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   948  			out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
   949  			framework.Logf("Pod exec output: %v", out)
   950  			framework.ExpectNoError(err, "while failing liveness probe")
   951  		},
   952  		FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   953  			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, modified, metav1.UpdateOptions{})
   954  			framework.ExpectNoError(err, "while fixing liveness probe")
   955  		},
   956  	})
   958  }
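// A usage sketch with hypothetical ConfigMaps (the real callers live under
// test/e2e/storage):
//
//	original := &v1.ConfigMap{
//		ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"},
//		Data:       map[string]string{"configmap-key": "configmap-value"},
//	}
//	modified := original.DeepCopy()
//	modified.Data["configmap-key"] = "configmap-modified-value"
//	TestPodContainerRestartWithConfigmapModified(ctx, f, original, modified)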
   959  
   960  func testSubpathReconstruction(ctx context.Context, f *framework.Framework, hostExec storageutils.HostExec, pod *v1.Pod, forceDelete bool) {
   961  	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
   962  
    963  	// Disruptive tests run serially, so we can cache all volume global mount
    964  	// points and verify after the test that we do not leak any global mount point.
   965  	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
   966  	framework.ExpectNoError(err, "while listing schedulable nodes")
   967  	globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
   968  	for _, node := range nodeList.Items {
   969  		globalMountPointsByNode[node.Name] = storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, &node)
   970  	}
   971  
   972  	// Change to busybox
   973  	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
   974  	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
   975  	pod.Spec.Containers[0].Args = nil
   976  	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   977  	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   978  	pod.Spec.Containers[1].Args = nil
    979  	// If the grace period is too short, there is not enough time for the volume
    980  	// manager to clean up the volumes.
   981  	gracePeriod := int64(30)
   982  	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
   983  
   984  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   985  	removeUnusedContainers(pod)
   986  	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   987  	framework.ExpectNoError(err, "while creating pod")
   988  	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
   989  	framework.ExpectNoError(err, "while waiting for pod to be running")
   990  
   991  	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   992  	framework.ExpectNoError(err, "while getting pod")
   993  
   994  	var podNode *v1.Node
   995  	for i := range nodeList.Items {
   996  		if nodeList.Items[i].Name == pod.Spec.NodeName {
   997  			podNode = &nodeList.Items[i]
   998  		}
   999  	}
  1000  	gomega.Expect(podNode).ToNot(gomega.BeNil(), "pod node should exist in schedulable nodes")
  1001  
  1002  	storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, f.ClientSet, f, pod, forceDelete, true, nil, volumePath)
  1003  
  1004  	if podNode != nil {
  1005  		mountPoints := globalMountPointsByNode[podNode.Name]
  1006  		mountPointsAfter := storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, podNode)
  1007  		s1 := mountPointsAfter.Difference(mountPoints)
  1008  		s2 := mountPoints.Difference(mountPointsAfter)
  1009  		gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
  1010  		gomega.Expect(s2).To(gomega.BeEmpty(), "global mount points not found: %v", s2)
  1011  	}
  1012  }
  1013  
  1014  func formatVolume(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
  1015  	ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
  1016  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1017  	framework.ExpectNoError(err, "while creating volume init pod")
  1018  
  1019  	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
  1020  	framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
  1021  
  1022  	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
  1023  	framework.ExpectNoError(err, "while deleting volume init pod")
  1024  }
  1025  
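// podContainerExec runs a shell command in the pod's containerIndex-th
// container via "kubectl exec", using /bin/sh -c on Linux and powershell /c
// on Windows. For example (hypothetical directory):
//
//	out, err := podContainerExec(pod, 1, "rm -r /test-volume/dir")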
  1026  func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, error) {
   1027  	if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
  1028  		return "", fmt.Errorf("container not found in pod: index %d", containerIndex)
  1029  	}
  1030  	var shell string
  1031  	var option string
  1032  	if framework.NodeOSDistroIs("windows") {
  1033  		shell = "powershell"
  1034  		option = "/c"
  1035  	} else {
  1036  		shell = "/bin/sh"
  1037  		option = "-c"
  1038  	}
  1039  	return e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
  1040  }