k8s.io/kubernetes@v1.29.3/test/e2e/storage/testsuites/subpath.go

     1  /*
     2  Copyright 2018 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"path/filepath"
    23  	"regexp"
    24  	"strings"
    25  	"time"
    26  
    27  	"github.com/onsi/ginkgo/v2"
    28  	"github.com/onsi/gomega"
    29  
    30  	v1 "k8s.io/api/core/v1"
    31  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/util/errors"
    34  	"k8s.io/apimachinery/pkg/util/rand"
    35  	"k8s.io/apimachinery/pkg/util/sets"
    36  	"k8s.io/apimachinery/pkg/util/wait"
    37  	"k8s.io/kubernetes/test/e2e/framework"
    38  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    39  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    40  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    41  	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    42  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    43  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    44  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    45  	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
    46  	imageutils "k8s.io/kubernetes/test/utils/image"
    47  	admissionapi "k8s.io/pod-security-admission/api"
    48  )
    49  
    50  var (
    51  	volumePath      = "/test-volume"
    52  	volumeName      = "test-volume"
    53  	probeVolumePath = "/probe-volume"
    54  	probeFilePath   = probeVolumePath + "/probe-file"
    55  	fileName        = "test-file"
    56  	retryDuration   = 20
    57  	mountImage      = imageutils.GetE2EImage(imageutils.Agnhost)
    58  )
    59  
    60  type subPathTestSuite struct {
    61  	tsInfo storageframework.TestSuiteInfo
    62  }
    63  
    64  // InitCustomSubPathTestSuite returns a subPathTestSuite that implements the
    65  // TestSuite interface, using custom test patterns
    66  func InitCustomSubPathTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
    67  	return &subPathTestSuite{
    68  		tsInfo: storageframework.TestSuiteInfo{
    69  			Name:         "subPath",
    70  			TestPatterns: patterns,
    71  			SupportedSizeRange: e2evolume.SizeRange{
    72  				Min: "1Mi",
    73  			},
    74  		},
    75  	}
    76  }
    77  
    78  // InitSubPathTestSuite returns a subPathTestSuite that implements the
    79  // TestSuite interface, using the test suite's default patterns
    80  func InitSubPathTestSuite() storageframework.TestSuite {
    81  	patterns := []storageframework.TestPattern{
    82  		storageframework.DefaultFsInlineVolume,
    83  		storageframework.DefaultFsPreprovisionedPV,
    84  		storageframework.DefaultFsDynamicPV,
    85  		storageframework.NtfsDynamicPV,
    86  	}
    87  	return InitCustomSubPathTestSuite(patterns)
    88  }
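
         // A driver that wants only a subset of this coverage can register the suite
         // with its own patterns; a minimal sketch (the wrapper name is illustrative):
         //
         //	func initDynamicOnlySubPathSuite() storageframework.TestSuite {
         //		return InitCustomSubPathTestSuite([]storageframework.TestPattern{
         //			storageframework.DefaultFsDynamicPV,
         //		})
         //	}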
    89  
    90  func (s *subPathTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
    91  	return s.tsInfo
    92  }
    93  
    94  func (s *subPathTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
    95  	skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(
    96  		storageframework.PreprovisionedPV,
    97  		storageframework.InlineVolume))
    98  }
    99  
   100  func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
   101  	type local struct {
   102  		config *storageframework.PerTestConfig
   103  
   104  		hostExec          storageutils.HostExec
   105  		resource          *storageframework.VolumeResource
   106  		roVolSource       *v1.VolumeSource
   107  		pod               *v1.Pod
   108  		formatPod         *v1.Pod
   109  		subPathDir        string
   110  		filePathInSubpath string
   111  		filePathInVolume  string
   112  
   113  		migrationCheck *migrationOpCheck
   114  	}
   115  	var l local
   116  
   117  	// Beware that it also registers an AfterEach which renders f unusable. Any code using
   118  	// f must run inside an It or Context callback.
   119  	f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))
   120  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   121  
   122  	init := func(ctx context.Context) {
   123  		l = local{}
   124  
   125  		// Now do the more expensive test initialization.
   126  		l.config = driver.PrepareTest(ctx, f)
   127  		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName)
   128  		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
   129  		l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange)
   130  		l.hostExec = storageutils.NewHostExec(f)
   131  
   132  		// Set up the resources the subPath tests depend on
   133  		volType := pattern.VolType
   134  		switch volType {
   135  		case storageframework.InlineVolume:
   136  			if iDriver, ok := driver.(storageframework.InlineVolumeTestDriver); ok {
   137  				l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
   138  			}
   139  		case storageframework.PreprovisionedPV:
   140  			l.roVolSource = &v1.VolumeSource{
   141  				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   142  					ClaimName: l.resource.Pvc.Name,
   143  					ReadOnly:  true,
   144  				},
   145  			}
   146  		case storageframework.DynamicPV:
   147  			l.roVolSource = &v1.VolumeSource{
   148  				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   149  					ClaimName: l.resource.Pvc.Name,
   150  					ReadOnly:  true,
   151  				},
   152  			}
   153  		default:
   154  			framework.Failf("SubPath test doesn't support: %s", volType)
   155  		}
   156  
   157  		subPath := f.Namespace.Name
   158  		l.pod = SubpathTestPod(f, subPath, string(volType), l.resource.VolSource, admissionapi.LevelPrivileged)
   159  		e2epod.SetNodeSelection(&l.pod.Spec, l.config.ClientNodeSelection)
   160  
   161  		l.formatPod = volumeFormatPod(f, l.resource.VolSource)
   162  		e2epod.SetNodeSelection(&l.formatPod.Spec, l.config.ClientNodeSelection)
   163  
   164  		l.subPathDir = filepath.Join(volumePath, subPath)
   165  		l.filePathInSubpath = filepath.Join(volumePath, fileName)
   166  		l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
   167  	}
   168  
   169  	cleanup := func(ctx context.Context) {
   170  		var errs []error
   171  		if l.pod != nil {
   172  			ginkgo.By("Deleting pod")
   173  			err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod)
   174  			errs = append(errs, err)
   175  			l.pod = nil
   176  		}
   177  
   178  		if l.resource != nil {
   179  			errs = append(errs, l.resource.CleanupResource(ctx))
   180  			l.resource = nil
   181  		}
   182  
   183  		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
   184  
   185  		if l.hostExec != nil {
   186  			l.hostExec.Cleanup(ctx)
   187  		}
   188  
   189  		l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
   190  	}
   191  
   192  	driverName := driver.GetDriverInfo().Name
   193  
   194  	ginkgo.It("should support non-existent path", func(ctx context.Context) {
   195  		init(ctx)
   196  		ginkgo.DeferCleanup(cleanup)
   197  
   198  		// Write the file in the subPath from init container 1
   199  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   200  
   201  		// Read it from outside the subPath from container 1
   202  		testReadFile(ctx, f, l.filePathInVolume, l.pod, 1)
   203  	})
   204  
   205  	ginkgo.It("should support existing directory", func(ctx context.Context) {
   206  		init(ctx)
   207  		ginkgo.DeferCleanup(cleanup)
   208  
   209  		// Create the directory
   210  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   211  
   212  		// Write the file in the subPath from init container 1
   213  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   214  
   215  		// Read it from outside the subPath from container 1
   216  		testReadFile(ctx, f, l.filePathInVolume, l.pod, 1)
   217  	})
   218  
   219  	ginkgo.It("should support existing single file [LinuxOnly]", func(ctx context.Context) {
   220  		init(ctx)
   221  		ginkgo.DeferCleanup(cleanup)
   222  
   223  		// Create the file in the init container
   224  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
   225  
   226  		// Read it from inside the subPath from container 0
   227  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   228  	})
   229  
   230  	ginkgo.It("should support file as subpath [LinuxOnly]", func(ctx context.Context) {
   231  		init(ctx)
   232  		ginkgo.DeferCleanup(cleanup)
   233  
   234  		// Create the file in the init container
   235  		setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
   236  
   237  		TestBasicSubpath(ctx, f, f.Namespace.Name, l.pod)
   238  	})
   239  
   240  	f.It("should fail if subpath directory is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   241  		init(ctx)
   242  		ginkgo.DeferCleanup(cleanup)
   243  
   244  		// Create the subpath outside the volume
   245  		var command string
   246  		if framework.NodeOSDistroIs("windows") {
   247  			command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value \\Windows", l.subPathDir)
   248  		} else {
   249  			command = fmt.Sprintf("ln -s /bin %s", l.subPathDir)
   250  		}
   251  		setInitCommand(l.pod, command)
   252  		// Pod should fail
   253  		testPodFailSubpath(ctx, f, l.pod, false)
   254  	})
   255  
   256  	f.It("should fail if subpath file is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   257  		init(ctx)
   258  		ginkgo.DeferCleanup(cleanup)
   259  
   260  		// Create the subpath outside the volume
   261  		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
   262  
   263  		// Pod should fail
   264  		testPodFailSubpath(ctx, f, l.pod, false)
   265  	})
   266  
   267  	f.It("should fail if non-existent subpath is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   268  		init(ctx)
   269  		ginkgo.DeferCleanup(cleanup)
   270  
   271  		// Create the subpath outside the volume
   272  		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
   273  
   274  		// Pod should fail
   275  		testPodFailSubpath(ctx, f, l.pod, false)
   276  	})
   277  
   278  	f.It("should fail if subpath with backstepping is outside the volume", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   279  		init(ctx)
   280  		ginkgo.DeferCleanup(cleanup)
   281  
   282  		// Create the subpath outside the volume
   283  		var command string
   284  		if framework.NodeOSDistroIs("windows") {
   285  			command = fmt.Sprintf("New-Item -ItemType SymbolicLink -Path %s -value ..\\", l.subPathDir)
   286  		} else {
   287  			command = fmt.Sprintf("ln -s ../ %s", l.subPathDir)
   288  		}
   289  		setInitCommand(l.pod, command)
   290  		// Pod should fail
   291  		testPodFailSubpath(ctx, f, l.pod, false)
   292  	})
   293  
   294  	f.It("should support creating multiple subpath from same volumes", f.WithSlow(), func(ctx context.Context) {
   295  		init(ctx)
   296  		ginkgo.DeferCleanup(cleanup)
   297  
   298  		subpathDir1 := filepath.Join(volumePath, "subpath1")
   299  		subpathDir2 := filepath.Join(volumePath, "subpath2")
   300  		filepath1 := filepath.Join("/test-subpath1", fileName)
   301  		filepath2 := filepath.Join("/test-subpath2", fileName)
   302  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
   303  
   304  		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
   305  			Name:      volumeName,
   306  			MountPath: "/test-subpath1",
   307  			SubPath:   "subpath1",
   308  		})
   309  		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
   310  			Name:      volumeName,
   311  			MountPath: "/test-subpath2",
   312  			SubPath:   "subpath2",
   313  		})
   314  
   315  		// Write the files from container 0 and immediately read them back
   316  		addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
   317  		testMultipleReads(ctx, f, l.pod, 0, filepath1, filepath2)
   318  	})
   319  
   320  	f.It("should support restarting containers using directory as subpath", f.WithSlow(), func(ctx context.Context) {
   321  		init(ctx)
   322  		ginkgo.DeferCleanup(cleanup)
   323  
   324  		// Create the directory
    325  		command := fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath)
   327  		setInitCommand(l.pod, command)
   328  		testPodContainerRestart(ctx, f, l.pod)
   329  	})
   330  
   331  	f.It("should support restarting containers using file as subpath", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   332  		init(ctx)
   333  		ginkgo.DeferCleanup(cleanup)
   334  
   335  		// Create the file
   336  		setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
   337  
   338  		testPodContainerRestart(ctx, f, l.pod)
   339  	})
   340  
   341  	f.It("should unmount if pod is gracefully deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   342  		e2eskipper.SkipUnlessSSHKeyPresent()
   343  		init(ctx)
   344  		ginkgo.DeferCleanup(cleanup)
   345  
   346  		if strings.HasPrefix(driverName, "hostPath") {
   347  			// TODO: This skip should be removed once #61446 is fixed
   348  			e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName)
   349  		}
   350  
   351  		testSubpathReconstruction(ctx, f, l.hostExec, l.pod, false)
   352  	})
   353  
   354  	f.It("should unmount if pod is force deleted while kubelet is down", f.WithDisruptive(), f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
   355  		e2eskipper.SkipUnlessSSHKeyPresent()
   356  		init(ctx)
   357  		ginkgo.DeferCleanup(cleanup)
   358  
   359  		if strings.HasPrefix(driverName, "hostPath") {
   360  			// TODO: This skip should be removed once #61446 is fixed
   361  			e2eskipper.Skipf("Driver %s does not support reconstruction, skipping", driverName)
   362  		}
   363  
   364  		testSubpathReconstruction(ctx, f, l.hostExec, l.pod, true)
   365  	})
   366  
   367  	ginkgo.It("should support readOnly directory specified in the volumeMount", func(ctx context.Context) {
   368  		init(ctx)
   369  		ginkgo.DeferCleanup(cleanup)
   370  
   371  		// Create the directory
   372  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   373  
   374  		// Write the file in the volume from init container 2
   375  		setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
   376  
   377  		// Read it from inside the subPath from container 0
   378  		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
   379  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   380  	})
   381  
   382  	ginkgo.It("should support readOnly file specified in the volumeMount [LinuxOnly]", func(ctx context.Context) {
   383  		init(ctx)
   384  		ginkgo.DeferCleanup(cleanup)
   385  
   386  		// Create the file
   387  		setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
   388  
   389  		// Write the file in the volume from init container 2
   390  		setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
   391  
   392  		// Read it from inside the subPath from container 0
   393  		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
   394  		testReadFile(ctx, f, volumePath, l.pod, 0)
   395  	})
   396  
   397  	ginkgo.It("should support existing directories when readOnly specified in the volumeSource", func(ctx context.Context) {
   398  		init(ctx)
   399  		ginkgo.DeferCleanup(cleanup)
   400  		if l.roVolSource == nil {
   401  			e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
   402  		}
   403  
   404  		origpod := l.pod.DeepCopy()
   405  
   406  		// Create the directory
   407  		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
   408  
   409  		// Write the file in the subPath from init container 1
   410  		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
   411  
   412  		// Read it from inside the subPath from container 0
   413  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   414  
   415  		// Reset the pod
   416  		l.pod = origpod
   417  
   418  		// Set volume source to read only
   419  		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
   420  
   421  		// Read it from inside the subPath from container 0
   422  		testReadFile(ctx, f, l.filePathInSubpath, l.pod, 0)
   423  	})
   424  
   425  	f.It("should verify container cannot write to subpath readonly volumes", f.WithSlow(), func(ctx context.Context) {
   426  		init(ctx)
   427  		ginkgo.DeferCleanup(cleanup)
   428  		if l.roVolSource == nil {
   429  			e2eskipper.Skipf("Driver %s on volume type %s doesn't support readOnly source", driverName, pattern.VolType)
   430  		}
   431  
   432  		// Format the volume while it's writable
   433  		formatVolume(ctx, f, l.formatPod)
   434  
   435  		// Set volume source to read only
   436  		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
   437  
   438  		// Write the file in the volume from container 0
   439  		setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
   440  
   441  		// Pod should fail
   442  		testPodFailSubpath(ctx, f, l.pod, true)
   443  	})
   444  
    445  	// Mark this test Linux-only because it fails on Windows when a directory is
    446  	// deleted from one container while another container is still using it.
   447  	ginkgo.It("should be able to unmount after the subpath directory is deleted [LinuxOnly]", func(ctx context.Context) {
   448  		init(ctx)
   449  		ginkgo.DeferCleanup(cleanup)
   450  
    451  		// Change the volume container to the default test image so we can exec into it later
   452  		l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   453  		l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   454  		l.pod.Spec.Containers[1].Args = nil
   455  
   456  		ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
   457  		removeUnusedContainers(l.pod)
   458  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, l.pod, metav1.CreateOptions{})
   459  		framework.ExpectNoError(err, "while creating pod")
   460  		ginkgo.DeferCleanup(func(ctx context.Context) error {
   461  			ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   462  			return e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   463  		})
   464  
   465  		// Wait for pod to be running
   466  		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
   467  		framework.ExpectNoError(err, "while waiting for pod to be running")
   468  
   469  		// Exec into container that mounted the volume, delete subpath directory
   470  		rmCmd := fmt.Sprintf("rm -r %s", l.subPathDir)
   471  		_, err = podContainerExec(l.pod, 1, rmCmd)
   472  		framework.ExpectNoError(err, "while removing subpath directory")
   473  
   474  		// Delete pod (from defer) and wait for it to be successfully deleted
   475  	})
   476  
   477  	// TODO: add a test case for the same disk with two partitions
   478  }
   479  
   480  // TestBasicSubpath runs basic subpath test
   481  func TestBasicSubpath(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod) {
   482  	TestBasicSubpathFile(ctx, f, contents, pod, volumePath)
   483  }
   484  
   485  // TestBasicSubpathFile runs basic subpath file test
   486  func TestBasicSubpathFile(ctx context.Context, f *framework.Framework, contents string, pod *v1.Pod, filepath string) {
   487  	setReadCommand(filepath, &pod.Spec.Containers[0])
   488  
   489  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   490  	removeUnusedContainers(pod)
   491  	e2eoutput.TestContainerOutput(ctx, f, "atomic-volume-subpath", pod, 0, []string{contents})
   492  
   493  	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   494  	err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   495  	framework.ExpectNoError(err, "while deleting pod")
   496  }
   497  
   498  func generateSuffixForPodName(s string) string {
    499  	// A pod name must:
    500  	//   1. consist of lower case alphanumeric characters or '-',
    501  	//   2. start and end with an alphanumeric character.
    502  	// (e.g. 'my-name' or '123-abc'; the regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')
    503  	// Therefore, the suffix is generated in the following steps:
    504  	//   1. every character other than [A-Za-z0-9] is replaced with "-",
    505  	//   2. lower case alphanumeric characters are appended ('-[a-z0-9]{4}' is added),
    506  	//   3. the entire string is converted to lower case.
   507  	re := regexp.MustCompile("[^A-Za-z0-9]")
   508  	return strings.ToLower(fmt.Sprintf("%s-%s", re.ReplaceAllString(s, "-"), rand.String(4)))
   509  }
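
         // For instance, with volumeType "DynamicPV" the generated suffix becomes
         // something like "dynamicpv-b4k7": there is nothing to replace, four random
         // characters are appended, and the whole string is lower-cased.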
   510  
   511  // SubpathTestPod returns a pod spec for subpath tests
   512  func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, securityLevel admissionapi.Level) *v1.Pod {
   513  	var (
   514  		suffix          = generateSuffixForPodName(volumeType)
   515  		gracePeriod     = int64(1)
   516  		probeVolumeName = "liveness-probe-volume"
   517  		seLinuxOptions  = &v1.SELinuxOptions{Level: "s0:c0,c1"}
   518  	)
   519  
   520  	volumeMount := v1.VolumeMount{Name: volumeName, MountPath: volumePath}
   521  	volumeSubpathMount := v1.VolumeMount{Name: volumeName, MountPath: volumePath, SubPath: subpath}
   522  	probeMount := v1.VolumeMount{Name: probeVolumeName, MountPath: probeVolumePath}
   523  
   524  	initSubpathContainer := e2epod.NewAgnhostContainer(
   525  		fmt.Sprintf("test-init-subpath-%s", suffix),
   526  		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
   527  	initSubpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   528  	initVolumeContainer := e2epod.NewAgnhostContainer(
   529  		fmt.Sprintf("test-init-volume-%s", suffix),
   530  		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
   531  	initVolumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   532  	subpathContainer := e2epod.NewAgnhostContainer(
   533  		fmt.Sprintf("test-container-subpath-%s", suffix),
   534  		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
   535  	subpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   536  	volumeContainer := e2epod.NewAgnhostContainer(
   537  		fmt.Sprintf("test-container-volume-%s", suffix),
   538  		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
   539  	volumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   540  
   541  	return &v1.Pod{
   542  		ObjectMeta: metav1.ObjectMeta{
   543  			Name:      fmt.Sprintf("pod-subpath-test-%s", suffix),
   544  			Namespace: f.Namespace.Name,
   545  		},
   546  		Spec: v1.PodSpec{
   547  			InitContainers: []v1.Container{
   548  				{
   549  					Name:            fmt.Sprintf("init-volume-%s", suffix),
   550  					Image:           e2epod.GetDefaultTestImage(),
   551  					VolumeMounts:    []v1.VolumeMount{volumeMount, probeMount},
   552  					SecurityContext: e2epod.GenerateContainerSecurityContext(securityLevel),
   553  				},
   554  				initSubpathContainer,
   555  				initVolumeContainer,
   556  			},
   557  			Containers: []v1.Container{
   558  				subpathContainer,
   559  				volumeContainer,
   560  			},
   561  			RestartPolicy:                 v1.RestartPolicyNever,
   562  			TerminationGracePeriodSeconds: &gracePeriod,
   563  			Volumes: []v1.Volume{
   564  				{
   565  					Name:         volumeName,
   566  					VolumeSource: *source,
   567  				},
   568  				{
   569  					Name: probeVolumeName,
   570  					VolumeSource: v1.VolumeSource{
   571  						EmptyDir: &v1.EmptyDirVolumeSource{},
   572  					},
   573  				},
   574  			},
   575  			SecurityContext: e2epod.GeneratePodSecurityContext(nil, seLinuxOptions),
   576  		},
   577  	}
   578  }
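
         // The pod built above has a fixed layout that the tests in DefineTests
         // index into directly:
         //
         //	InitContainers[0]: plain container mounting the whole volume; its
         //	                   command is set per test via setInitCommand
         //	InitContainers[1]: agnhost "mounttest" container mounting the subpath
         //	InitContainers[2]: agnhost "mounttest" container mounting the whole volume
         //	Containers[0]:     agnhost "mounttest" container mounting the subpath
         //	Containers[1]:     agnhost "mounttest" container mounting the whole volume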
   579  
   580  func containerIsUnused(container *v1.Container) bool {
   581  	// mountImage with nil command and nil Args or with just "mounttest" as Args does nothing. Leave everything else
    582  	// A mountImage container with a nil Command and either nil Args or Args of
         	// just "mounttest" does nothing. Every other container is left in place.
   583  		(container.Args == nil || (len(container.Args) == 1 && container.Args[0] == "mounttest"))
   584  }
   585  
   586  // removeUnusedContainers removes containers from a SubpathTestPod that aren't
    587  // needed for a test. For example, to test for a subpath mount failure, only
    588  // one container needs to run and have its status checked.
   589  func removeUnusedContainers(pod *v1.Pod) {
   590  	initContainers := []v1.Container{}
   591  	containers := []v1.Container{}
   592  	if pod.Spec.InitContainers[0].Command != nil {
   593  		initContainers = append(initContainers, pod.Spec.InitContainers[0])
   594  	}
   595  	for _, ic := range pod.Spec.InitContainers[1:] {
   596  		if !containerIsUnused(&ic) {
   597  			initContainers = append(initContainers, ic)
   598  		}
   599  	}
   600  	containers = append(containers, pod.Spec.Containers[0])
   601  	if !containerIsUnused(&pod.Spec.Containers[1]) {
   602  		containers = append(containers, pod.Spec.Containers[1])
   603  	}
   604  	pod.Spec.InitContainers = initContainers
   605  	pod.Spec.Containers = containers
   606  }
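
         // For example, in "should support non-existent path" only InitContainers[1]
         // is given write Args and only Containers[1] a read command, so the pod is
         // trimmed to that writer, Containers[0] (always kept), and that reader;
         // InitContainers[0] (no command set) and InitContainers[2] are dropped.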
   607  
   608  // volumeFormatPod returns a Pod that does nothing but will cause the plugin to format a filesystem
   609  // on first use
   610  func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.Pod {
   611  	return &v1.Pod{
   612  		ObjectMeta: metav1.ObjectMeta{
   613  			Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name),
   614  		},
   615  		Spec: v1.PodSpec{
   616  			Containers: []v1.Container{
   617  				{
   618  					Name:    fmt.Sprintf("init-volume-%s", f.Namespace.Name),
   619  					Image:   e2epod.GetDefaultTestImage(),
   620  					Command: e2epod.GenerateScriptCmd("echo nothing"),
   621  					VolumeMounts: []v1.VolumeMount{
   622  						{
   623  							Name:      volumeName,
   624  							MountPath: "/vol",
   625  						},
   626  					},
   627  				},
   628  			},
   629  			RestartPolicy: v1.RestartPolicyNever,
   630  			Volumes: []v1.Volume{
   631  				{
   632  					Name:         volumeName,
   633  					VolumeSource: *volumeSource,
   634  				},
   635  			},
   636  		},
   637  	}
   638  }
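
         // The pod is consumed by formatVolume below: it is created, waited on until
         // it succeeds, and then deleted, leaving the volume formatted so the
         // readOnly tests can remount it read-only afterwards.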
   639  
   640  func setInitCommand(pod *v1.Pod, command string) {
   641  	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(command)
   642  }
   643  
   644  func setWriteCommand(file string, container *v1.Container) {
   645  	container.Args = []string{
   646  		"mounttest",
   647  		fmt.Sprintf("--new_file_0644=%v", file),
   648  	}
   649  	// See issue https://github.com/kubernetes/kubernetes/issues/94237 about file_mode
   650  	// not working well on Windows
   651  	// TODO: remove this check after issue is resolved
   652  	if !framework.NodeOSDistroIs("windows") {
   653  		container.Args = append(container.Args, fmt.Sprintf("--file_mode=%v", file))
   654  	}
   656  }
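
         // On Linux the resulting container invocation is, for example:
         //
         //	agnhost mounttest --new_file_0644=/test-volume/test-file --file_mode=/test-volume/test-file
         //
         // i.e. mounttest creates the file with mode 0644 and then reports the mode
         // it observes.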
   657  
   658  func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
   659  	existingMounts := container.VolumeMounts
   660  	container.VolumeMounts = append(existingMounts, volumeMount)
   661  }
   662  
   663  func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
   664  	container.Args = []string{
   665  		"mounttest",
   666  		fmt.Sprintf("--new_file_0644=%v", file1),
   667  		fmt.Sprintf("--new_file_0666=%v", file2),
   668  	}
   669  }
   670  
   671  func testMultipleReads(ctx context.Context, f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
   672  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   673  	removeUnusedContainers(pod)
   674  	e2eoutput.TestContainerOutput(ctx, f, "multi_subpath", pod, containerIndex, []string{
   675  		"content of file \"" + file1 + "\": mount-tester new file",
   676  		"content of file \"" + file2 + "\": mount-tester new file",
   677  	})
   678  }
   679  
   680  func setReadCommand(file string, container *v1.Container) {
   681  	container.Args = []string{
   682  		"mounttest",
   683  		fmt.Sprintf("--file_content_in_loop=%v", file),
   684  		fmt.Sprintf("--retry_time=%d", retryDuration),
   685  	}
   686  }
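
         // retryDuration (20) is passed as --retry_time, so mounttest re-reads the
         // file in a loop instead of failing on the first attempt; this gives a
         // writer container in the same pod time to create the file before the
         // reader's output is checked.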
   687  
   688  func testReadFile(ctx context.Context, f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
   689  	setReadCommand(file, &pod.Spec.Containers[containerIndex])
   690  
   691  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   692  	removeUnusedContainers(pod)
   693  	e2eoutput.TestContainerOutput(ctx, f, "subpath", pod, containerIndex, []string{
   694  		"content of file \"" + file + "\": mount-tester new file",
   695  	})
   696  
   697  	ginkgo.By(fmt.Sprintf("Deleting pod %s", pod.Name))
   698  	err := e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
   699  	framework.ExpectNoError(err, "while deleting pod")
   700  }
   701  
   702  func testPodFailSubpath(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) {
   703  	testPodFailSubpathError(ctx, f, pod, "subPath", allowContainerTerminationError)
   704  }
   705  
   706  func testPodFailSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, errorMsg string, allowContainerTerminationError bool) {
   707  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   708  	removeUnusedContainers(pod)
   709  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   710  	framework.ExpectNoError(err, "while creating pod")
   711  	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
   712  	ginkgo.By("Checking for subpath error in container status")
   713  	err = waitForPodSubpathError(ctx, f, pod, allowContainerTerminationError)
   714  	framework.ExpectNoError(err, "while waiting for subpath failure")
   715  }
   716  
   717  func findSubpathContainerName(pod *v1.Pod) string {
   718  	for _, container := range pod.Spec.Containers {
   719  		for _, mount := range container.VolumeMounts {
   720  			if mount.SubPath != "" {
   721  				return container.Name
   722  			}
   723  		}
   724  	}
   725  	return ""
   726  }
   727  
   728  func waitForPodSubpathError(ctx context.Context, f *framework.Framework, pod *v1.Pod, allowContainerTerminationError bool) error {
   729  	subpathContainerName := findSubpathContainerName(pod)
   730  	if subpathContainerName == "" {
   731  		return fmt.Errorf("failed to find container that uses subpath")
   732  	}
   733  
   734  	waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
   735  		pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
   736  		if err != nil {
   737  			return false, err
   738  		}
   739  		for _, status := range pod.Status.ContainerStatuses {
    740  			// This status belongs to the container that uses the subpath
   741  			if status.Name == subpathContainerName {
   742  				switch {
   743  				case status.State.Terminated != nil:
   744  					if status.State.Terminated.ExitCode != 0 && allowContainerTerminationError {
   745  						return true, nil
   746  					}
   747  					return false, fmt.Errorf("subpath container unexpectedly terminated")
   748  				case status.State.Waiting != nil:
   749  					if status.State.Waiting.Reason == "CreateContainerConfigError" &&
   750  						strings.Contains(status.State.Waiting.Message, "subPath") {
   751  						return true, nil
   752  					}
   753  					return false, nil
   754  				default:
   755  					return false, nil
   756  				}
   757  			}
   758  		}
   759  		return false, nil
   760  	})
   761  	if waitErr != nil {
   762  		return fmt.Errorf("error waiting for pod subpath error to occur: %v", waitErr)
   763  	}
   764  	return nil
   765  }
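
         // Note the asymmetry above: a terminated subpath container fails the wait
         // immediately unless the caller allows termination errors, while the
         // expected success signal is the kubelet holding the container in Waiting
         // with reason "CreateContainerConfigError" and a message mentioning
         // "subPath".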
   766  
   767  type podContainerRestartHooks struct {
   768  	AddLivenessProbeFunc  func(pod *v1.Pod, probeFilePath string)
   769  	FailLivenessProbeFunc func(pod *v1.Pod, probeFilePath string)
   770  	FixLivenessProbeFunc  func(pod *v1.Pod, probeFilePath string)
   771  }
   772  
   773  func (h *podContainerRestartHooks) AddLivenessProbe(pod *v1.Pod, probeFilePath string) {
   774  	if h.AddLivenessProbeFunc != nil {
   775  		h.AddLivenessProbeFunc(pod, probeFilePath)
   776  	}
   777  }
   778  
   779  func (h *podContainerRestartHooks) FailLivenessProbe(pod *v1.Pod, probeFilePath string) {
   780  	if h.FailLivenessProbeFunc != nil {
   781  		h.FailLivenessProbeFunc(pod, probeFilePath)
   782  	}
   783  }
   784  
   785  func (h *podContainerRestartHooks) FixLivenessProbe(pod *v1.Pod, probeFilePath string) {
   786  	if h.FixLivenessProbeFunc != nil {
   787  		h.FixLivenessProbeFunc(pod, probeFilePath)
   788  	}
   789  }
   790  
    791  // testPodContainerRestartWithHooks tests that a restarting container
    792  // eventually stabilizes. The hooks inject behavior between the restarts.
   793  func testPodContainerRestartWithHooks(ctx context.Context, f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) {
   794  	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
   795  
   796  	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
   797  	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
   798  	pod.Spec.Containers[0].Args = nil
   799  	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   800  	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   801  	pod.Spec.Containers[1].Args = nil
   802  	hooks.AddLivenessProbe(pod, probeFilePath)
   803  
   804  	// Start pod
   805  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   806  	removeUnusedContainers(pod)
   807  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   808  	framework.ExpectNoError(err, "while creating pod")
   809  	ginkgo.DeferCleanup(e2epod.DeletePodWithWait, f.ClientSet, pod)
   810  	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
   811  	framework.ExpectNoError(err, "while waiting for pod to be running")
   812  
   813  	ginkgo.By("Failing liveness probe")
   814  	hooks.FailLivenessProbe(pod, probeFilePath)
   815  
   816  	// Check that container has restarted. The time that this
   817  	// might take is estimated to be lower than for "delete pod"
   818  	// and "start pod".
   819  	ginkgo.By("Waiting for container to restart")
   820  	restarts := int32(0)
   821  	err = wait.PollImmediate(10*time.Second, f.Timeouts.PodDelete+f.Timeouts.PodStart, func() (bool, error) {
   822  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   823  		if err != nil {
   824  			return false, err
   825  		}
   826  		for _, status := range pod.Status.ContainerStatuses {
   827  			if status.Name == pod.Spec.Containers[0].Name {
   828  				framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
   829  				restarts = status.RestartCount
   830  				if restarts > 0 {
   831  					framework.Logf("Container has restart count: %v", restarts)
   832  					return true, nil
   833  				}
   834  			}
   835  		}
   836  		return false, nil
   837  	})
   838  	framework.ExpectNoError(err, "while waiting for container to restart")
   839  
   840  	// Fix liveness probe
   841  	ginkgo.By("Fix liveness probe")
   842  	hooks.FixLivenessProbe(pod, probeFilePath)
   843  
   844  	// Wait for container restarts to stabilize. Estimating the
   845  	// time for this is harder. In practice,
   846  	// framework.PodStartTimeout = f.Timeouts.PodStart = 5min
   847  	// turned out to be too low, therefore
   848  	// f.Timeouts.PodStartSlow = 15min is used now.
   849  	ginkgo.By("Waiting for container to stop restarting")
   850  	stableCount := int(0)
   851  	stableThreshold := int(time.Minute / framework.Poll)
   852  	err = wait.PollImmediate(framework.Poll, f.Timeouts.PodStartSlow, func() (bool, error) {
   853  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   854  		if err != nil {
   855  			return false, err
   856  		}
   857  		for _, status := range pod.Status.ContainerStatuses {
   858  			if status.Name == pod.Spec.Containers[0].Name {
   859  				if status.RestartCount == restarts {
   860  					stableCount++
   861  					if stableCount > stableThreshold {
   862  						framework.Logf("Container restart has stabilized")
   863  						return true, nil
   864  					}
   865  				} else {
   866  					restarts = status.RestartCount
   867  					stableCount = 0
   868  					framework.Logf("Container has restart count: %v", restarts)
   869  				}
   870  				break
   871  			}
   872  		}
   873  		return false, nil
   874  	})
   875  	framework.ExpectNoError(err, "while waiting for container to stabilize")
   876  }
   877  
   878  // testPodContainerRestart tests that the existing subpath mount is detected when a container restarts
   879  func testPodContainerRestart(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
   880  	testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{
   881  		AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   882  			p.Spec.Containers[0].LivenessProbe = &v1.Probe{
   883  				ProbeHandler: v1.ProbeHandler{
   884  					Exec: &v1.ExecAction{
   885  						Command: []string{"cat", probeFilePath},
   886  					},
   887  				},
   888  				InitialDelaySeconds: 1,
   889  				FailureThreshold:    1,
   890  				PeriodSeconds:       2,
   891  			}
   892  		},
   893  		FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   894  			out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
   895  			framework.Logf("Pod exec output: %v", out)
   896  			framework.ExpectNoError(err, "while failing liveness probe")
   897  		},
   898  		FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   899  			ginkgo.By("Rewriting the file")
   900  			var writeCmd string
   901  			if framework.NodeOSDistroIs("windows") {
   902  				writeCmd = fmt.Sprintf("echo test-after | Out-File -FilePath %v", probeFilePath)
   903  			} else {
   904  				writeCmd = fmt.Sprintf("echo test-after > %v", probeFilePath)
   905  			}
    906  			out, err := podContainerExec(p, 1, writeCmd)
   907  			framework.Logf("Pod exec output: %v", out)
   908  			framework.ExpectNoError(err, "while rewriting the probe file")
   909  		},
   910  	})
   911  }
   912  
    913  // TestPodContainerRestartWithConfigmapModified tests that a container restarts and stabilizes after its configmap is modified.
   914  // 1. valid container running
   915  // 2. update configmap
   916  // 3. container restarts
   917  // 4. container becomes stable after configmap mounted file has been modified
   918  func TestPodContainerRestartWithConfigmapModified(ctx context.Context, f *framework.Framework, original, modified *v1.ConfigMap) {
   919  	ginkgo.By("Create configmap")
   920  	_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, original, metav1.CreateOptions{})
   921  	if err != nil && !apierrors.IsAlreadyExists(err) {
   922  		framework.ExpectNoError(err, "while creating configmap to modify")
   923  	}
   924  
   925  	var subpath string
   926  	for k := range original.Data {
   927  		subpath = k
   928  		break
   929  	}
   930  	pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, admissionapi.LevelBaseline)
   931  	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))
   932  
   933  	modifiedValue := modified.Data[subpath]
   934  	testPodContainerRestartWithHooks(ctx, f, pod, &podContainerRestartHooks{
   935  		AddLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   936  			p.Spec.Containers[0].LivenessProbe = &v1.Probe{
   937  				ProbeHandler: v1.ProbeHandler{
   938  					Exec: &v1.ExecAction{
   939  						// Expect probe file exist or configmap mounted file has been modified.
   940  						Command: []string{"sh", "-c", fmt.Sprintf("cat %s || test `cat %s` = '%s'", probeFilePath, volumePath, modifiedValue)},
   941  					},
   942  				},
   943  				InitialDelaySeconds: 1,
   944  				FailureThreshold:    1,
   945  				PeriodSeconds:       2,
   946  			}
   947  		},
   948  		FailLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   949  			out, err := podContainerExec(p, 1, fmt.Sprintf("rm %v", probeFilePath))
   950  			framework.Logf("Pod exec output: %v", out)
   951  			framework.ExpectNoError(err, "while failing liveness probe")
   952  		},
   953  		FixLivenessProbeFunc: func(p *v1.Pod, probeFilePath string) {
   954  			_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(ctx, modified, metav1.UpdateOptions{})
   955  			framework.ExpectNoError(err, "while fixing liveness probe")
   956  		},
   957  	})
   959  }
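
         // A caller might drive this with two configmaps that differ in a single key;
         // a minimal sketch (names and values are illustrative):
         //
         //	original := &v1.ConfigMap{
         //		ObjectMeta: metav1.ObjectMeta{Name: "subpath-configmap"},
         //		Data:       map[string]string{"configmap-key": "configmap-value"},
         //	}
         //	modified := original.DeepCopy()
         //	modified.Data["configmap-key"] = "modified-value"
         //	TestPodContainerRestartWithConfigmapModified(ctx, f, original, modified)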
   960  
   961  func testSubpathReconstruction(ctx context.Context, f *framework.Framework, hostExec storageutils.HostExec, pod *v1.Pod, forceDelete bool) {
   962  	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
   963  
    964  	// Disruptive tests run serially, so we can cache all volume global mount
    965  	// points and verify after the test that we do not leak any global mount point.
   966  	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
   967  	framework.ExpectNoError(err, "while listing schedulable nodes")
   968  	globalMountPointsByNode := make(map[string]sets.String, len(nodeList.Items))
   969  	for _, node := range nodeList.Items {
   970  		globalMountPointsByNode[node.Name] = storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, &node)
   971  	}
   972  
    973  	// Switch the containers to the default test image and make them sleep
   974  	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
   975  	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
   976  	pod.Spec.Containers[0].Args = nil
   977  	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
   978  	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
   979  	pod.Spec.Containers[1].Args = nil
    980  	// If the grace period is too short, the volume manager does not have enough
    981  	// time to clean up the volumes
   982  	gracePeriod := int64(30)
   983  	pod.Spec.TerminationGracePeriodSeconds = &gracePeriod
   984  
   985  	ginkgo.By(fmt.Sprintf("Creating pod %s", pod.Name))
   986  	removeUnusedContainers(pod)
   987  	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
   988  	framework.ExpectNoError(err, "while creating pod")
   989  	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
   990  	framework.ExpectNoError(err, "while waiting for pod to be running")
   991  
   992  	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   993  	framework.ExpectNoError(err, "while getting pod")
   994  
   995  	var podNode *v1.Node
   996  	for i := range nodeList.Items {
   997  		if nodeList.Items[i].Name == pod.Spec.NodeName {
   998  			podNode = &nodeList.Items[i]
   999  		}
  1000  	}
  1001  	gomega.Expect(podNode).ToNot(gomega.BeNil(), "pod node should exist in schedulable nodes")
  1002  
  1003  	storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, f.ClientSet, f, pod, forceDelete, true, nil, volumePath)
  1004  
  1005  	if podNode != nil {
  1006  		mountPoints := globalMountPointsByNode[podNode.Name]
  1007  		mountPointsAfter := storageutils.FindVolumeGlobalMountPoints(ctx, hostExec, podNode)
  1008  		s1 := mountPointsAfter.Difference(mountPoints)
  1009  		s2 := mountPoints.Difference(mountPointsAfter)
  1010  		gomega.Expect(s1).To(gomega.BeEmpty(), "global mount points leaked: %v", s1)
  1011  		gomega.Expect(s2).To(gomega.BeEmpty(), "global mount points not found: %v", s2)
  1012  	}
  1013  }
  1014  
  1015  func formatVolume(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
  1016  	ginkgo.By(fmt.Sprintf("Creating pod to format volume %s", pod.Name))
  1017  	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1018  	framework.ExpectNoError(err, "while creating volume init pod")
  1019  
  1020  	err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
  1021  	framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
  1022  
  1023  	err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
  1024  	framework.ExpectNoError(err, "while deleting volume init pod")
  1025  }
  1026  
  1027  func podContainerExec(pod *v1.Pod, containerIndex int, command string) (string, error) {
   1028  	if containerIndex < 0 || containerIndex >= len(pod.Spec.Containers) {
  1029  		return "", fmt.Errorf("container not found in pod: index %d", containerIndex)
  1030  	}
  1031  	var shell string
  1032  	var option string
  1033  	if framework.NodeOSDistroIs("windows") {
  1034  		shell = "powershell"
  1035  		option = "/c"
  1036  	} else {
  1037  		shell = "/bin/sh"
  1038  		option = "-c"
  1039  	}
  1040  	return e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", shell, option, command)
  1041  }
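
         // On Linux nodes this is roughly equivalent to running:
         //
         //	kubectl --namespace <ns> exec <pod> --container <name> -- /bin/sh -c '<command>'
         //
         // On Windows nodes the shell and flag become "powershell /c" instead.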