k8s.io/kubernetes@v1.29.3/test/e2e/framework/volume/fixtures.go

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  /*
    18   * This test checks that various VolumeSources are working.
    19   *
    20   * There are two ways to test the volumes:
    21   * 1) With a containerized server (NFS, Ceph, iSCSI, ...)
    22   * The test creates a server pod exporting a simple 'index.html' file.
    23   * Then it uses an appropriate VolumeSource to import this file into a client pod
    24   * and checks that the pod can see the file. It does so by importing the file
    25   * into the web server root and loading the index.html from it.
    26   *
    27   * These tests work only when privileged containers are allowed; exporting
    28   * various filesystems (e.g. NFS) usually needs mounting or
    29   * other privileged magic in the server pod.
    30   *
    31   * Note that the server containers are for testing purposes only and should not
    32   * be used in production.
    33   *
    34   * 2) With a server outside of Kubernetes
    35   * An appropriate server must exist somewhere outside
    36   * the tested Kubernetes cluster. The test itself creates a new volume
    37   * and checks that Kubernetes can use it as a volume.
    38   */
    39  
    40  package volume
    41  
    42  import (
    43  	"context"
    44  	"crypto/sha256"
    45  	"fmt"
    46  	"path/filepath"
    47  	"strconv"
    48  	"strings"
    49  	"time"
    50  
    51  	v1 "k8s.io/api/core/v1"
    52  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    53  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    54  	"k8s.io/apimachinery/pkg/util/wait"
    55  	clientset "k8s.io/client-go/kubernetes"
    56  	clientexec "k8s.io/client-go/util/exec"
    57  	"k8s.io/kubernetes/test/e2e/framework"
    58  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    59  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    60  	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    61  	imageutils "k8s.io/kubernetes/test/utils/image"
    62  	admissionapi "k8s.io/pod-security-admission/api"
    63  	uexec "k8s.io/utils/exec"
    64  
    65  	"github.com/onsi/ginkgo/v2"
    66  	"github.com/onsi/gomega"
    67  )
    68  
    69  const (
    70  	// Kb is byte size of kilobyte
    71  	Kb int64 = 1000
    72  	// Mb is byte size of megabyte
    73  	Mb int64 = 1000 * Kb
    74  	// Gb is byte size of gigabyte
    75  	Gb int64 = 1000 * Mb
    76  	// Tb is byte size of terabyte
    77  	Tb int64 = 1000 * Gb
    78  	// KiB is byte size of kibibyte
    79  	KiB int64 = 1024
    80  	// MiB is byte size of mebibyte
    81  	MiB int64 = 1024 * KiB
    82  	// GiB is byte size of gibibyte
    83  	GiB int64 = 1024 * MiB
    84  	// TiB is byte size of tebibyte
    85  	TiB int64 = 1024 * GiB
    86  
    87  	// VolumeServerPodStartupTimeout is a waiting period for volume server (Ceph, ...) to initialize itself.
    88  	VolumeServerPodStartupTimeout = 3 * time.Minute
    89  
    90  	// PodCleanupTimeout is a waiting period for the pod to be cleaned up and unmount its volumes, so we
    91  	// don't tear down containers with an NFS/Ceph server too early.
    92  	PodCleanupTimeout = 20 * time.Second
    93  )
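
// Illustrative sketch (not part of the original file): the byte-size constants above are handy
// when sizing test volumes via resource.Quantity; the variable name and the use of
// k8s.io/apimachinery/pkg/api/resource are assumptions for the example only.
//
//	claimSize := resource.NewQuantity(2*GiB, resource.BinarySI) // renders as "2Gi"
//	framework.Logf("requesting a claim of %s (%d bytes)", claimSize.String(), 2*GiB)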
    94  
    95  // SizeRange encapsulates a range of sizes specified as minimum and maximum quantity strings.
    96  // Both values are optional.
    97  // If a size is not set, no limitation is assumed: a very small size (e.g. 1Ki) may be used
    98  // as Min and a considerably big size (e.g. 10Ei) as Max, which makes it possible to calculate
    99  // the intersection of the given intervals (if it exists).
   100  type SizeRange struct {
   101  	// Max quantity specified as a string including units. E.g. "3Gi".
   102  	// If the Max size is unset, it will be assigned a default valid maximum size of 10Ei,
   103  	// which is defined in test/e2e/storage/testsuites/base.go
   104  	Max string
   105  	// Min quantity specified as a string including units. E.g. "1Gi".
   106  	// If the Min size is unset, it will be assigned a default valid minimum size of 1Ki,
   107  	// which is defined in test/e2e/storage/testsuites/base.go
   108  	Min string
   109  }
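
// Illustrative sketch (not part of the original file): a SizeRange asking for at least 1Gi and
// at most 10Gi; leaving either field empty falls back to the defaults described above.
//
//	sizeRange := SizeRange{Min: "1Gi", Max: "10Gi"}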
   110  
   111  // TestConfig is a struct for the configuration of one test. The test consists of:
   112  // - server pod - runs serverImage, exports ports[]
   113  // - client pod - does not need any special configuration
   114  type TestConfig struct {
   115  	Namespace string
   116  	// Prefix of all pods. Typically the test name.
   117  	Prefix string
   118  	// Name of container image for the server pod.
   119  	ServerImage string
   120  	// Ports to export from the server pod. TCP only.
   121  	ServerPorts []int
   122  	// Commands to run in the container image.
   123  	ServerCmds []string
   124  	// Arguments to pass to the container image.
   125  	ServerArgs []string
   126  	// Volumes to be mounted into the server container from the host,
   127  	// as a map <host (source) path> -> <container (dst.) path>.
   128  	// If <host (source) path> is empty, an emptyDir is mounted instead.
   129  	ServerVolumes map[string]string
   130  	// Message to wait for before starting clients
   131  	ServerReadyMessage string
   132  	// Use HostNetwork for the server
   133  	ServerHostNetwork bool
   134  	// Wait for the pod to terminate successfully
   135  	// False indicates that the pod is long running
   136  	WaitForCompletion bool
   137  	// ClientNodeSelection restricts which nodes the client pod may run on. Default is any node.
   138  	ClientNodeSelection e2epod.NodeSelection
   139  }
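
// Illustrative sketch (not part of the original file): a minimal TestConfig for a containerized
// NFS server, mirroring what NewNFSServer below builds; the namespace value is an assumption.
//
//	config := TestConfig{
//		Namespace:          "e2e-volume-1234",
//		Prefix:             "nfs",
//		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
//		ServerPorts:        []int{2049},
//		ServerVolumes:      map[string]string{"": "/exports"},
//		ServerReadyMessage: "NFS started",
//	}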
   140  
   141  // Test contains a volume to mount into a client pod and its
   142  // expected content.
   143  type Test struct {
   144  	Volume v1.VolumeSource
   145  	Mode   v1.PersistentVolumeMode
   146  	// Name of file to read/write in FileSystem mode
   147  	File            string
   148  	ExpectedContent string
   149  }
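
// Illustrative sketch (not part of the original file): a block-mode Test reading raw bytes from a
// PVC-backed device; the claim name and expected content are assumptions.
//
//	blockTest := Test{
//		Volume:          v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "block-pvc"}},
//		Mode:            v1.PersistentVolumeBlock,
//		File:            "", // unused in Block mode
//		ExpectedContent: "block volume content",
//	}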
   150  
   151  // NewNFSServer is an NFS-specific wrapper for CreateStorageServer.
   152  func NewNFSServer(ctx context.Context, cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
   153  	return NewNFSServerWithNodeName(ctx, cs, namespace, args, "")
   154  }
   155  
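// NewNFSServerWithNodeName is an NFS-specific wrapper for CreateStorageServer that additionally
// pins the server pod to the node with the given nodeName (no pinning if nodeName is empty).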
   156  func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, namespace string, args []string, nodeName string) (config TestConfig, pod *v1.Pod, host string) {
   157  	config = TestConfig{
   158  		Namespace:          namespace,
   159  		Prefix:             "nfs",
   160  		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
   161  		ServerPorts:        []int{2049},
   162  		ServerVolumes:      map[string]string{"": "/exports"},
   163  		ServerReadyMessage: "NFS started",
   164  	}
   165  	if nodeName != "" {
   166  		config.ClientNodeSelection = e2epod.NodeSelection{Name: nodeName}
   167  	}
   168  
   169  	if len(args) > 0 {
   170  		config.ServerArgs = args
   171  	}
   172  	pod, host = CreateStorageServer(ctx, cs, config)
   173  	if strings.Contains(host, ":") {
   174  		host = "[" + host + "]"
   175  	}
   176  	return config, pod, host
   177  }
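
// Illustrative sketch (not part of the original file): typical use of NewNFSServer from an e2e
// test; "f" (a *framework.Framework) and "ctx" are assumed to come from the surrounding test.
//
//	config, _, host := NewNFSServer(ctx, f.ClientSet, f.Namespace.Name, nil)
//	defer TestServerCleanup(ctx, f, config)
//	framework.Logf("NFS server for %s is reachable at %s", config.Prefix, host)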
   178  
   179  // CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
   180  // and ip address string are returned.
   181  // Note: Expect() is called so no error is returned.
   182  func CreateStorageServer(ctx context.Context, cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
   183  	pod = startVolumeServer(ctx, cs, config)
   184  	gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
   185  	ip = pod.Status.PodIP
   186  	gomega.Expect(ip).NotTo(gomega.BeEmpty(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
   187  	framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
   188  	return pod, ip
   189  }
   190  
   191  // GetVolumeAttachmentName returns the name of the VolumeAttachment for the PV that is bound to the PVC
   192  // with the passed in claimName and claimNamespace: a hash of the volume handle, provisioner, and node name.
   193  func GetVolumeAttachmentName(ctx context.Context, cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
   194  	var nodeName string
   195  	// For provisioning tests, ClientNodeSelection is not set so we do not know the NodeName of the VolumeAttachment of the PV that is
   196  	// bound to the PVC with the passed in claimName and claimNamespace. We need this NodeName because it is used to generate the
   197  	// attachmentName that is returned, and used to look up a certain VolumeAttachment in WaitForVolumeAttachmentTerminated.
   198  	// To get the nodeName of the VolumeAttachment, we get all the VolumeAttachments, look for the VolumeAttachment with a
   199  	// PersistentVolumeName equal to the PV that is bound to the passed in PVC, and then we get the NodeName from that VolumeAttachment.
   200  	if config.ClientNodeSelection.Name == "" {
   201  		claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
   202  		pvName := claim.Spec.VolumeName
   203  		volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{})
   204  		for _, volumeAttachment := range volumeAttachments.Items {
   205  			if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
   206  				nodeName = volumeAttachment.Spec.NodeName
   207  				break
   208  			}
   209  		}
   210  	} else {
   211  		nodeName = config.ClientNodeSelection.Name
   212  	}
   213  	handle := getVolumeHandle(ctx, cs, claimName, claimNamespace)
   214  	attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
   215  	return fmt.Sprintf("csi-%x", attachmentHash)
   216  }
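
// Illustrative sketch (not part of the original file): the returned name has the same form the
// function computes above; the handle, provisioner, and node values are placeholders.
//
//	hash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", "volume-handle", "csi.example.com", "node-1")))
//	name := fmt.Sprintf("csi-%x", hash) // equals GetVolumeAttachmentName's result for these inputs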
   217  
   218  // getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
   219  func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName string, claimNamespace string) string {
   220  	// re-get the claim to the latest state with bound volume
   221  	claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
   222  	if err != nil {
   223  		framework.ExpectNoError(err, "Cannot get PVC")
   224  		return ""
   225  	}
   226  	pvName := claim.Spec.VolumeName
   227  	pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
   228  	if err != nil {
   229  		framework.ExpectNoError(err, "Cannot get PV")
   230  		return ""
   231  	}
   232  	if pv.Spec.CSI == nil {
   233  		gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
   234  		return ""
   235  	}
   236  	return pv.Spec.CSI.VolumeHandle
   237  }
   238  
   239  // WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
   240  func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
   241  	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
   242  		_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
   243  		if err != nil {
   244  			// if the volumeattachment object is not found, it means it has been terminated.
   245  			if apierrors.IsNotFound(err) {
   246  				return true, nil
   247  			}
   248  			return false, err
   249  		}
   250  		return false, nil
   251  	})
   252  	if waitErr != nil {
   253  		return fmt.Errorf("error waiting for volume attachment %v to terminate: %v", attachmentName, waitErr)
   254  	}
   255  	return nil
   256  }
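
// Illustrative sketch (not part of the original file): waiting for the attachment of a test PV to
// disappear after its pod is gone; the provisioner, claim name, timeout, and the variables cs,
// config, and ns are assumptions.
//
//	attachmentName := GetVolumeAttachmentName(ctx, cs, config, "csi.example.com", "my-claim", ns)
//	err := WaitForVolumeAttachmentTerminated(ctx, attachmentName, cs, 5*time.Minute)
//	framework.ExpectNoError(err, "VolumeAttachment %s still exists", attachmentName)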
   257  
   258  // startVolumeServer starts a container specified by config.ServerImage and exports all
   259  // config.ServerPorts from it. The returned pod should be used to get the server
   260  // IP address and create an appropriate VolumeSource.
   261  func startVolumeServer(ctx context.Context, client clientset.Interface, config TestConfig) *v1.Pod {
   262  	podClient := client.CoreV1().Pods(config.Namespace)
   263  
   264  	portCount := len(config.ServerPorts)
   265  	serverPodPorts := make([]v1.ContainerPort, portCount)
   266  
   267  	for i := 0; i < portCount; i++ {
   268  		portName := fmt.Sprintf("%s-%d", config.Prefix, i)
   269  
   270  		serverPodPorts[i] = v1.ContainerPort{
   271  			Name:          portName,
   272  			ContainerPort: int32(config.ServerPorts[i]),
   273  			Protocol:      v1.ProtocolTCP,
   274  		}
   275  	}
   276  
   277  	volumeCount := len(config.ServerVolumes)
   278  	volumes := make([]v1.Volume, volumeCount)
   279  	mounts := make([]v1.VolumeMount, volumeCount)
   280  
   281  	i := 0
   282  	for src, dst := range config.ServerVolumes {
   283  		mountName := fmt.Sprintf("path%d", i)
   284  		volumes[i].Name = mountName
   285  		if src == "" {
   286  			volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
   287  		} else {
   288  			volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
   289  				Path: src,
   290  			}
   291  		}
   292  
   293  		mounts[i].Name = mountName
   294  		mounts[i].ReadOnly = false
   295  		mounts[i].MountPath = dst
   296  
   297  		i++
   298  	}
   299  
   300  	serverPodName := fmt.Sprintf("%s-server", config.Prefix)
   301  	ginkgo.By(fmt.Sprint("creating ", serverPodName, " pod"))
   302  	privileged := new(bool)
   303  	*privileged = true
   304  
   305  	restartPolicy := v1.RestartPolicyAlways
   306  	if config.WaitForCompletion {
   307  		restartPolicy = v1.RestartPolicyNever
   308  	}
   309  	serverPod := &v1.Pod{
   310  		TypeMeta: metav1.TypeMeta{
   311  			Kind:       "Pod",
   312  			APIVersion: "v1",
   313  		},
   314  		ObjectMeta: metav1.ObjectMeta{
   315  			Name: serverPodName,
   316  			Labels: map[string]string{
   317  				"role": serverPodName,
   318  			},
   319  		},
   320  
   321  		Spec: v1.PodSpec{
   322  			HostNetwork: config.ServerHostNetwork,
   323  			Containers: []v1.Container{
   324  				{
   325  					Name:  serverPodName,
   326  					Image: config.ServerImage,
   327  					SecurityContext: &v1.SecurityContext{
   328  						Privileged: privileged,
   329  					},
   330  					Command:      config.ServerCmds,
   331  					Args:         config.ServerArgs,
   332  					Ports:        serverPodPorts,
   333  					VolumeMounts: mounts,
   334  				},
   335  			},
   336  			Volumes:       volumes,
   337  			RestartPolicy: restartPolicy,
   338  		},
   339  	}
   340  
   341  	if config.ClientNodeSelection.Name != "" {
   342  		serverPod.Spec.NodeName = config.ClientNodeSelection.Name
   343  	}
   344  
   345  	var pod *v1.Pod
   346  	serverPod, err := podClient.Create(ctx, serverPod, metav1.CreateOptions{})
   347  	// ok if the server pod already exists. TODO: make this controllable by callers
   348  	if err != nil {
   349  		if apierrors.IsAlreadyExists(err) {
   350  			framework.Logf("Ignoring \"already exists\" error, re-getting pod...")
   351  			ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
   352  			serverPod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
   353  			framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
   354  			pod = serverPod
   355  		} else {
   356  			framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
   357  		}
   358  	}
   359  	if config.WaitForCompletion {
   360  		framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, client, serverPod.Name, serverPod.Namespace))
   361  		framework.ExpectNoError(podClient.Delete(ctx, serverPod.Name, metav1.DeleteOptions{}))
   362  	} else {
   363  		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, client, serverPod))
   364  		if pod == nil {
   365  			ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
   366  			pod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
   367  			framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
   368  		}
   369  	}
   370  	if config.ServerReadyMessage != "" {
   371  		_, err := e2epodoutput.LookForStringInLogWithoutKubectl(ctx, client, pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
   372  		framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
   373  	}
   374  	return pod
   375  }
   376  
   377  // TestServerCleanup cleans up the server pod.
   378  func TestServerCleanup(ctx context.Context, f *framework.Framework, config TestConfig) {
   379  	ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
   380  	defer ginkgo.GinkgoRecover()
   381  
   382  	if config.ServerImage == "" {
   383  		return
   384  	}
   385  
   386  	err := e2epod.DeletePodWithWaitByName(ctx, f.ClientSet, config.Prefix+"-server", config.Namespace)
   387  	framework.ExpectNoError(err, "delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
   388  }
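
// Illustrative sketch (not part of the original file): cleanup is usually registered right after
// the server has started so that it also runs when the test fails; "f" and the use of
// ginkgo.DeferCleanup here are assumptions.
//
//	config, _, _ := NewNFSServer(ctx, f.ClientSet, f.Namespace.Name, nil)
//	ginkgo.DeferCleanup(TestServerCleanup, f, config)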
   389  
   390  func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
   391  	ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
   392  	var gracePeriod int64 = 1
   393  	var command string
   394  
   395  	/*
   396  	This condition fixes running storage e2e tests in an SELinux environment.
   397  	The HostPath volume plugin creates a directory within /tmp on the host machine, to be mounted as a volume.
   398  	The injector pod writes content to the volume, and a client pod tries to read the contents and verify them.
   399  	When SELinux is enabled on the host, the client pod cannot read the content and gets "permission denied".
   400  	The client pod is therefore run as privileged, so that it can access the volume content even when SELinux is enabled on the host.
   401  	*/
   402  	securityLevel := admissionapi.LevelBaseline // TODO (#118184): also support LevelRestricted
   403  	if privileged || config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
   404  		securityLevel = admissionapi.LevelPrivileged
   405  	}
   406  	command = "while true ; do sleep 2; done "
   407  	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
   408  	clientPod := &v1.Pod{
   409  		TypeMeta: metav1.TypeMeta{
   410  			Kind:       "Pod",
   411  			APIVersion: "v1",
   412  		},
   413  		ObjectMeta: metav1.ObjectMeta{
   414  			Name: config.Prefix + "-" + podSuffix,
   415  			Labels: map[string]string{
   416  				"role": config.Prefix + "-" + podSuffix,
   417  			},
   418  		},
   419  		Spec: v1.PodSpec{
   420  			Containers: []v1.Container{
   421  				{
   422  					Name:       config.Prefix + "-" + podSuffix,
   423  					Image:      e2epod.GetDefaultTestImage(),
   424  					WorkingDir: "/opt",
   425  					// An imperative and easily debuggable container which reads/writes vol contents for
   426  					// us to scan in the tests or by eye.
   427  					// We expect that /opt is empty in the minimal containers which we use in this test.
   428  					Command:      e2epod.GenerateScriptCmd(command),
   429  					VolumeMounts: []v1.VolumeMount{},
   430  				},
   431  			},
   432  			TerminationGracePeriodSeconds: &gracePeriod,
   433  			SecurityContext:               e2epod.GeneratePodSecurityContext(fsGroup, seLinuxOptions),
   434  			Volumes:                       []v1.Volume{},
   435  		},
   436  	}
   437  	e2epod.SetNodeSelection(&clientPod.Spec, config.ClientNodeSelection)
   438  
   439  	for i, test := range tests {
   440  		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)
   441  
   442  		// We need to make the container privileged when SELinux is enabled on the
   443  		// host, so the test can write data to a location like /tmp. Also, due to
   444  		// the Docker bug below, it's not currently possible to map a device into
   445  		// a privileged container, so we don't go privileged for block volumes.
   446  		// https://github.com/moby/moby/issues/35991
   447  		if privileged && test.Mode == v1.PersistentVolumeBlock {
   448  			securityLevel = admissionapi.LevelBaseline
   449  		}
   450  		clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)
   451  
   452  		if test.Mode == v1.PersistentVolumeBlock {
   453  			clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
   454  				Name:       volumeName,
   455  				DevicePath: fmt.Sprintf("/opt/%d", i),
   456  			})
   457  		} else {
   458  			clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
   459  				Name:      volumeName,
   460  				MountPath: fmt.Sprintf("/opt/%d", i),
   461  			})
   462  		}
   463  		clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
   464  			Name:         volumeName,
   465  			VolumeSource: test.Volume,
   466  		})
   467  	}
   468  	podsNamespacer := client.CoreV1().Pods(config.Namespace)
   469  	clientPod, err := podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
   470  	if err != nil {
   471  		return nil, err
   472  	}
   473  	if slow {
   474  		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
   475  	} else {
   476  		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
   477  	}
   478  	if err != nil {
   479  		e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
   480  		_ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodDelete)
   481  		return nil, err
   482  	}
   483  	return clientPod, nil
   484  }
   485  
   486  func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
   487  	ginkgo.By("Checking that text file contents are perfect.")
   488  	for i, test := range tests {
   489  		if test.Mode == v1.PersistentVolumeBlock {
   490  			// Block: check content
   491  			deviceName := fmt.Sprintf("/opt/%d", i)
   492  			commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
   493  			_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
   494  			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)
   495  
   496  			// Check that it's a real block device
   497  			CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
   498  		} else {
   499  			// Filesystem: check content
   500  			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
   501  			commands := GenerateReadFileCmd(fileName)
   502  			_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
   503  			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)
   504  
   505  			// Check that a directory has been mounted
   506  			dirName := filepath.Dir(fileName)
   507  			CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
   508  
   509  			if !framework.NodeOSDistroIs("windows") {
   510  				// Filesystem: check fsgroup
   511  				if fsGroup != nil {
   512  					ginkgo.By("Checking fsGroup is correct.")
   513  					_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
   514  					framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
   515  				}
   516  
   517  				// Filesystem: check fsType
   518  				if fsType != "" {
   519  					ginkgo.By("Checking fsType is correct.")
   520  					_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
   521  					framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
   522  				}
   523  			}
   524  		}
   525  	}
   526  }
   527  
   528  // TestVolumeClient starts a client pod using the given VolumeSource (exported by startVolumeServer())
   529  // and checks that the pod sees the expected data, e.g. from the server pod.
   530  // Multiple Tests can be specified to mount multiple volumes into a single
   531  // pod.
   532  // Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
   533  // pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout.
   534  // It should be used for cases where "regular" dynamic provisioning of an empty volume is requested.
   535  func TestVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
   536  	testVolumeClient(ctx, f, config, fsGroup, fsType, tests, false)
   537  }
   538  
   539  // TestVolumeClientSlow is the same as TestVolumeClient except for its timeout.
   540  // Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
   541  // pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout.
   542  // It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning
   543  // or snapshot restore.
   544  func TestVolumeClientSlow(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
   545  	testVolumeClient(ctx, f, config, fsGroup, fsType, tests, true)
   546  }
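
// Illustrative sketch (not part of the original file): end-to-end use of the helpers above with an
// NFS server; "f" and "ctx" are assumed to come from the surrounding test, fsGroup and fsType are
// left empty.
//
//	config, _, host := NewNFSServer(ctx, f.ClientSet, f.Namespace.Name, nil)
//	defer TestServerCleanup(ctx, f, config)
//	tests := []Test{{
//		Volume:          v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: host, Path: "/", ReadOnly: true}},
//		Mode:            v1.PersistentVolumeFilesystem,
//		File:            "index.html",
//		ExpectedContent: "Hello from NFS!",
//	}}
//	TestVolumeClient(ctx, f, config, nil /* fsGroup */, "" /* fsType */, tests)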
   547  
   548  func testVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
   549  	timeouts := f.Timeouts
   550  	clientPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
   551  	if err != nil {
   552  		framework.Failf("Failed to create client pod: %v", err)
   553  	}
   554  	defer func() {
   555  		// testVolumeClient might get used more than once per test, therefore
   556  		// we have to clean up before returning.
   557  		e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
   558  		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
   559  	}()
   560  
   561  	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
   562  
   563  	ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
   564  	ec := &v1.EphemeralContainer{
   565  		EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
   566  	}
   567  	ec.Resources = v1.ResourceRequirements{}
   568  	ec.Name = "volume-ephemeral-container"
   569  	err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
   570  	// The API server will return NotFound for the subresource when the feature is disabled
   571  	framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
   572  	testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
   573  }
   574  
   575  // InjectContent inserts an index.html with the given content into the given volume. It does so by
   576  // starting an auxiliary pod which writes the file there.
   577  // The volume must be writable.
   578  func InjectContent(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
   579  	privileged := true
   580  	timeouts := f.Timeouts
   581  	if framework.NodeOSDistroIs("windows") {
   582  		privileged = false
   583  	}
   584  	injectorPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
   585  	if err != nil {
   586  		framework.Failf("Failed to create injector pod: %v", err)
   587  		return
   588  	}
   589  	defer func() {
   590  		// This pod must get deleted before the function returns because the test relies on
   591  		// the volume not being in use.
   592  		e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
   593  		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Name, injectorPod.Namespace, timeouts.PodDelete))
   594  	}()
   595  
   596  	ginkgo.By("Writing text file contents in the container.")
   597  	for i, test := range tests {
   598  		commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
   599  		if test.Mode == v1.PersistentVolumeBlock {
   600  			// Block: write content
   601  			deviceName := fmt.Sprintf("/opt/%d", i)
   602  			commands = append(commands, generateWriteBlockCmd(test.ExpectedContent, deviceName)...)
   603  
   604  		} else {
   605  			// Filesystem: write content
   606  			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
   607  			commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
   608  		}
   609  		out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
   610  		framework.ExpectNoError(err, "failed: writing the contents: %s", out)
   611  	}
   612  
   613  	// Check that the data has really been written in this pod.
   614  	// This tests non-persistent volume types.
   615  	testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
   616  }
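
// Illustrative sketch (not part of the original file): for persistent volumes the content is
// typically injected first and then verified from a fresh client pod; "f", "ctx", "config", and
// "tests" are assumed to be set up as in the examples above.
//
//	InjectContent(ctx, f, config, nil /* fsGroup */, "" /* fsType */, tests)
//	TestVolumeClient(ctx, f, config, nil /* fsGroup */, "" /* fsType */, tests)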
   617  
   618  // generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
   619  func generateWriteCmd(content, path string) []string {
   620  	var commands []string
   621  	commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path + "; sync"}
   622  	return commands
   623  }
   624  
   625  // GenerateReadBlockCmd generates the corresponding command lines to read from a block device with the given file path.
   626  func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
   627  	var commands []string
   628  	commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
   629  	return commands
   630  }
   631  
   632  // generateWriteBlockCmd generates the corresponding command lines to write to a block device the given content.
   633  func generateWriteBlockCmd(content, fullPath string) []string {
   634  	return generateWriteCmd(content, fullPath)
   635  }
   636  
   637  // GenerateReadFileCmd generates the corresponding command lines to read from a file with the given file path.
   638  func GenerateReadFileCmd(fullPath string) []string {
   639  	var commands []string
   640  	commands = []string{"cat", fullPath}
   641  	return commands
   642  }
   643  
   644  // generateWriteFileCmd generates the corresponding command lines to write a file with the given content and file path.
   645  func generateWriteFileCmd(content, fullPath string) []string {
   646  	return generateWriteCmd(content, fullPath)
   647  }
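
// Illustrative sketch (not part of the original file): the argv slices produced by the command
// generators above, for a hypothetical file and device under /opt.
//
//	GenerateReadFileCmd("/opt/0/index.html")           // ["cat", "/opt/0/index.html"]
//	GenerateReadBlockCmd("/opt/0", 11)                 // ["head", "-c", "11", "/opt/0"]
//	generateWriteFileCmd("hello", "/opt/0/index.html") // ["/bin/sh", "-c", "echo 'hello' > /opt/0/index.html; sync"]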
   648  
   649  // CheckVolumeModeOfPath checks that the given path inside the pod has the expected volume mode (block device or directory).
   650  func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
   651  	if volMode == v1.PersistentVolumeBlock {
   652  		// Check if block exists
   653  		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))
   654  
   655  		// Double check that it's not a directory
   656  		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
   657  	} else {
   658  		// Check if directory exists
   659  		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))
   660  
   661  		// Double check that it's not a block device
   662  		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
   663  	}
   664  }
   665  
   666  // PodExec runs e2epod.ExecCommandInContainerWithFullOutput to execute a shell cmd in the target pod.
   667  // TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
   668  // is resolved. Otherwise there will be a dependency issue.
   669  func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
   670  	return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
   671  }
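
// Illustrative sketch (not part of the original file): PodExec returns stdout, stderr, and the exec
// error separately; "f" and "clientPod" are assumptions.
//
//	stdout, stderr, err := PodExec(f, clientPod, "mount | grep /opt/0")
//	framework.Logf("mount output: %q (stderr: %q, err: %v)", stdout, stderr, err)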
   672  
   673  // VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds.
   674  // TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
   675  // is resolved. Otherwise there will be a dependency issue.
   676  func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
   677  	stdout, stderr, err := PodExec(f, pod, shExec)
   678  	if err != nil {
   679  		if exiterr, ok := err.(uexec.CodeExitError); ok {
   680  			exitCode := exiterr.ExitStatus()
   681  			framework.ExpectNoError(err,
   682  				"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
   683  				shExec, exitCode, exiterr, stdout, stderr)
   684  		} else {
   685  			framework.ExpectNoError(err,
   686  				"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
   687  				shExec, err, stdout, stderr)
   688  		}
   689  	}
   690  }
   691  
   692  // VerifyExecInPodFail verifies that a shell cmd in the target pod fails with a certain exit code.
   693  // TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
   694  // is resolved. Otherwise there will be a dependency issue.
   695  func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
   696  	stdout, stderr, err := PodExec(f, pod, shExec)
   697  	if err != nil {
   698  		if exiterr, ok := err.(clientexec.ExitError); ok {
   699  			actualExitCode := exiterr.ExitStatus()
   700  			gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
   701  				"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
   702  				shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
   703  		} else {
   704  			framework.ExpectNoError(err,
   705  				"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
   706  				shExec, exitCode, err, stdout, stderr)
   707  		}
   708  	}
   709  	gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exited without error", shExec, exitCode)
   710  }
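
// Illustrative sketch (not part of the original file): the two helpers above express positive and
// negative expectations about a shell command; "f" and "clientPod" are assumptions.
//
//	VerifyExecInPodSucceed(f, clientPod, "test -d /opt/0")
//	VerifyExecInPodFail(f, clientPod, "test -b /opt/0", 1)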