k8s.io/kubernetes@v1.29.3/test/e2e/node/mount_propagation.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package node

import (
	"context"
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func preparePod(name string, node *v1.Node, propagation *v1.MountPropagationMode, hostDir string) *v1.Pod {
	const containerName = "cntr"
	bTrue := true
	var oneSecond int64 = 1
	// The pod prepares /mnt/test/<podname> and sleeps.
	cmd := fmt.Sprintf("mkdir /mnt/test/%[1]s; sleep 3600", name)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
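			// All pods in this test are scheduled to the same node, so they share the same HostPath directory.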
			NodeName: node.Name,
			Containers: []v1.Container{
				{
					Name:    containerName,
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"sh", "-c", cmd},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:             "host",
							MountPath:        "/mnt/test",
							MountPropagation: propagation,
						},
					},
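					// Privileged mode is needed because the test later mounts a tmpfs inside the container.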
					SecurityContext: &v1.SecurityContext{
						Privileged: &bTrue,
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "host",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{
							Path: hostDir,
						},
					},
				},
			},
			// speed up termination of the pod
			TerminationGracePeriodSeconds: &oneSecond,
		},
	}
	return pod
}

var _ = SIGDescribe("Mount propagation", func() {
	f := framework.NewDefaultFramework("mount-propagation")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.It("should propagate mounts within defined scopes", func(ctx context.Context) {
		// This test runs four pods (master, slave, private, and default), each
		// with a different mount propagation mode on a common
		// /var/lib/kubelet/XXXX directory. Every pod mounts a tmpfs into its
		// own subdirectory there, and the test checks that these mounts are
		// propagated to the right places.

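		// hostExec runs shell commands directly on the node, outside the test pods.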
		hostExec := utils.NewHostExec(f)
		ginkgo.DeferCleanup(hostExec.Cleanup)

		// Pick a node where all pods will run.
		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)

		// Fail the test if the namespace is not set. We rely on the namespace
		// being unique and non-empty; otherwise the cleanup of hostDir below
		// might delete user data.
		if len(f.Namespace.Name) == 0 {
			gomega.Expect(f.Namespace.Name).NotTo(gomega.BeEmpty())
			return
		}

		// hostDir is the directory that's shared via HostPath among all pods.
		// Make sure it's random enough so we don't clash with another test
		// running in parallel.
		hostDir := "/var/lib/kubelet/" + f.Namespace.Name
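		// Remove the shared directory from the node when the test finishes.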
		ginkgo.DeferCleanup(func(ctx context.Context) error {
			cleanCmd := fmt.Sprintf("rm -rf %q", hostDir)
			return hostExec.IssueCommand(ctx, cleanCmd, node)
		})

		podClient := e2epod.NewPodClient(f)
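		// Create four pods sharing hostDir, each with a different mount propagation
		// mode: master (Bidirectional), slave (HostToContainer), private (None), and
		// default (unset, which also means None).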
		bidirectional := v1.MountPropagationBidirectional
		master := podClient.CreateSync(ctx, preparePod("master", node, &bidirectional, hostDir))

		hostToContainer := v1.MountPropagationHostToContainer
		slave := podClient.CreateSync(ctx, preparePod("slave", node, &hostToContainer, hostDir))

		none := v1.MountPropagationNone
		private := podClient.CreateSync(ctx, preparePod("private", node, &none, hostDir))
		defaultPropagation := podClient.CreateSync(ctx, preparePod("default", node, nil, hostDir))

		// Check that the pods see each other's directories. This only verifies
		// that they share the same HostPath, not the mount propagation.
		podNames := []string{master.Name, slave.Name, private.Name, defaultPropagation.Name}
		for _, podName := range podNames {
			for _, dirName := range podNames {
				cmd := fmt.Sprintf("test -d /mnt/test/%s", dirName)
				e2epod.ExecShellInPod(ctx, f, podName, cmd)
			}
		}

		// Each pod mounts one tmpfs to /mnt/test/<podname> and puts a file there.
		for _, podName := range podNames {
			cmd := fmt.Sprintf("mount -t tmpfs e2e-mount-propagation-%[1]s /mnt/test/%[1]s; echo %[1]s > /mnt/test/%[1]s/file", podName)
			e2epod.ExecShellInPod(ctx, f, podName, cmd)

			// unmount tmpfs when the test finishes
			cmd = fmt.Sprintf("umount /mnt/test/%s", podName)
			ginkgo.DeferCleanup(e2epod.ExecShellInPod, f, podName, cmd)
		}

		// The host mounts one tmpfs to hostDir/host and puts a file there so we
		// can check mount propagation from the host to pods.
		cmd := fmt.Sprintf("mkdir %[1]q/host; mount -t tmpfs e2e-mount-propagation-host %[1]q/host; echo host > %[1]q/host/file", hostDir)
		err = hostExec.IssueCommand(ctx, cmd, node)
		framework.ExpectNoError(err)

		ginkgo.DeferCleanup(func(ctx context.Context) error {
			cmd := fmt.Sprintf("umount %q/host", hostDir)
			return hostExec.IssueCommand(ctx, cmd, node)
		})

		// Now check that mounts are propagated to the right containers.
		// expectedMounts is a map of pod name -> mounts expected to be visible
		// in that pod.
		expectedMounts := map[string]sets.String{
			// Master sees its own mount and the host's one, but not the slave's.
			"master": sets.NewString("master", "host"),
			// Slave sees the master's and the host's mounts in addition to its own.
			"slave": sets.NewString("master", "slave", "host"),
			// Private sees only its own mount.
			"private": sets.NewString("private"),
			// Default (= None, i.e. private) sees only its own mount.
			"default": sets.NewString("default"),
		}
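		// The host tmpfs is expected only in the master (Bidirectional) and slave
		// (HostToContainer) pods; the private and default pods keep an isolated
		// view of /mnt/test.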
		dirNames := append(podNames, "host")
		for podName, mounts := range expectedMounts {
			for _, mountName := range dirNames {
				cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
				stdout, stderr, err := e2epod.ExecShellInPodWithFullOutput(ctx, f, podName, cmd)
				framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
				msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
				shouldBeVisible := mounts.Has(mountName)
				if shouldBeVisible {
					framework.ExpectNoError(err, "%s: failed to run %q", msg, cmd)
					gomega.Expect(stdout).To(gomega.Equal(mountName), msg)
				} else {
					// We *expect* cat to return error here
					gomega.Expect(err).To(gomega.HaveOccurred(), msg)
				}
			}
		}

		// Find the kubelet PID to ensure we're working with the kubelet's mount namespace
		cmd = "pidof kubelet"
		kubeletPid, err := hostExec.IssueCommandWithResult(ctx, cmd, node)
		framework.ExpectNoError(err, "Checking kubelet pid")
		kubeletPid = strings.TrimSuffix(kubeletPid, "\n")
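		// pidof prints space-separated PIDs, so a space here would mean more than one kubelet process.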
		gomega.Expect(strings.Count(kubeletPid, " ")).To(gomega.Equal(0), "kubelet should only have a single PID in the system (pidof returned %q)", kubeletPid)
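		// nsenter -t <pid> -m runs the given command inside the kubelet's mount namespace.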
		enterKubeletMountNS := fmt.Sprintf("nsenter -t %s -m", kubeletPid)

		// Check that the master and host mounts are propagated to the kubelet's (host) mount namespace
		for _, mountName := range []string{"host", master.Name} {
			cmd := fmt.Sprintf("%s cat \"%s/%s/file\"", enterKubeletMountNS, hostDir, mountName)
			output, err := hostExec.IssueCommandWithResult(ctx, cmd, node)
			framework.ExpectNoError(err, "host container namespace should see mount from %s: %s", mountName, output)
			output = strings.TrimSuffix(output, "\n")
			gomega.Expect(output).To(gomega.Equal(mountName), "host container namespace should see mount contents from %s", mountName)
		}

		// Check that the slave, private, and default mounts are not propagated to the kubelet's (host) mount namespace
		for _, podName := range []string{slave.Name, private.Name, defaultPropagation.Name} {
			cmd := fmt.Sprintf("%s test ! -e \"%s/%s/file\"", enterKubeletMountNS, hostDir, podName)
			output, err := hostExec.IssueCommandWithResult(ctx, cmd, node)
			framework.ExpectNoError(err, "host container namespace shouldn't see mount from %s: %s", podName, output)
		}
	})
})