k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/storage/detach_mounted.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"path"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)

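// durationForStuckMount, combined with the 30s wait after pod deletion,
// outlasts the 120-second sleep in the test driver's mount operation;
// see the comment at its use below.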
var durationForStuckMount = 110 * time.Second

var _ = utils.SIGDescribe(feature.Flexvolumes, "Detaching volumes", func() {
	f := framework.NewDefaultFramework("flexvolume")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	// Note that namespace deletion is handled by the delete-namespace flag.

	var cs clientset.Interface
	var ns *v1.Namespace
	var node *v1.Node
	var suffix string

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("gce", "local")
		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessSSHKeyPresent()

		cs = f.ClientSet
		ns = f.Namespace
		var err error
		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		suffix = ns.Name
	})

	f.It("should not work when mount is in progress", f.WithSlow(), func(ctx context.Context) {
		e2eskipper.SkipUnlessSSHKeyPresent()

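		// The "attachable-with-long-mount" test driver's mount operation
		// deliberately sleeps (120 seconds, per the comment below), keeping
		// the mount in progress long enough to exercise detach behavior.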
		driver := "attachable-with-long-mount"
		driverInstallAs := driver + "-" + suffix

		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driverInstallAs))
		installFlex(ctx, cs, node, "k8s", driverInstallAs, path.Join(driverDir, driver))
		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master as %s", path.Join(driverDir, driver), driverInstallAs))
		installFlex(ctx, cs, nil, "k8s", driverInstallAs, path.Join(driverDir, driver))
		volumeSource := v1.VolumeSource{
			FlexVolume: &v1.FlexVolumeSource{
				Driver: "k8s/" + driverInstallAs,
			},
		}

		clientPod := getFlexVolumePod(volumeSource, node.Name)
		ginkgo.By("Creating pod that uses the slow-format volume")
		pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, clientPod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		uniqueVolumeName := getUniqueVolumeName(pod, driverInstallAs)

		ginkgo.By("waiting for volumes to be attached to node")
		err = waitForVolumesAttached(ctx, cs, node.Name, uniqueVolumeName)
		framework.ExpectNoError(err, "while waiting for volume to attach to node %s", node.Name)

		ginkgo.By("waiting for volume-in-use on the node after pod creation")
		err = waitForVolumesInUse(ctx, cs, node.Name, uniqueVolumeName)
		framework.ExpectNoError(err, "while waiting for volume in use")

		ginkgo.By("waiting for kubelet to start mounting the volume")
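		// There is no direct signal for the mount starting, so allow kubelet
		// a fixed interval to reach the driver's long-running mount call.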
		time.Sleep(20 * time.Second)

		ginkgo.By("Deleting the flexvolume pod")
		err = e2epod.DeletePodWithWait(ctx, cs, pod)
		framework.ExpectNoError(err, "while deleting the pod")

		// Wait a bit for the node to sync the volume status.
		time.Sleep(30 * time.Second)

		ginkgo.By("waiting for volume-in-use on the node after pod deletion")
		err = waitForVolumesInUse(ctx, cs, node.Name, uniqueVolumeName)
		framework.ExpectNoError(err, "while waiting for volume in use")

		// Wait another 110s: the mount device operation sleeps for 120 seconds,
		// and we have already waited 30s above, so the mount is guaranteed to
		// have finished by the end of this sleep.
		time.Sleep(durationForStuckMount)

		ginkgo.By("waiting for volume to disappear from node in-use")
		err = waitForVolumesNotInUse(ctx, cs, node.Name, uniqueVolumeName)
		framework.ExpectNoError(err, "while waiting for volume to be removed from in-use")

		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
		uninstallFlex(ctx, cs, node, "k8s", driverInstallAs)
		ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from master", driverInstallAs))
		uninstallFlex(ctx, cs, nil, "k8s", driverInstallAs)
	})
})

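// getUniqueVolumeName returns the name under which the node status reports
// the pod's first volume, in the form "k8s/<driver>/<volume name>".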
func getUniqueVolumeName(pod *v1.Pod, driverName string) string {
	return fmt.Sprintf("k8s/%s/%s", driverName, pod.Spec.Volumes[0].Name)
}

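// waitForVolumesNotInUse polls the node's status until volumeName no longer
// appears in status.volumesInUse, or the one-minute timeout expires.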
func waitForVolumesNotInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error fetching node %s: %w", nodeName, err)
		}
		for _, volume := range node.Status.VolumesInUse {
			if string(volume) == volumeName {
				return false, nil
			}
		}
		return true, nil
	})
	if waitErr != nil {
		return fmt.Errorf("error waiting for volume %s to not be in use on node %s: %w", volumeName, nodeName, waitErr)
	}
	return nil
}

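// waitForVolumesAttached polls the node's status until volumeName appears in
// status.volumesAttached, or the two-minute timeout expires.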
func waitForVolumesAttached(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
	waitErr := wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error fetching node %s: %w", nodeName, err)
		}
		for _, volume := range node.Status.VolumesAttached {
			if string(volume.Name) == volumeName {
				return true, nil
			}
		}
		return false, nil
	})
	if waitErr != nil {
		return fmt.Errorf("error waiting for volume %s to attach to node %s: %w", volumeName, nodeName, waitErr)
	}
	return nil
}

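// waitForVolumesInUse polls the node's status until volumeName appears in
// status.volumesInUse, or the one-minute timeout expires.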
func waitForVolumesInUse(ctx context.Context, client clientset.Interface, nodeName, volumeName string) error {
	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
		node, err := client.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error fetching node %s: %w", nodeName, err)
		}
		for _, volume := range node.Status.VolumesInUse {
			if string(volume) == volumeName {
				return true, nil
			}
		}
		return false, nil
	})
	if waitErr != nil {
		return fmt.Errorf("error waiting for volume %s to be in use on node %s: %w", volumeName, nodeName, waitErr)
	}
	return nil
}

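// getFlexVolumePod returns a pod pinned to nodeName that mounts the given
// volume source at /opt/foo and keeps reading it in a loop.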
func getFlexVolumePod(volumeSource v1.VolumeSource, nodeName string) *v1.Pod {
	var gracePeriod int64
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "flexvolume-detach-test-client",
			Labels: map[string]string{
				"role": "flexvolume-detach-test-client",
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       "flexvolume-detach-test-client",
					Image:      imageutils.GetE2EImage(imageutils.BusyBox),
					WorkingDir: "/opt",
					// An imperative and easily debuggable container that reads the volume
					// contents for us to scan in the tests or by eye.
					// We expect /opt to be empty in the minimal containers used in this test.
					Command: []string{
						"/bin/sh",
						"-c",
						"while true ; do cat /opt/foo/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done",
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "test-long-detach-flex",
							MountPath: "/opt/foo",
						},
					},
				},
			},
			TerminationGracePeriodSeconds: &gracePeriod,
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: &v1.SELinuxOptions{
					Level: "s0:c0,c1",
				},
			},
			Volumes: []v1.Volume{
				{
					Name:         "test-long-detach-flex",
					VolumeSource: volumeSource,
				},
			},
			NodeName: nodeName,
		},
	}
	return clientPod
}