k8s.io/kubernetes@v1.29.3/test/e2e/storage/flexvolume_online_resize.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"path"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = utils.SIGDescribe(feature.Flexvolumes, "Mounted flexvolume volume expand", framework.WithSlow(), func() {
	var (
		c                 clientset.Interface
		ns                string
		err               error
		pvc               *v1.PersistentVolumeClaim
		resizableSc       *storagev1.StorageClass
		nodeName          string
		nodeKeyValueLabel map[string]string
		nodeLabelValue    string
		nodeKey           string
		node              *v1.Node
	)

	f := framework.NewDefaultFramework("mounted-flexvolume-expand")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("aws", "gce", "local")
		e2eskipper.SkipUnlessMasterOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessNodeOSDistroIs("debian", "ubuntu", "gci", "custom")
		e2eskipper.SkipUnlessSSHKeyPresent()
		c = f.ClientSet
		ns = f.Namespace.Name
		framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
		var err error

		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		nodeName = node.Name

		nodeKey = "mounted_flexvolume_expand_" + ns
		nodeLabelValue = ns
		nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
		e2enode.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
		ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, nodeKey)

		test := testsuites.StorageClassTest{
			Name:                 "flexvolume-resize",
			Timeouts:             f.Timeouts,
			ClaimSize:            "2Gi",
			AllowVolumeExpansion: true,
			Provisioner:          "flex-expand",
		}

		resizableSc, err = c.StorageV1().StorageClasses().Create(ctx, newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
		if err != nil {
			fmt.Printf("storage class creation error: %v\n", err)
		}
		framework.ExpectNoError(err, "Error creating resizable storage class: %v", err)
		if !*resizableSc.AllowVolumeExpansion {
			framework.Failf("Class %s does not allow volume expansion", resizableSc.Name)
		}
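
		// Create a PVC against the resizable class. Nothing provisions for the
		// dummy "flex-expand" provisioner, so the claim is expected to stay
		// Pending until the test body pre-provisions a matching FlexVolume PV
		// for it to bind to.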
		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
			StorageClassName: &(resizableSc.Name),
			ClaimSize:        "2Gi",
		}, ns)
		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Error creating pvc: %v", err)
		ginkgo.DeferCleanup(func(ctx context.Context) {
			framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
			if errs := e2epv.PVPVCCleanup(ctx, c, ns, nil, pvc); len(errs) > 0 {
				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
			}
		})
	})

	ginkgo.It("should be resizable when mounted", func(ctx context.Context) {
		e2eskipper.SkipUnlessSSHKeyPresent()

		driver := "dummy-attachable"

		ginkgo.By(fmt.Sprintf("installing flexvolume %s on node %s as %s", path.Join(driverDir, driver), node.Name, driver))
		installFlex(ctx, c, node, "k8s", driver, path.Join(driverDir, driver))
		ginkgo.By(fmt.Sprintf("installing flexvolume %s on master node as %s", path.Join(driverDir, driver), driver))
		installFlex(ctx, c, nil, "k8s", driver, path.Join(driverDir, driver))

		pv := e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
			PVSource: v1.PersistentVolumeSource{
				FlexVolume: &v1.FlexPersistentVolumeSource{
					Driver: "k8s/" + driver,
				}},
			NamePrefix:       "pv-",
			StorageClassName: resizableSc.Name,
			VolumeMode:       pvc.Spec.VolumeMode,
		})

		_, err = e2epv.CreatePV(ctx, c, f.Timeouts, pv)
		framework.ExpectNoError(err, "Error creating pv %v", err)

		ginkgo.By("Waiting for PVC to be in bound phase")
		pvcClaims := []*v1.PersistentVolumeClaim{pvc}
		var pvs []*v1.PersistentVolume

		pvs, err = e2epv.WaitForPVClaimBoundPhase(ctx, c, pvcClaims, framework.ClaimProvisionTimeout)
		framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
		gomega.Expect(pvs).To(gomega.HaveLen(1))

		var pod *v1.Pod
		ginkgo.By("Creating pod")
		pod, err = createNginxPod(ctx, c, ns, nodeKeyValueLabel, pvcClaims)
		framework.ExpectNoError(err, "Failed to create pod %v", err)
		ginkgo.DeferCleanup(e2epod.DeletePodWithWait, c, pod)

		ginkgo.By("Waiting for pod to go to 'running' state")
		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.ObjectMeta.Name, f.Namespace.Name)
		framework.ExpectNoError(err, "Pod didn't go to 'running' state %v", err)
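
		// The expansion is verified in three stages below: patch the PVC spec to
		// request a larger size, wait for the controller-side (cloud provider)
		// resize to finish, then wait for the file system resize on the node,
		// after which no resize-related conditions should remain on the PVC.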
		ginkgo.By("Expanding current pvc")
		newSize := resource.MustParse("6Gi")
		newPVC, err := testsuites.ExpandPVCSize(ctx, pvc, newSize, c)
		framework.ExpectNoError(err, "While updating pvc for more size")
		pvc = newPVC
		gomega.Expect(pvc).NotTo(gomega.BeNil())

		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
		if pvcSize.Cmp(newSize) != 0 {
			framework.Failf("error updating pvc size %q", pvc.Name)
		}

		ginkgo.By("Waiting for cloudprovider resize to finish")
		err = testsuites.WaitForControllerVolumeResize(ctx, pvc, c, totalResizeWaitPeriod)
		framework.ExpectNoError(err, "While waiting for pvc resize to finish")

		ginkgo.By("Waiting for file system resize to finish")
		pvc, err = testsuites.WaitForFSResize(ctx, pvc, c)
		framework.ExpectNoError(err, "while waiting for fs resize to finish")

		pvcConditions := pvc.Status.Conditions
		gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
	})
})

// createNginxPod creates an nginx pod, waits for it to reach the Running phase,
// and returns the refreshed pod object.
func createNginxPod(ctx context.Context, client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
	pod := makeNginxPod(namespace, nodeSelector, pvclaims)
	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return nil, fmt.Errorf("pod Create API error: %w", err)
	}
	// Waiting for pod to be running
	err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
	if err != nil {
		return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
	}
	// get fresh pod info
	pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return pod, fmt.Errorf("pod Get API error: %w", err)
	}
	return pod, nil
}

// makeNginxPod returns a pod definition that runs the nginx image, mounts the
// given PVCs under /mnt, and is restricted to nodes matching nodeSelector when
// one is provided.
func makeNginxPod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim) *v1.Pod {
	podSpec := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-tester-",
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "write-pod",
					Image: imageutils.GetE2EImage(imageutils.Nginx),
					Ports: []v1.ContainerPort{
						{
							Name:          "http-server",
							ContainerPort: 80,
						},
					},
				},
			},
		},
	}
	var volumeMounts = make([]v1.VolumeMount, len(pvclaims))
	var volumes = make([]v1.Volume, len(pvclaims))
	for index, pvclaim := range pvclaims {
		volumename := fmt.Sprintf("volume%v", index+1)
		volumeMounts[index] = v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename}
		volumes[index] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
	}
	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
	podSpec.Spec.Volumes = volumes
	if nodeSelector != nil {
		podSpec.Spec.NodeSelector = nodeSelector
	}
	return podSpec
}