k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"hash/fnv"
	"regexp"
	"time"

	"strings"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	vmfsDatastore               = "sharedVmfs-0"
	vsanDatastore               = "vsanDatastore"
	dummyVMPrefixName           = "vsphere-k8s"
	diskStripesCapabilityMaxVal = "11"
)

/*
Tests to verify storage-policy-based management for dynamic volume provisioning inside Kubernetes.
There are two ways to achieve it:
1. Specify VSAN storage capabilities in the StorageClass.
2. Use an existing vCenter SPBM storage policy.

The valid VSAN storage capabilities are:
1. hostFailuresToTolerate
2. forceProvisioning
3. cacheReservation
4. diskStripes
5. objectSpaceReservation
6. iopsLimit

Steps
1. Create a StorageClass with either
   a. VSAN storage capabilities set to valid/invalid values, or
   b. an existing vCenter SPBM storage policy.
2. Create a PVC which uses the StorageClass created in step 1.
3. Wait for the PV to be provisioned.
4. Wait for the PVC's status to become Bound.
5. Create a pod using the PVC on a specific node.
6. Wait for the disk to be attached to the node.
7. Delete the pod and wait for the volume disk to be detached from the node.
8. Delete the PVC, PV and StorageClass.
*/
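
// For orientation, a minimal sketch (not executed by this suite) of how the tests
// below exercise the two approaches described above. The StorageClass name
// "example-vsan-sc" and the capability values are illustrative placeholders; the
// real tests build their parameters from the constants used throughout this file
// and create the class via getVSphereStorageClassSpec:
//
//	// Approach 1: raw VSAN capabilities as StorageClass parameters.
//	scParams := map[string]string{
//		PolicyHostFailuresToTolerate: "2",
//		PolicyCacheReservation:       "20",
//	}
//
//	// Approach 2: reference an existing vCenter SPBM policy by name instead.
//	// scParams := map[string]string{SpbmStoragePolicy: "gold", DiskFormat: ThinDisk}
//
//	sc, err := client.StorageV1().StorageClasses().Create(ctx,
//		getVSphereStorageClassSpec("example-vsan-sc", scParams, nil, ""), metav1.CreateOptions{})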

var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning", feature.Vsphere, func() {
	f := framework.NewDefaultFramework("volume-vsan-policy")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	var (
		client       clientset.Interface
		namespace    string
		scParameters map[string]string
		policyName   string
		tagPolicy    string
	)
	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name
		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
		framework.Logf("framework: %+v", f)
		scParameters = make(map[string]string)
		_, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
		framework.ExpectNoError(err)
	})

	// Valid policy.
	ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
		scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})

	// Valid policy.
	ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
		scParameters[PolicyDiskStripes] = "1"
		scParameters[PolicyObjectSpaceReservation] = "30"
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})

	// Valid policy.
	ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
		scParameters[Datastore] = vsanDatastore
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})

	// Valid policy.
	ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
		scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})

	// Invalid VSAN storage capabilities parameters.
	ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
		scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
		scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	// Invalid policy on a VSAN test bed.
	// diskStripes value has to be between 1 and 12.
	ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
		scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal
		scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "Invalid value for " + PolicyDiskStripes + "."
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	// Invalid policy on a VSAN test bed.
	// hostFailuresToTolerate value has to be between 0 and 3, inclusive.
	ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
		scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	// Specify a valid VSAN policy on a non-VSAN test bed.
	// The test should fail.
	ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, vmfsDatastore))
		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
		scParameters[Datastore] = vmfsDatastore
		framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " +
			"The policy parameters will work only with VSAN Datastore."
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
		scParameters[SpbmStoragePolicy] = policyName
		scParameters[DiskFormat] = ThinDisk
		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})

	ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func(ctx context.Context) {
		scParameters[PolicyDiskStripes] = diskStripesCapabilityMaxVal
		scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
		scParameters[Datastore] = vsanDatastore
		framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
		kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
		controlPlaneNode, err := getControlPlaneNode(ctx, client)
		framework.ExpectNoError(err)
		invokeStaleDummyVMTestWithStoragePolicy(ctx, client, controlPlaneNode, namespace, kubernetesClusterName, scParameters)
	})

	ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, vsanDatastore))
		scParameters[SpbmStoragePolicy] = tagPolicy
		scParameters[Datastore] = vsanDatastore
		scParameters[DiskFormat] = ThinDisk
		framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
		scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
		scParameters[DiskFormat] = ThinDisk
		framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})

	ginkgo.It("verify if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func(ctx context.Context) {
		ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
		scParameters[SpbmStoragePolicy] = policyName
		gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
		scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
		scParameters[DiskFormat] = ThinDisk
		framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
		err := invokeInvalidPolicyTestNeg(ctx, client, namespace, scParameters)
		framework.ExpectError(err)
		errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
		if !strings.Contains(err.Error(), errorMsg) {
			framework.ExpectNoError(err, errorMsg)
		}
	})
})

// invokeValidPolicyTest dynamically provisions a PVC through a StorageClass built
// from the given policy parameters, attaches it to a pod, verifies the volume is
// accessible from the pod, and then cleans up, waiting for the disk to detach
// from the node.
func invokeValidPolicyTest(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
	ginkgo.By("Creating Storage Class With storage policy params")
	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})

	ginkgo.By("Creating PVC using the Storage Class")
	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
	framework.ExpectNoError(err)
	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)

	var pvclaims []*v1.PersistentVolumeClaim
	pvclaims = append(pvclaims, pvclaim)
	ginkgo.By("Waiting for claim to be in bound phase")
	persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision)
	framework.ExpectNoError(err)

	ginkgo.By("Creating pod to attach PV to the node")
	// Create pod to attach Volume to Node
	pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
	framework.ExpectNoError(err)

	ginkgo.By("Verify the volume is accessible and available in the pod")
	verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes)

	ginkgo.By("Deleting pod")
	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod))

	ginkgo.By("Waiting for volumes to be detached from the node")
	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
}

// invokeInvalidPolicyTestNeg creates a StorageClass from the given (invalid or
// incompatible) parameters, expects the PVC to stay unbound, and returns the
// provisioning failure message recorded as an event in the claim's namespace.
func invokeInvalidPolicyTestNeg(ctx context.Context, client clientset.Interface, namespace string, scParameters map[string]string) error {
	ginkgo.By("Creating Storage Class With storage policy params")
	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})

	ginkgo.By("Creating PVC using the Storage Class")
	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
	framework.ExpectNoError(err)
	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)

	ginkgo.By("Waiting for claim to be in bound phase")
	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
	framework.ExpectError(err)

	eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)
	return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}

// invokeStaleDummyVMTestWithStoragePolicy assumes the control plane node is present in the datacenter specified in the workspace section of the vsphere.conf file.
// With the in-tree VCP, when a volume is created using a storage policy, a shadow (dummy) VM is created and deleted to apply the SPBM policy to the volume.
func invokeStaleDummyVMTestWithStoragePolicy(ctx context.Context, client clientset.Interface, controlPlaneNode string, namespace string, clusterName string, scParameters map[string]string) {
	ginkgo.By("Creating Storage Class With storage policy params")
	storageclass, err := client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""), metav1.CreateOptions{})
	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
	ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), storageclass.Name, metav1.DeleteOptions{})

	ginkgo.By("Creating PVC using the Storage Class")
	pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
	framework.ExpectNoError(err)

	var pvclaims []*v1.PersistentVolumeClaim
	pvclaims = append(pvclaims, pvclaim)
	ginkgo.By("Expect claim to fail provisioning volume")
	_, err = e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, 2*time.Minute)
	framework.ExpectError(err)

	updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, pvclaim.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	vmName := clusterName + "-dynamic-pvc-" + string(updatedClaim.UID)
	ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)
	// Wait for 6 minutes to let the vSphere Cloud Provider clean up routine delete the dummy VM
	time.Sleep(6 * time.Minute)

	fnvHash := fnv.New32a()
	fnvHash.Write([]byte(vmName))
	dummyVMFullName := dummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
	errorMsg := "Dummy VM - " + vmName + " is still present. Failing the test.."
	nodeInfo := TestContext.NodeMapper.GetNodeInfo(controlPlaneNode)
	isVMPresentFlag, err := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
	framework.ExpectNoError(err)
	if isVMPresentFlag {
		framework.Failf("VM with name %s is present, %s", dummyVMFullName, errorMsg)
	}
}

// getControlPlaneNode returns the name of a node running control plane components,
// located by looking for kube-scheduler or kube-controller-manager pods in the
// kube-system namespace.
func getControlPlaneNode(ctx context.Context, client clientset.Interface) (string, error) {
	regKubeScheduler := regexp.MustCompile("kube-scheduler-.*")
	regKubeControllerManager := regexp.MustCompile("kube-controller-manager-.*")

	podList, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(ctx, metav1.ListOptions{})
	if err != nil {
		return "", err
	}
	if len(podList.Items) < 1 {
		return "", fmt.Errorf("could not find any pods in namespace %s to grab metrics from", metav1.NamespaceSystem)
	}
	for _, pod := range podList.Items {
		if regKubeScheduler.MatchString(pod.Name) || regKubeControllerManager.MatchString(pod.Name) {
			return pod.Spec.NodeName, nil
		}
	}
	return "", fmt.Errorf("could not find any nodes where control plane pods are running")
}