k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_stress.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"sync"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

/*
Induces stress by creating volumes in parallel from multiple goroutines, based on user-configurable
values for the number of instances and the number of iterations per instance.
The following actions are performed as part of this test:

1. Create storage classes of 4 categories (default, SC with non-default datastore, SC with SPBM policy, SC with VSAN storage capabilities).
2. Read VCP_STRESS_INSTANCES, VCP_STRESS_ITERATIONS, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from the environment.
3. Launch one goroutine per instance for volume lifecycle operations.
4. Each goroutine iterates n times, where n is read from the environment variable VCP_STRESS_ITERATIONS.
5. Each iteration creates 1 PVC and 1 pod using the provisioned PV, verifies the disk is attached to the node, verifies the pod can access the volume, deletes the pod, and finally deletes the PVC.
*/
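// Example configuration (illustrative values only; the variable names are the
// ones read in BeforeEach below):
//
//	export VCP_STRESS_INSTANCES=12
//	export VCP_STRESS_ITERATIONS=10
//	export VSPHERE_SPBM_POLICY_NAME=gold
//	export VSPHERE_DATASTORE=sharedVmfs-0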
var _ = utils.SIGDescribe("vsphere cloud provider stress", feature.Vsphere, func() {
	f := framework.NewDefaultFramework("vcp-stress")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	var (
		client        clientset.Interface
		namespace     string
		instances     int
		iterations    int
		policyName    string
		datastoreName string
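		// One storage class name per category: default, VSAN capabilities, SPBM policy, and non-default datastore.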
		scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
	)

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name

		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
		framework.ExpectNoError(err)

		// If VCP_STRESS_INSTANCES is 12 and VCP_STRESS_ITERATIONS is 10, 12 goroutines run in parallel
		// for 10 iterations each, resulting in 120 volume and pod creations. Volumes are provisioned
		// across the different storage class types. Each iteration creates a PVC, verifies the PV is
		// provisioned, creates a pod, verifies the volume is attached to the node, and then deletes
		// the pod and the PVC.
		instances = GetAndExpectIntEnvVar(VCPStressInstances)
		if instances > volumesPerNode*len(nodeList.Items) {
			framework.Failf("Number of instances should be less than or equal to %v, got %v", volumesPerNode*len(nodeList.Items), instances)
		}
		if instances < len(scNames) {
			framework.Failf("VCP_STRESS_INSTANCES should be at least %v to utilize all %v types of storage classes, got %v", len(scNames), len(scNames), instances)
		}

		iterations = GetAndExpectIntEnvVar(VCPStressIterations)
		if iterations <= 0 {
			framework.Failf("VCP_STRESS_ITERATIONS should be greater than 0, got %v", iterations)
		}

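		// These parameterize the SPBM-policy and datastore-backed storage classes created in the It block below.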
		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
	})

	ginkgo.It("vsphere stress tests", func(ctx context.Context) {
		scArrays := make([]*storagev1.StorageClass, len(scNames))
		for index, scname := range scNames {
			// Create vSphere Storage Class
			ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname))
			var sc *storagev1.StorageClass
			var err error
			switch scname {
			case storageclass1:
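				// Default storage class: no extra provisioning parameters.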
				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass1, nil, nil, ""), metav1.CreateOptions{})
			case storageclass2:
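				// VSAN storage capability class: tolerate one host failure.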
				scVSanParameters := make(map[string]string)
				scVSanParameters[PolicyHostFailuresToTolerate] = "1"
				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""), metav1.CreateOptions{})
			case storageclass3:
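				// SPBM class: provision against the storage policy named in VSPHERE_SPBM_POLICY_NAME.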
				scSPBMPolicyParameters := make(map[string]string)
				scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
				sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""), metav1.CreateOptions{})
			case storageclass4:
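				// Datastore class: provision on the non-default datastore named in VSPHERE_DATASTORE.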
				scWithDSParameters := make(map[string]string)
				scWithDSParameters[Datastore] = datastoreName
				scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "")
				sc, err = client.StorageV1().StorageClasses().Create(ctx, scWithDatastoreSpec, metav1.CreateOptions{})
			}
			framework.ExpectNoError(err)
			gomega.Expect(sc).NotTo(gomega.BeNil())
			ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), scname, metav1.DeleteOptions{})
			scArrays[index] = sc
		}

		var wg sync.WaitGroup
		wg.Add(instances)
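		// Storage classes are assigned to instances round-robin, so all four class types are exercised.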
		for instanceCount := 0; instanceCount < instances; instanceCount++ {
			instanceID := fmt.Sprintf("Thread:%v", instanceCount+1)
			go PerformVolumeLifeCycleInParallel(ctx, f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg)
		}
		wg.Wait()
	})

})

// PerformVolumeLifeCycleInParallel performs volume lifecycle operations.
// It is invoked as a goroutine so that multiple instances run in parallel.
func PerformVolumeLifeCycleInParallel(ctx context.Context, f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
	defer wg.Done()
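	// GinkgoRecover must be deferred in every goroutine that uses Ginkgo assertions,
	// so failures are reported as test failures instead of crashing the suite.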
	defer ginkgo.GinkgoRecover()

	for iterationCount := 0; iterationCount < iterations; iterationCount++ {
		logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1)
		ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
		pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
		framework.ExpectNoError(err)
		ginkgo.DeferCleanup(e2epv.DeletePersistentVolumeClaim, client, pvclaim.Name, namespace)

		pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
		ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision)
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
		// Create pod to attach Volume to Node
		pod, err := e2epod.CreatePod(ctx, client, namespace, nil, pvclaims, f.NamespacePodSecurityLevel, "")
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow)
		framework.ExpectNoError(err)

		// Get the copy of the Pod to know the assigned node name.
		pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
		framework.ExpectNoError(verifyDiskAttachedError)
		if !isVolumeAttached {
			framework.Failf("Volume: %s is not attached to the node: %v", persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
		}

		ginkgo.By(fmt.Sprintf("%v Verifying the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
		verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes)

		ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
		err = e2epod.DeletePodWithWait(ctx, client, pod)
		framework.ExpectNoError(err)

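		// Pod deletion triggers a detach; wait for it to finish so the claim is not
		// deleted while the disk is still attached to the node VM.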
		ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
		err = waitForVSphereDiskToDetach(ctx, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
		framework.ExpectNoError(err)

		ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))
		err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvclaim.Name, namespace)
		framework.ExpectNoError(err)
	}
}