k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_scale.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"
	"strconv"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

/*
Perform vSphere volume life cycle management at scale, based on a user-configurable number of volumes.
The following actions are performed as part of this test.

1. Create Storage Classes of 4 categories (default, SC with non-default datastore, SC with SPBM policy, SC with VSAN storage capabilities).
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME and VSPHERE_DATASTORE from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines to create VCP_SCALE_VOLUME_COUNT volumes in total. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines are completed, delete all the pods and volumes.
*/
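// A minimal sketch of the environment needed to drive the test. The variable
// names are the ones listed in the description above; the values are purely
// illustrative and must be adapted to the target vSphere setup:
//
//	export VCP_SCALE_VOLUME_COUNT=100
//	export VCP_SCALE_INSTANCES=5
//	export VCP_SCALE_VOLUMES_PER_POD=10
//	export VSPHERE_SPBM_POLICY_NAME=gold
//	export VSPHERE_DATASTORE=sharedVmfs-0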
const (
	NodeLabelKey = "vsphere_e2e_label"
)

// NodeSelector holds the label key/value pair that is applied to a node and
// then used as a pod node selector, so that pods can be pinned to specific nodes.
type NodeSelector struct {
	labelKey   string
	labelValue string
}

var _ = utils.SIGDescribe("vcp at scale", feature.Vsphere, func() {
	f := framework.NewDefaultFramework("vcp-at-scale")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	var (
		client            clientset.Interface
		namespace         string
		nodeSelectorList  []*NodeSelector
		volumeCount       int
		numberOfInstances int
		volumesPerPod     int
		policyName        string
		datastoreName     string
		nodeVolumeMapChan chan map[string][]string
		nodes             *v1.NodeList
		scNames           = []string{storageclass1, storageclass2, storageclass3, storageclass4}
	)

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name
		nodeVolumeMapChan = make(chan map[string][]string)

		// Read the environment variables
		volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
		volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)

		numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
		if numberOfInstances > 5 {
			framework.Failf("Maximum 5 instances allowed, got: %v", numberOfInstances)
		}
		if numberOfInstances > volumeCount {
			framework.Failf("Number of instances: %v cannot be greater than volume count: %v", numberOfInstances, volumeCount)
		}

		policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
		datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)

		var err error
		nodes, err = e2enode.GetReadySchedulableNodes(ctx, client)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
		}
		// Verify volume count specified by the user can be satisfied
		if volumeCount > volumesPerNode*len(nodes.Items) {
			e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
		}
		nodeSelectorList = createNodeLabels(client, namespace, nodes)
		ginkgo.DeferCleanup(func() {
			for _, node := range nodes.Items {
				e2enode.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
			}
		})
	})

	ginkgo.It("vsphere scale tests", func(ctx context.Context) {
		var pvcClaimList []string
		nodeVolumeMap := make(map[string][]string)
		// Volumes will be provisioned with each of the different types of Storage Class
		scArrays := make([]*storagev1.StorageClass, len(scNames))
		for index, scname := range scNames {
			// Create vSphere Storage Class
			ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname))
			var sc *storagev1.StorageClass
			scParams := make(map[string]string)
			var err error
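			// Each storage class exercises a different provisioning path:
			// storageclass1 keeps the default parameters, storageclass2 sets a
			// VSAN storage capability, storageclass3 uses the SPBM policy from
			// the environment, and storageclass4 pins a specific datastore.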
			switch scname {
			case storageclass1:
				scParams = nil
			case storageclass2:
				scParams[PolicyHostFailuresToTolerate] = "1"
			case storageclass3:
				scParams[SpbmStoragePolicy] = policyName
			case storageclass4:
				scParams[Datastore] = datastoreName
			}
			sc, err = client.StorageV1().StorageClasses().Create(ctx, getVSphereStorageClassSpec(scname, scParams, nil, ""), metav1.CreateOptions{})
			framework.ExpectNoError(err, "Failed to create storage class")
			gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
			ginkgo.DeferCleanup(client.StorageV1().StorageClasses().Delete, scname, metav1.DeleteOptions{})
			scArrays[index] = sc
		}

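		// Split the requested volume count across the worker goroutines. Each
		// instance handles volumeCount/numberOfInstances volumes and the last
		// instance also picks up the remainder; for example (illustrative
		// numbers only), volumeCount=25 with numberOfInstances=4 results in
		// the instances creating 6, 6, 6 and 7 volumes respectively.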
		volumeCountPerInstance := volumeCount / numberOfInstances
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			if instanceCount == numberOfInstances-1 {
				volumeCountPerInstance = volumeCount
			}
			volumeCount = volumeCount - volumeCountPerInstance
			go VolumeCreateAndAttach(ctx, f, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
		}

		// Get the list of all volumes attached to each node from the goroutines by reading the data from the channel
		for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
			for node, volumeList := range <-nodeVolumeMapChan {
				nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
			}
		}
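		// Tear down: record the PVC names of every pod before deleting it, wait
		// for the corresponding vSphere disks to detach from the nodes, and only
		// then delete the claims.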
		podList, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err, "Failed to list pods")
		for _, pod := range podList.Items {
			pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
			ginkgo.By("Deleting pod")
			err = e2epod.DeletePodWithWait(ctx, client, &pod)
			framework.ExpectNoError(err)
		}
		ginkgo.By("Waiting for volumes to be detached from the nodes")
		err = waitForVSphereDisksToDetach(ctx, nodeVolumeMap)
		framework.ExpectNoError(err)

		for _, pvcClaim := range pvcClaimList {
			err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvcClaim, namespace)
			framework.ExpectNoError(err)
		}
	})
})

// getClaimsForPod returns the names of the PVCs referenced by the given pod.
// volumesPerPod is used only to size the result; non-PVC volumes are skipped.
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
	pvcClaimList := make([]string, 0, volumesPerPod)
	for _, volumespec := range pod.Spec.Volumes {
		if volumespec.PersistentVolumeClaim != nil {
			pvcClaimList = append(pvcClaimList, volumespec.PersistentVolumeClaim.ClaimName)
		}
	}
	return pvcClaimList
}

// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(ctx context.Context, f *framework.Framework, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
	defer ginkgo.GinkgoRecover()
	client := f.ClientSet
	namespace := f.Namespace.Name
	nodeVolumeMap := make(map[string][]string)
	nodeSelectorIndex := 0
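	// Create pods in batches of volumesPerPod PVCs each (the final batch may be
	// smaller), cycling round-robin through the labelled nodes via the node
	// selectors, and record which volume paths end up attached to which node.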
	for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
		if (volumeCountPerInstance - index) < volumesPerPod {
			volumesPerPod = volumeCountPerInstance - index
		}
		pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
		for i := 0; i < volumesPerPod; i++ {
			ginkgo.By("Creating PVC using the Storage Class")
			pvclaim, err := e2epv.CreatePVC(ctx, client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)]))
			framework.ExpectNoError(err)
			pvclaims[i] = pvclaim
		}

		ginkgo.By("Waiting for claim to be in bound phase")
		persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(ctx, client, pvclaims, f.Timeouts.ClaimProvision)
		framework.ExpectNoError(err)

		ginkgo.By("Creating pod to attach PV to the node")
		nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
		// Create pod to attach Volume to Node
		pod, err := e2epod.CreatePod(ctx, client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, f.NamespacePodSecurityLevel, "")
		framework.ExpectNoError(err)

		for _, pv := range persistentvolumes {
			nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
		}
		ginkgo.By("Verify the volume is accessible and available in the pod")
		verifyVSphereVolumesAccessible(ctx, client, pod, persistentvolumes)
		nodeSelectorIndex++
	}
	// Hand the per-node volume map back to the main test. The channel is shared
	// by all instances and is read exactly numberOfInstances times by the caller,
	// so it must not be closed here: closing it in one goroutine would make the
	// remaining senders panic.
	nodeVolumeMapChan <- nodeVolumeMap
}

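// createNodeLabels applies a unique vsphere_e2e_label value to every node in the
// list and returns the matching NodeSelector for each node.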
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
	var nodeSelectorList []*NodeSelector
	for i, node := range nodes.Items {
		labelVal := "vsphere_e2e_" + strconv.Itoa(i)
		nodeSelector := &NodeSelector{
			labelKey:   NodeLabelKey,
			labelValue: labelVal,
		}
		nodeSelectorList = append(nodeSelectorList, nodeSelector)
		e2enode.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
	}
	return nodeSelectorList
}