k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)
/*
Tests to verify volume provisioning on a clustered datastore:
1. Static provisioning
2. Dynamic provisioning
3. Dynamic provisioning with an SPBM policy

This test reads the following environment variables:
1. CLUSTER_DATASTORE, which should be set to a clustered datastore
2. VSPHERE_SPBM_POLICY_DS_CLUSTER, which should be set to a tag-based SPBM policy whose tag is attached to the clustered datastore
*/
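// The environment is typically prepared before launching the suite, for
// example (the values below are illustrative, not real inventory names; they
// must match the datastore cluster and SPBM policy in the test vCenter):
//
//	export CLUSTER_DATASTORE="DatastoreCluster/sharedVmfs-0"
//	export VSPHERE_SPBM_POLICY_DS_CLUSTER="tag-based-ds-cluster-policy"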
var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore", feature.Vsphere, func() {
	f := framework.NewDefaultFramework("volume-provision")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	var (
		client           clientset.Interface
		namespace        string
		scParameters     map[string]string
		clusterDatastore string
		nodeInfo         *NodeInfo
	)

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		Bootstrap(f)
		client = f.ClientSet
		namespace = f.Namespace.Name
		nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, client)
		scParameters = make(map[string]string)
		clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
	})

	/*
		Steps:
		1. Create volume options with the datastore set to the clustered datastore
		2. Create a vSphere volume
		3. Create a pod spec referencing the volume path, and create the corresponding pod
		4. Verify the disk is attached
		5. Delete the pod and wait for the disk to be detached
		6. Delete the volume
	*/

	ginkgo.It("verify static provisioning on clustered datastore", func(ctx context.Context) {
		ginkgo.By("Creating a test vsphere volume")
		volumeOptions := new(VolumeOptions)
		volumeOptions.CapacityKB = 2097152 // 2 GiB
		volumeOptions.Name = "e2e-vmdk-" + namespace
		volumeOptions.Datastore = clusterDatastore

		volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
		framework.ExpectNoError(err)

		defer func() {
			ginkgo.By("Deleting the vsphere volume")
			framework.ExpectNoError(nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef))
		}()

		podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)

		ginkgo.By("Creating pod")
		pod, err := client.CoreV1().Pods(namespace).Create(ctx, podspec, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.By("Waiting for pod to be ready")
		gomega.Expect(e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)).To(gomega.Succeed())

		// Refresh the pod object so that pod.Spec.NodeName reflects the node it was scheduled to.
		pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		nodeName := pod.Spec.NodeName

		ginkgo.By("Verifying volume is attached")
		expectVolumeToBeAttached(ctx, nodeName, volumePath)

		ginkgo.By("Deleting pod")
		err = e2epod.DeletePodWithWait(ctx, client, pod)
		framework.ExpectNoError(err)

		ginkgo.By("Waiting for volumes to be detached from the node")
		err = waitForVSphereDiskToDetach(ctx, volumePath, nodeName)
		framework.ExpectNoError(err)
	})

	/*
		Steps:
		1. Create storage class parameters with the datastore set to the clustered datastore name
		2. Call invokeValidPolicyTest, the shared helper that runs the e2e dynamic provisioning flow (a sketch of that helper follows this spec)
	*/
	ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func(ctx context.Context) {
		scParameters[Datastore] = clusterDatastore
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})
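
	// invokeValidPolicyTest is defined elsewhere in this package. As a rough
	// sketch (an assumption about the helper's flow, not a verbatim copy), it
	// exercises the full dynamic provisioning round trip:
	//
	//	sc, _ := client.StorageV1().StorageClasses().Create(ctx, ...)   // vsphere-volume SC with scParameters
	//	pvc, _ := e2epv.CreatePVC(ctx, client, namespace, ...)          // claim against that SC
	//	pvs, _ := e2epv.WaitForPVClaimBoundPhase(ctx, client, ...)      // wait for the claim to bind
	//	pod, _ := e2epod.CreatePod(ctx, client, namespace, nil, ...)    // pod that attaches the provisioned volume
	//	verifyVSphereVolumesAccessible(ctx, client, pod, pvs)           // volume is usable inside the pod
	//	// ...then delete the pod and wait for the disk to detach.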

	/*
		Steps:
		1. Create storage class parameters with the storage policy set to a tag-based SPBM policy
		2. Call invokeValidPolicyTest, the same shared e2e dynamic provisioning helper as above
	*/
	ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func(ctx context.Context) {
		policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
		scParameters[SpbmStoragePolicy] = policyDatastoreCluster
		invokeValidPolicyTest(ctx, f, client, namespace, scParameters)
	})
})