k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_statefulsets.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package vsphere

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

/*
	This test performs the following operations:

	Steps
	1. Create a storage class with the thin diskformat.
	2. Create an nginx service.
	3. Create an nginx statefulset with 3 replicas.
	4. Wait until all pods are Ready and all PVCs are bound to PVs.
	5. Verify the volumes are accessible in every statefulset pod by creating an empty file on the mount path.
	6. Scale the statefulset down to 2 replicas.
	7. Scale the statefulset back up to the original replica count (3).
	8. Scale the statefulset down to 0 replicas and delete all pods.
	9. Delete all PVCs from the test namespace.
	10. Delete the storage class.

	An illustrative sketch of the StorageClass from step 1 follows this comment.
*/
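
// A minimal sketch, not part of the test, of the StorageClass that step 1 describes, built
// directly with the Kubernetes storage/v1 API types (storagev1 here stands for
// k8s.io/api/storage/v1, which this file does not import). The test itself builds the spec
// through the package helper getVSphereStorageClassSpec and creates it with
// client.StorageV1().StorageClasses().Create further down; the provisioner name below is the
// in-tree vSphere volume plugin's.
//
//	sc := &storagev1.StorageClass{
//		ObjectMeta:  metav1.ObjectMeta{Name: storageclassname}, // "nginx-sc", see const below
//		Provisioner: "kubernetes.io/vsphere-volume",            // in-tree vSphere provisioner
//		Parameters:  map[string]string{"diskformat": "thin"},   // thin-provisioned VMDK
//	}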

const (
	manifestPath     = "test/e2e/testing-manifests/statefulset/nginx"
	mountPath        = "/usr/share/nginx/html"
	storageclassname = "nginx-sc"
)

var _ = utils.SIGDescribe("vsphere statefulset", feature.Vsphere, func() {
	f := framework.NewDefaultFramework("vsphere-statefulset")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	var (
		namespace string
		client    clientset.Interface
	)
	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessProviderIs("vsphere")
		namespace = f.Namespace.Name
		client = f.ClientSet
		Bootstrap(f)
	})

	ginkgo.It("vsphere statefulset testing", func(ctx context.Context) {
		ginkgo.By("Creating StorageClass for Statefulset")
		scParameters := make(map[string]string)
		scParameters["diskformat"] = "thin"
		scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "")
		sc, err := client.StorageV1().StorageClasses().Create(ctx, scSpec, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		ginkgo.DeferCleanup(framework.IgnoreNotFound(client.StorageV1().StorageClasses().Delete), sc.Name, metav1.DeleteOptions{})

		ginkgo.By("Creating statefulset")

		statefulset := e2estatefulset.CreateStatefulSet(ctx, client, manifestPath, namespace)
		ginkgo.DeferCleanup(e2estatefulset.DeleteAllStatefulSets, client, namespace)
		replicas := *(statefulset.Spec.Replicas)
		// Wait for all pods in the statefulset to reach the Ready status
		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)
		framework.ExpectNoError(e2estatefulset.CheckMount(ctx, client, statefulset, mountPath))
		ssPodsBeforeScaleDown := e2estatefulset.GetPodList(ctx, client, statefulset)
		gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
		gomega.Expect(ssPodsBeforeScaleDown.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match with number of replicas")

		// Get the list of Volumes attached to Pods before scale down
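		// The map is keyed by the vSphere volume (VMDK) path, with the claiming PVC's name as
		// the value; it is consulted again after the scale-up to verify that the same volumes
		// are re-attached rather than newly provisioned ones.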
		volumesBeforeScaleDown := make(map[string]string)
		for _, sspod := range ssPodsBeforeScaleDown.Items {
			_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			for _, volumespec := range sspod.Spec.Volumes {
				if volumespec.PersistentVolumeClaim != nil {
					volumePath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
					volumesBeforeScaleDown[volumePath] = volumespec.PersistentVolumeClaim.ClaimName
				}
			}
		}

		ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1))
		_, scaledownErr := e2estatefulset.Scale(ctx, client, statefulset, replicas-1)
		framework.ExpectNoError(scaledownErr)
		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas-1)

		// After scale down, verify vsphere volumes are detached from deleted pods
		ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
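		// Iterate over the pre-scale-down pod list: a NotFound error from Get means the pod was
		// removed by the scale-down, and those are exactly the pods whose volumes must detach
		// from their former nodes.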
		for _, sspod := range ssPodsBeforeScaleDown.Items {
			_, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
			if err != nil {
				if !apierrors.IsNotFound(err) {
					framework.Failf("Error in getting Pod %s: %v", sspod.Name, err)
				}
				for _, volumespec := range sspod.Spec.Volumes {
					if volumespec.PersistentVolumeClaim != nil {
						vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
						framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
						framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, vSpherediskPath, sspod.Spec.NodeName))
					}
				}
			}
		}

		ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas))
		_, scaleupErr := e2estatefulset.Scale(ctx, client, statefulset, replicas)
		framework.ExpectNoError(scaleupErr)
		e2estatefulset.WaitForStatusReplicas(ctx, client, statefulset, replicas)
		e2estatefulset.WaitForStatusReadyReplicas(ctx, client, statefulset, replicas)

		ssPodsAfterScaleUp := e2estatefulset.GetPodList(ctx, client, statefulset)
		gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name))
		gomega.Expect(ssPodsAfterScaleUp.Items).To(gomega.HaveLen(int(replicas)), "Number of Pods in the statefulset should match with number of replicas")

		// After scale up, verify all vsphere volumes are attached to node VMs.
		ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up")
		for _, sspod := range ssPodsAfterScaleUp.Items {
			err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, client, sspod.Name, statefulset.Namespace, framework.PodStartTimeout)
			framework.ExpectNoError(err)
			pod, err := client.CoreV1().Pods(namespace).Get(ctx, sspod.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			for _, volumespec := range pod.Spec.Volumes {
				if volumespec.PersistentVolumeClaim != nil {
					vSpherediskPath := getvSphereVolumePathFromClaim(ctx, client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
					framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
					// Verify that the scale-up re-attached the same volumes and did not introduce new ones
					if volumesBeforeScaleDown[vSpherediskPath] == "" {
						framework.Failf("Volume: %q was not attached to the Node: %q before scale down", vSpherediskPath, sspod.Spec.NodeName)
					}
					isVolumeAttached, verifyDiskAttachedError := diskIsAttached(ctx, vSpherediskPath, sspod.Spec.NodeName)
					if !isVolumeAttached {
						framework.Failf("Volume: %q is not attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
					}
					framework.ExpectNoError(verifyDiskAttachedError)
				}
			}
		}
	})
})