k8s.io/kubernetes@v1.29.3/test/e2e/storage/vsphere/vsphere_utils.go

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package vsphere
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"path/filepath"
    23  	"regexp"
    24  	"strings"
    25  	"time"
    26  
    27  	"github.com/onsi/ginkgo/v2"
    28  	"github.com/onsi/gomega"
    29  	"github.com/vmware/govmomi/find"
    30  	"github.com/vmware/govmomi/object"
    31  	"github.com/vmware/govmomi/vim25/mo"
    32  	vim25types "github.com/vmware/govmomi/vim25/types"
    33  	"k8s.io/klog/v2"
    34  
    35  	v1 "k8s.io/api/core/v1"
    36  	storagev1 "k8s.io/api/storage/v1"
    37  	"k8s.io/apimachinery/pkg/api/resource"
    38  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    39  	"k8s.io/apimachinery/pkg/util/rand"
    40  	"k8s.io/apimachinery/pkg/util/uuid"
    41  	"k8s.io/apimachinery/pkg/util/wait"
    42  	clientset "k8s.io/client-go/kubernetes"
    43  	"k8s.io/kubernetes/test/e2e/framework"
    44  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    45  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    46  	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    47  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    48  	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
    49  	"k8s.io/kubernetes/test/e2e/storage/utils"
    50  	imageutils "k8s.io/kubernetes/test/utils/image"
    51  )
    52  
    53  const (
    54  	volumesPerNode = 55
    55  	storageclass1  = "sc-default"
    56  	storageclass2  = "sc-vsan"
    57  	storageclass3  = "sc-spbm"
    58  	storageclass4  = "sc-user-specified-ds"
    59  	dummyDiskName  = "kube-dummyDisk.vmdk"
    60  	providerPrefix = "vsphere://"
    61  )
    62  
    63  // volumeState represents the state of a volume.
    64  type volumeState int32
    65  
    66  const (
    67  	volumeStateDetached volumeState = 1
    68  	volumeStateAttached volumeState = 2
    69  )
    70  
    71  // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
    72  func waitForVSphereDisksToDetach(ctx context.Context, nodeVolumes map[string][]string) error {
    73  	var (
    74  		detachTimeout  = 5 * time.Minute
    75  		detachPollTime = 10 * time.Second
    76  	)
    77  	waitErr := wait.PollWithContext(ctx, detachPollTime, detachTimeout, func(ctx context.Context) (bool, error) {
    78  		attachedResult, err := disksAreAttached(ctx, nodeVolumes)
    79  		if err != nil {
    80  			return false, err
    81  		}
    82  		for nodeName, nodeVolumes := range attachedResult {
    83  			for volumePath, attached := range nodeVolumes {
    84  				if attached {
     85  					framework.Logf("Volume %q is still attached to %q.", volumePath, nodeName)
    86  					return false, nil
    87  				}
    88  			}
    89  		}
     90  		framework.Logf("Volumes are successfully detached from all the nodes: %+v", nodeVolumes)
    91  		return true, nil
    92  	})
    93  	if waitErr != nil {
    94  		if wait.Interrupted(waitErr) {
    95  			return fmt.Errorf("volumes have not detached after %v: %v", detachTimeout, waitErr)
    96  		}
    97  		return fmt.Errorf("error waiting for volumes to detach: %v", waitErr)
    98  	}
    99  	return nil
   100  }
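
         // A minimal usage sketch (the node name and volume path are assumed values,
         // not fixtures from this file):
         //
         //	nodeVolumes := map[string][]string{
         //		"node-1": {"[vsanDatastore] kubevols/e2e-vmdk-1.vmdk"},
         //	}
         //	framework.ExpectNoError(waitForVSphereDisksToDetach(ctx, nodeVolumes))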
   101  
   102  // Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
   103  func waitForVSphereDiskStatus(ctx context.Context, volumePath string, nodeName string, expectedState volumeState) error {
   104  	var (
   105  		currentState volumeState
   106  		timeout      = 6 * time.Minute
   107  		pollTime     = 10 * time.Second
   108  	)
   109  
   110  	var attachedState = map[bool]volumeState{
   111  		true:  volumeStateAttached,
   112  		false: volumeStateDetached,
   113  	}
   114  
   115  	var attachedStateMsg = map[volumeState]string{
   116  		volumeStateAttached: "attached to",
   117  		volumeStateDetached: "detached from",
   118  	}
   119  
   120  	waitErr := wait.PollWithContext(ctx, pollTime, timeout, func(ctx context.Context) (bool, error) {
   121  		diskAttached, err := diskIsAttached(ctx, volumePath, nodeName)
   122  		if err != nil {
   123  			return true, err
   124  		}
   125  
   126  		currentState = attachedState[diskAttached]
   127  		if currentState == expectedState {
   128  			framework.Logf("Volume %q has successfully %s %q", volumePath, attachedStateMsg[currentState], nodeName)
   129  			return true, nil
   130  		}
   131  		framework.Logf("Waiting for Volume %q to be %s %q.", volumePath, attachedStateMsg[expectedState], nodeName)
   132  		return false, nil
   133  	})
   134  	if waitErr != nil {
   135  		if wait.Interrupted(waitErr) {
   136  			return fmt.Errorf("volume %q is not %s %q after %v: %v", volumePath, attachedStateMsg[expectedState], nodeName, timeout, waitErr)
   137  		}
   138  		return fmt.Errorf("error waiting for volume %q to be %s %q: %v", volumePath, attachedStateMsg[expectedState], nodeName, waitErr)
   139  	}
   140  	return nil
   141  }
   142  
    143  // Wait until vsphere vmdk is attached to the given node or time out after 6 minutes
   144  func waitForVSphereDiskToAttach(ctx context.Context, volumePath string, nodeName string) error {
   145  	return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateAttached)
   146  }
   147  
   148  // Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
   149  func waitForVSphereDiskToDetach(ctx context.Context, volumePath string, nodeName string) error {
   150  	return waitForVSphereDiskStatus(ctx, volumePath, nodeName, volumeStateDetached)
   151  }
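
         // A typical wait sequence might look like the following (volumePath and the
         // pod are placeholders): wait for the attach once the pod is scheduled, and
         // for the detach after the pod is deleted.
         //
         //	framework.ExpectNoError(waitForVSphereDiskToAttach(ctx, volumePath, pod.Spec.NodeName))
         //	// ... exercise the volume, delete the pod ...
         //	framework.ExpectNoError(waitForVSphereDiskToDetach(ctx, volumePath, pod.Spec.NodeName))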
   152  
   153  // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
   154  func getVSpherePersistentVolumeSpec(volumePath string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy, labels map[string]string) *v1.PersistentVolume {
   155  	return e2epv.MakePersistentVolume(e2epv.PersistentVolumeConfig{
   156  		NamePrefix: "vspherepv-",
   157  		PVSource: v1.PersistentVolumeSource{
   158  			VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
   159  				VolumePath: volumePath,
   160  				FSType:     "ext4",
   161  			},
   162  		},
   163  		ReclaimPolicy: persistentVolumeReclaimPolicy,
   164  		Capacity:      "2Gi",
   165  		AccessModes: []v1.PersistentVolumeAccessMode{
   166  			v1.ReadWriteOnce,
   167  		},
   168  		Labels: labels,
   169  	})
   170  }
   171  
    172  // function to create a vsphere persistent volume claim spec with the given selector labels.
   173  func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]string) *v1.PersistentVolumeClaim {
   174  	var (
   175  		pvc *v1.PersistentVolumeClaim
   176  	)
   177  	pvc = &v1.PersistentVolumeClaim{
   178  		ObjectMeta: metav1.ObjectMeta{
   179  			GenerateName: "pvc-",
   180  			Namespace:    namespace,
   181  		},
   182  		Spec: v1.PersistentVolumeClaimSpec{
   183  			AccessModes: []v1.PersistentVolumeAccessMode{
   184  				v1.ReadWriteOnce,
   185  			},
   186  			Resources: v1.VolumeResourceRequirements{
   187  				Requests: v1.ResourceList{
   188  					v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
   189  				},
   190  			},
   191  		},
   192  	}
   193  	if labels != nil {
   194  		pvc.Spec.Selector = &metav1.LabelSelector{MatchLabels: labels}
   195  	}
   196  
   197  	return pvc
   198  }
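
         // Sketch of how the two helpers above pair up for statically provisioned
         // volumes (the vmdk path and label value are assumed):
         //
         //	labels := map[string]string{"volume-id": "vsphere-e2e"}
         //	pvSpec := getVSpherePersistentVolumeSpec("[vsanDatastore] kubevols/e2e.vmdk", v1.PersistentVolumeReclaimRetain, labels)
         //	pvcSpec := getVSpherePersistentVolumeClaimSpec(namespace, labels)
         //	// create pvSpec and pvcSpec with the client and wait for the claim to bind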
   199  
   200  // function to write content to the volume backed by given PVC
   201  func writeContentToVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
   202  	utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
   203  	framework.Logf("Done with writing content to volume")
   204  }
   205  
    206  // function to verify that the volume backed by the given PVC contains the expected content
   207  func verifyContentOfVSpherePV(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
   208  	utils.RunInPodWithVolume(ctx, client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
   209  	framework.Logf("Successfully verified content of the volume")
   210  }
   211  
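         // getVSphereStorageClassSpec returns a vsphere-volume StorageClass spec with the given
         // parameters, allowed-topology zones and volume binding mode.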
   212  func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass {
   213  	var sc *storagev1.StorageClass
   214  
   215  	sc = &storagev1.StorageClass{
   216  		TypeMeta: metav1.TypeMeta{
   217  			Kind: "StorageClass",
   218  		},
   219  		ObjectMeta: metav1.ObjectMeta{
   220  			Name: name,
   221  		},
   222  		Provisioner: "kubernetes.io/vsphere-volume",
   223  	}
   224  	if scParameters != nil {
   225  		sc.Parameters = scParameters
   226  	}
   227  	if zones != nil {
   228  		term := v1.TopologySelectorTerm{
   229  			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
   230  				{
   231  					Key:    v1.LabelTopologyZone,
   232  					Values: zones,
   233  				},
   234  			},
   235  		}
   236  		sc.AllowedTopologies = append(sc.AllowedTopologies, term)
   237  	}
   238  	if volumeBindingMode != "" {
   239  		mode := storagev1.VolumeBindingMode(string(volumeBindingMode))
   240  		sc.VolumeBindingMode = &mode
   241  	}
   242  	return sc
   243  }
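
         // Illustrative call (the diskformat parameter and zone are assumed values):
         //
         //	sc := getVSphereStorageClassSpec(storageclass1, map[string]string{"diskformat": "thin"},
         //		[]string{"us-east-1a"}, storagev1.VolumeBindingWaitForFirstConsumer)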
   244  
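         // getVSphereClaimSpecWithStorageClass returns a PVC spec of the given size that
         // requests the given StorageClass.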
   245  func getVSphereClaimSpecWithStorageClass(ns string, diskSize string, storageclass *storagev1.StorageClass) *v1.PersistentVolumeClaim {
   246  	claim := &v1.PersistentVolumeClaim{
   247  		ObjectMeta: metav1.ObjectMeta{
   248  			GenerateName: "pvc-",
   249  			Namespace:    ns,
   250  		},
   251  		Spec: v1.PersistentVolumeClaimSpec{
   252  			AccessModes: []v1.PersistentVolumeAccessMode{
   253  				v1.ReadWriteOnce,
   254  			},
   255  			Resources: v1.VolumeResourceRequirements{
   256  				Requests: v1.ResourceList{
   257  					v1.ResourceName(v1.ResourceStorage): resource.MustParse(diskSize),
   258  				},
   259  			},
   260  			StorageClassName: &(storageclass.Name),
   261  		},
   262  	}
   263  	return claim
   264  }
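
         // A dynamic-provisioning sketch, assuming sc is a StorageClass built with
         // getVSphereStorageClassSpec and "2Gi" is an arbitrary size:
         //
         //	pvc := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc)
         //	pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(ctx, pvc, metav1.CreateOptions{})
         //	framework.ExpectNoError(err)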
   265  
   266  // func to get pod spec with given volume claim, node selector labels and command
   267  func getVSpherePodSpecWithClaim(claimName string, nodeSelectorKV map[string]string, command string) *v1.Pod {
   268  	pod := &v1.Pod{
   269  		TypeMeta: metav1.TypeMeta{
   270  			Kind:       "Pod",
   271  			APIVersion: "v1",
   272  		},
   273  		ObjectMeta: metav1.ObjectMeta{
   274  			GenerateName: "pod-pvc-",
   275  		},
   276  		Spec: v1.PodSpec{
   277  			Containers: []v1.Container{
   278  				{
   279  					Name:    "volume-tester",
   280  					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
   281  					Command: []string{"/bin/sh"},
   282  					Args:    []string{"-c", command},
   283  					VolumeMounts: []v1.VolumeMount{
   284  						{
   285  							Name:      "my-volume",
   286  							MountPath: "/mnt/test",
   287  						},
   288  					},
   289  				},
   290  			},
   291  			RestartPolicy: v1.RestartPolicyNever,
   292  			Volumes: []v1.Volume{
   293  				{
   294  					Name: "my-volume",
   295  					VolumeSource: v1.VolumeSource{
   296  						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
   297  							ClaimName: claimName,
   298  							ReadOnly:  false,
   299  						},
   300  					},
   301  				},
   302  			},
   303  		},
   304  	}
   305  	if nodeSelectorKV != nil {
   306  		pod.Spec.NodeSelector = nodeSelectorKV
   307  	}
   308  	return pod
   309  }
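
         // Example pod built from a bound claim (the command is a placeholder that
         // writes a marker file and then keeps the container alive):
         //
         //	pod := getVSpherePodSpecWithClaim(pvc.Name, nil, "echo ready > /mnt/test/data && while true; do sleep 2; done")
         //	pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
         //	framework.ExpectNoError(err)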
   310  
   311  // func to get pod spec with given volume paths, node selector labels and container commands
   312  func getVSpherePodSpecWithVolumePaths(volumePaths []string, keyValuelabel map[string]string, commands []string) *v1.Pod {
   313  	var volumeMounts []v1.VolumeMount
   314  	var volumes []v1.Volume
   315  
   316  	for index, volumePath := range volumePaths {
   317  		name := fmt.Sprintf("volume%v", index+1)
   318  		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: name, MountPath: "/mnt/" + name})
   319  		vsphereVolume := new(v1.VsphereVirtualDiskVolumeSource)
   320  		vsphereVolume.VolumePath = volumePath
   321  		vsphereVolume.FSType = "ext4"
   322  		volumes = append(volumes, v1.Volume{Name: name})
   323  		volumes[index].VolumeSource.VsphereVolume = vsphereVolume
   324  	}
   325  
    326  	if len(commands) == 0 {
   327  		commands = []string{
   328  			"/bin/sh",
   329  			"-c",
   330  			"while true; do sleep 2; done",
   331  		}
   332  	}
   333  	pod := &v1.Pod{
   334  		TypeMeta: metav1.TypeMeta{
   335  			Kind:       "Pod",
   336  			APIVersion: "v1",
   337  		},
   338  		ObjectMeta: metav1.ObjectMeta{
   339  			GenerateName: "vsphere-e2e-",
   340  		},
   341  		Spec: v1.PodSpec{
   342  			Containers: []v1.Container{
   343  				{
   344  					Name:         "vsphere-e2e-container-" + string(uuid.NewUUID()),
   345  					Image:        imageutils.GetE2EImage(imageutils.BusyBox),
   346  					Command:      commands,
   347  					VolumeMounts: volumeMounts,
   348  				},
   349  			},
   350  			RestartPolicy: v1.RestartPolicyNever,
   351  			Volumes:       volumes,
   352  		},
   353  	}
   354  
   355  	if keyValuelabel != nil {
   356  		pod.Spec.NodeSelector = keyValuelabel
   357  	}
   358  	return pod
   359  }
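
         // Sketch with two pre-provisioned vmdk paths (assumed values); passing nil
         // commands falls back to the default sleep loop, so /mnt/volume1 and
         // /mnt/volume2 stay mounted for later checks:
         //
         //	pod := getVSpherePodSpecWithVolumePaths([]string{volumePath1, volumePath2}, nil, nil)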
   360  
   361  func verifyFilesExistOnVSphereVolume(namespace string, podName string, filePaths ...string) {
   362  	for _, filePath := range filePaths {
   363  		_, err := e2ekubectl.RunKubectl(namespace, "exec", podName, "--", "/bin/ls", filePath)
   364  		framework.ExpectNoError(err, fmt.Sprintf("failed to verify file: %q on the pod: %q", filePath, podName))
   365  	}
   366  }
   367  
   368  func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths []string) {
   369  	for _, filePath := range filePaths {
   370  		err := e2eoutput.CreateEmptyFileOnPod(namespace, podName, filePath)
   371  		framework.ExpectNoError(err)
   372  	}
   373  }
   374  
   375  // verify volumes are attached to the node and are accessible in pod
   376  func verifyVSphereVolumesAccessible(ctx context.Context, c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
   377  	nodeName := pod.Spec.NodeName
   378  	namespace := pod.Namespace
   379  	for index, pv := range persistentvolumes {
   380  		// Verify disks are attached to the node
   381  		isAttached, err := diskIsAttached(ctx, pv.Spec.VsphereVolume.VolumePath, nodeName)
   382  		framework.ExpectNoError(err)
   383  		if !isAttached {
   384  			framework.Failf("disk %v is not attached to the node: %v", pv.Spec.VsphereVolume.VolumePath, nodeName)
   385  		}
   386  		// Verify Volumes are accessible
   387  		filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt")
   388  		_, err = e2eoutput.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute)
   389  		framework.ExpectNoError(err)
   390  	}
   391  }
   392  
    393  // verify that each volume is created on a datastore present in all the specified zones
   394  func verifyVolumeCreationOnRightZone(ctx context.Context, persistentvolumes []*v1.PersistentVolume, nodeName string, zones []string) {
   395  	for _, pv := range persistentvolumes {
   396  		volumePath := pv.Spec.VsphereVolume.VolumePath
   397  		// Extract datastoreName from the volume path in the pv spec
   398  		// For example : "vsanDatastore" is extracted from "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk"
   399  		datastorePathObj, _ := getDatastorePathObjFromVMDiskPath(volumePath)
   400  		datastoreName := datastorePathObj.Datastore
   401  		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
   402  		ctx, cancel := context.WithCancel(ctx)
   403  		defer cancel()
   404  		// Get the datastore object reference from the datastore name
   405  		datastoreRef, err := nodeInfo.VSphere.GetDatastoreRefFromName(ctx, nodeInfo.DataCenterRef, datastoreName)
   406  		if err != nil {
   407  			framework.ExpectNoError(err)
   408  		}
   409  		// Find common datastores among the specified zones
   410  		var datastoreCountMap = make(map[string]int)
   411  		numZones := len(zones)
   412  		var commonDatastores []string
   413  		for _, zone := range zones {
   414  			datastoreInZone := TestContext.NodeMapper.GetDatastoresInZone(nodeInfo.VSphere.Config.Hostname, zone)
   415  			for _, datastore := range datastoreInZone {
   416  				datastoreCountMap[datastore] = datastoreCountMap[datastore] + 1
   417  				if datastoreCountMap[datastore] == numZones {
   418  					commonDatastores = append(commonDatastores, datastore)
   419  				}
   420  			}
   421  		}
   422  		gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.")
   423  	}
   424  }
   425  
   426  // Get vSphere Volume Path from PVC
   427  func getvSphereVolumePathFromClaim(ctx context.Context, client clientset.Interface, namespace string, claimName string) string {
   428  	pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, claimName, metav1.GetOptions{})
   429  	framework.ExpectNoError(err)
   430  	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvclaim.Spec.VolumeName, metav1.GetOptions{})
   431  	framework.ExpectNoError(err)
   432  	return pv.Spec.VsphereVolume.VolumePath
   433  }
   434  
    435  // Get the canonical volume path for the given volume path.
    436  // Example1: The canonical path for the volume path [vsanDatastore] kubevols/volume.vmdk is [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
    437  // Example2: The canonical path for the volume path [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk is the same as the volume path.
   438  func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
   439  	var folderID string
   440  	canonicalVolumePath := volumePath
   441  	dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
   442  	if err != nil {
   443  		return "", err
   444  	}
   445  	dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
   446  	if len(dsPath) <= 1 {
   447  		return canonicalVolumePath, nil
   448  	}
   449  	datastore := dsPathObj.Datastore
   450  	dsFolder := dsPath[0]
    451  	// Resolve the datastore folder name to its folder ID when it is not already a UUID
   452  	if !isValidUUID(dsFolder) {
   453  		dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + dummyDiskName
    454  		// Query a non-existent dummy disk in the datastore folder.
    455  		// The query fails, and the error message contains the canonical path with the folder ID.
   456  		_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
   457  		if err != nil {
   458  			re := regexp.MustCompile("File (.*?) was not found")
   459  			match := re.FindStringSubmatch(err.Error())
   460  			canonicalVolumePath = match[1]
   461  		}
   462  	}
   463  	diskPath := getPathFromVMDiskPath(canonicalVolumePath)
   464  	if diskPath == "" {
    465  		return "", fmt.Errorf("failed to parse canonicalVolumePath: %s in getCanonicalVolumePath method", canonicalVolumePath)
   466  	}
   467  	folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
   468  	canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
   469  	return canonicalVolumePath, nil
   470  }
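
         // For example, given a datacenter object resolved elsewhere, the folder-style
         // path from the comment above resolves to its UUID form:
         //
         //	canonicalPath, err := getCanonicalVolumePath(ctx, datacenter, "[vsanDatastore] kubevols/volume.vmdk")
         //	// canonicalPath: "[vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk"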
   471  
   472  // getPathFromVMDiskPath retrieves the path from VM Disk Path.
   473  // Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
   474  func getPathFromVMDiskPath(vmDiskPath string) string {
   475  	datastorePathObj := new(object.DatastorePath)
   476  	isSuccess := datastorePathObj.FromString(vmDiskPath)
   477  	if !isSuccess {
   478  		framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
   479  		return ""
   480  	}
   481  	return datastorePathObj.Path
   482  }
   483  
   484  // getDatastorePathObjFromVMDiskPath gets the datastorePathObj from VM disk path.
   485  func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
   486  	datastorePathObj := new(object.DatastorePath)
   487  	isSuccess := datastorePathObj.FromString(vmDiskPath)
   488  	if !isSuccess {
   489  		framework.Logf("Failed to parse volPath: %s", vmDiskPath)
   490  		return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
   491  	}
   492  	return datastorePathObj, nil
   493  }
   494  
   495  // getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
   496  func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
   497  	if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
   498  		diskPath += ".vmdk"
   499  	}
   500  	vdm := object.NewVirtualDiskManager(dc.Client())
   501  	// Returns uuid of vmdk virtual disk
   502  	diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
   503  
   504  	if err != nil {
   505  		klog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
   506  		return "", err
   507  	}
   508  	diskUUID = formatVirtualDiskUUID(diskUUID)
   509  	return diskUUID, nil
   510  }
   511  
    512  // formatVirtualDiskUUID removes any spaces and hyphens from the UUID and lower-cases it
   513  // Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
   514  func formatVirtualDiskUUID(uuid string) string {
    515  	uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
    516  	uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
    517  	return strings.ToLower(uuidWithNoHyphens)
   518  }
   519  
   520  // isValidUUID checks if the string is a valid UUID.
   521  func isValidUUID(uuid string) bool {
    522  	r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
   523  	return r.MatchString(uuid)
   524  }
   525  
   526  // removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
    527  // for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
    528  // for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value is unchanged: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
   529  func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
   530  	datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
   531  	if filepath.Base(datastore) != datastore {
   532  		vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
   533  	}
   534  	return vDiskPath
   535  }
   536  
   537  // getVirtualDeviceByPath gets the virtual device by path
   538  func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
   539  	vmDevices, err := vm.Device(ctx)
   540  	if err != nil {
   541  		framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
   542  		return nil, err
   543  	}
   544  
   545  	// filter vm devices to retrieve device for the given vmdk file identified by disk path
   546  	for _, device := range vmDevices {
   547  		if vmDevices.TypeName(device) == "VirtualDisk" {
   548  			virtualDevice := device.GetVirtualDevice()
   549  			if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
   550  				if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
   551  					framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
   552  					return device, nil
   553  				}
   554  				framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
   555  			}
   556  		}
   557  	}
   558  	return nil, nil
   559  }
   560  
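         // matchVirtualDiskAndVolPath reports whether two vmdk paths refer to the same
         // disk, ignoring the ".vmdk" extension.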
   561  func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
   562  	fileExt := ".vmdk"
   563  	diskPath = strings.TrimSuffix(diskPath, fileExt)
   564  	volPath = strings.TrimSuffix(volPath, fileExt)
   565  	return diskPath == volPath
   566  }
   567  
    568  // convertVolPathsToDevicePaths removes the cluster or folder path from volPaths and converts them to canonical paths
   569  func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
   570  	vmVolumes := make(map[string][]string)
   571  	for nodeName, volPaths := range nodeVolumes {
   572  		nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
   573  		datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
   574  		for i, volPath := range volPaths {
   575  			deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
   576  			if err != nil {
    577  				framework.Logf("Failed to convert vsphere volume path %s to device path. err: %+v", volPath, err)
   578  				return nil, err
   579  			}
   580  			volPaths[i] = deviceVolPath
   581  		}
   582  		vmVolumes[nodeName] = volPaths
   583  	}
   584  	return vmVolumes, nil
   585  }
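
         // Usage sketch: the same node-to-volume-paths map passed to disksAreAttached
         // below can be normalized up front (the node name and path are placeholders):
         //
         //	vmVolumes, err := convertVolPathsToDevicePaths(ctx, map[string][]string{
         //		"node-1": {"[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1.vmdk"},
         //	})
         //	framework.ExpectNoError(err)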
   586  
   587  // convertVolPathToDevicePath takes volPath and returns canonical volume path
   588  func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
   589  	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
   590  	// Get the canonical volume path for volPath.
   591  	canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
   592  	if err != nil {
   593  		framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
   594  		return "", err
   595  	}
   596  	// Check if the volume path contains .vmdk extension. If not, add the extension and update the nodeVolumes Map
   597  	if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
   598  		canonicalVolumePath += ".vmdk"
   599  	}
   600  	return canonicalVolumePath, nil
   601  }
   602  
   603  // get .vmx file path for a virtual machine
   604  func getVMXFilePath(ctx context.Context, vmObject *object.VirtualMachine) (vmxPath string) {
   605  	ctx, cancel := context.WithCancel(ctx)
   606  	defer cancel()
   607  
   608  	var nodeVM mo.VirtualMachine
   609  	err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM)
   610  	framework.ExpectNoError(err)
   611  	gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil())
   612  
   613  	vmxPath = nodeVM.Config.Files.VmPathName
   614  	framework.Logf("vmx file path is %s", vmxPath)
   615  	return vmxPath
   616  }
   617  
    618  // verify the ready node count: poll for up to 3 minutes and return true if the count matches expectedNodes
   619  func verifyReadyNodeCount(ctx context.Context, client clientset.Interface, expectedNodes int) bool {
   620  	numNodes := 0
   621  	for i := 0; i < 36; i++ {
   622  		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, client)
   623  		framework.ExpectNoError(err)
   624  
   625  		numNodes = len(nodeList.Items)
   626  		if numNodes == expectedNodes {
   627  			break
   628  		}
   629  		time.Sleep(5 * time.Second)
   630  	}
   631  	return (numNodes == expectedNodes)
   632  }
   633  
   634  // poweroff nodeVM and confirm the poweroff state
   635  func poweroffNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) {
   636  	ctx, cancel := context.WithCancel(ctx)
   637  	defer cancel()
   638  
   639  	framework.Logf("Powering off node VM %s", nodeName)
   640  
   641  	_, err := vm.PowerOff(ctx)
   642  	framework.ExpectNoError(err)
   643  	err = vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOff)
   644  	framework.ExpectNoError(err, "Unable to power off the node")
   645  }
   646  
   647  // poweron nodeVM and confirm the poweron state
   648  func poweronNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) {
   649  	ctx, cancel := context.WithCancel(ctx)
   650  	defer cancel()
   651  
   652  	framework.Logf("Powering on node VM %s", nodeName)
   653  
    654  	_, err := vm.PowerOn(ctx)
    655  	framework.ExpectNoError(err, "Unable to power on the node")
    656  	framework.ExpectNoError(vm.WaitForPowerState(ctx, vim25types.VirtualMachinePowerStatePoweredOn), "Node did not reach the powered-on state")
   657  }
   658  
   659  // unregister a nodeVM from VC
   660  func unregisterNodeVM(ctx context.Context, nodeName string, vm *object.VirtualMachine) {
   661  	ctx, cancel := context.WithCancel(ctx)
   662  	defer cancel()
   663  
   664  	poweroffNodeVM(ctx, nodeName, vm)
   665  
   666  	framework.Logf("Unregistering node VM %s", nodeName)
   667  	err := vm.Unregister(ctx)
   668  	framework.ExpectNoError(err, "Unable to unregister the node")
   669  }
   670  
   671  // register a nodeVM into a VC
   672  func registerNodeVM(ctx context.Context, nodeName, workingDir, vmxFilePath string, rpool *object.ResourcePool, host *object.HostSystem) {
   673  	ctx, cancel := context.WithCancel(ctx)
   674  	defer cancel()
   675  
   676  	framework.Logf("Registering node VM %s with vmx file path %s", nodeName, vmxFilePath)
   677  
   678  	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
   679  	finder := find.NewFinder(nodeInfo.VSphere.Client.Client, false)
   680  
   681  	vmFolder, err := finder.FolderOrDefault(ctx, workingDir)
   682  	framework.ExpectNoError(err)
   683  
   684  	registerTask, err := vmFolder.RegisterVM(ctx, vmxFilePath, nodeName, false, rpool, host)
   685  	framework.ExpectNoError(err)
   686  	err = registerTask.Wait(ctx)
   687  	framework.ExpectNoError(err)
   688  
   689  	vmPath := filepath.Join(workingDir, nodeName)
   690  	vm, err := finder.VirtualMachine(ctx, vmPath)
   691  	framework.ExpectNoError(err)
   692  
   693  	poweronNodeVM(ctx, nodeName, vm)
   694  }
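
         // A node re-registration sequence might combine these helpers as follows,
         // assuming the resource pool and host have been resolved elsewhere:
         //
         //	vmxPath := getVMXFilePath(ctx, vm)
         //	unregisterNodeVM(ctx, nodeName, vm) // powers the VM off first
         //	registerNodeVM(ctx, nodeName, workingDir, vmxPath, resourcePool, hostSystem)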
   695  
    696  // disksAreAttached takes a map of node names to volume paths and returns, per node, a map of volume path to attachment state
   697  func disksAreAttached(ctx context.Context, nodeVolumes map[string][]string) (map[string]map[string]bool, error) {
   698  	ctx, cancel := context.WithCancel(ctx)
   699  	defer cancel()
   700  
   701  	disksAttached := make(map[string]map[string]bool)
   702  	if len(nodeVolumes) == 0 {
   703  		return disksAttached, nil
   704  	}
   705  	// Convert VolPaths into canonical form so that it can be compared with the VM device path.
   706  	vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
   707  	if err != nil {
   708  		framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
   709  		return nil, err
   710  	}
   711  	for vm, volumes := range vmVolumes {
   712  		volumeAttachedMap := make(map[string]bool)
   713  		for _, volume := range volumes {
   714  			attached, err := diskIsAttached(ctx, volume, vm)
   715  			if err != nil {
   716  				return nil, err
   717  			}
   718  			volumeAttachedMap[volume] = attached
   719  		}
   720  		disksAttached[vm] = volumeAttachedMap
   721  	}
   722  	return disksAttached, nil
   723  }
   724  
    725  // diskIsAttached reports whether the disk is attached to the node VM via controllers supported by the plugin.
   726  func diskIsAttached(ctx context.Context, volPath string, nodeName string) (bool, error) {
   727  	// Create context
   728  	ctx, cancel := context.WithCancel(ctx)
   729  	defer cancel()
   730  	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
   731  	Connect(ctx, nodeInfo.VSphere)
   732  	vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
   733  	volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
   734  	device, err := getVirtualDeviceByPath(ctx, vm, volPath)
   735  	if err != nil {
   736  		framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
   737  			volPath,
   738  			nodeName)
   739  		return false, err
   740  	}
   741  	if device == nil {
   742  		return false, nil
   743  	}
   744  	framework.Logf("diskIsAttached found the disk %q attached on node %q", volPath, nodeName)
   745  	return true, nil
   746  }
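
         // Example check for a single disk (volumePath and the node are placeholders):
         //
         //	attached, err := diskIsAttached(ctx, volumePath, node.Name)
         //	framework.ExpectNoError(err)
         //	gomega.Expect(attached).To(gomega.BeTrue())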
   747  
    748  // getUUIDFromProviderID strips providerPrefix ("vsphere://") from the providerID.
    749  // The result is the VM UUID, which can be used to look up the node VM in vCenter.
   750  func getUUIDFromProviderID(providerID string) string {
   751  	return strings.TrimPrefix(providerID, providerPrefix)
   752  }
   753  
   754  // GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
   755  func GetReadySchedulableNodeInfos(ctx context.Context, c clientset.Interface) []*NodeInfo {
   756  	var nodesInfo []*NodeInfo
   757  	if TestContext.NodeMapper != nil {
   758  		nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
   759  		framework.ExpectNoError(err)
   760  		for _, node := range nodeList.Items {
   761  			nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
   762  			if nodeInfo != nil {
   763  				nodesInfo = append(nodesInfo, nodeInfo)
   764  			}
   765  		}
   766  	}
   767  	return nodesInfo
   768  }
   769  
    770  // GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one of the Ready and schedulable nodes.
    771  // If multiple Ready and schedulable nodes are present, one of them is selected at random
    772  // and its associated NodeInfo object is returned.
   773  func GetReadySchedulableRandomNodeInfo(ctx context.Context, c clientset.Interface) *NodeInfo {
   774  	nodesInfo := GetReadySchedulableNodeInfos(ctx, c)
   775  	gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty())
   776  	return nodesInfo[rand.Int()%len(nodesInfo)]
   777  }
   778  
   779  // invokeVCenterServiceControl invokes the given command for the given service
   780  // via service-control on the given vCenter host over SSH.
   781  func invokeVCenterServiceControl(ctx context.Context, command, service, host string) error {
   782  	sshCmd := fmt.Sprintf("service-control --%s %s", command, service)
   783  	framework.Logf("Invoking command %v on vCenter host %v", sshCmd, host)
   784  	result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
   785  	if err != nil || result.Code != 0 {
   786  		e2essh.LogResult(result)
    787  		return fmt.Errorf("couldn't execute command %q on vCenter host %s: %w", sshCmd, host, err)
   788  	}
   789  	return nil
   790  }
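
         // For example, to restart a vCenter service over SSH (the command and service
         // name are assumed placeholders):
         //
         //	framework.ExpectNoError(invokeVCenterServiceControl(ctx, "restart", "vsan-health", vcHost))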
   791  
   792  // expectVolumeToBeAttached checks if the given Volume is attached to the given
   793  // Node, else fails.
   794  func expectVolumeToBeAttached(ctx context.Context, nodeName, volumePath string) {
   795  	isAttached, err := diskIsAttached(ctx, volumePath, nodeName)
   796  	framework.ExpectNoError(err)
   797  	if !isAttached {
   798  		framework.Failf("Volume: %s is not attached to the node: %v", volumePath, nodeName)
   799  	}
   800  }
   801  
   802  // expectVolumesToBeAttached checks if the given Volumes are attached to the
   803  // corresponding set of Nodes, else fails.
   804  func expectVolumesToBeAttached(ctx context.Context, pods []*v1.Pod, volumePaths []string) {
   805  	for i, pod := range pods {
   806  		nodeName := pod.Spec.NodeName
   807  		volumePath := volumePaths[i]
   808  		ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName))
   809  		expectVolumeToBeAttached(ctx, nodeName, volumePath)
   810  	}
   811  }
   812  
   813  // expectFilesToBeAccessible checks if the given files are accessible on the
   814  // corresponding set of Nodes, else fails.
   815  func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []string) {
   816  	for i, pod := range pods {
   817  		podName := pod.Name
   818  		filePath := filePaths[i]
   819  		ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName))
   820  		verifyFilesExistOnVSphereVolume(namespace, podName, filePath)
   821  	}
   822  }
   823  
   824  // writeContentToPodFile writes the given content to the specified file.
   825  func writeContentToPodFile(namespace, podName, filePath, content string) error {
   826  	_, err := e2ekubectl.RunKubectl(namespace, "exec", podName,
   827  		"--", "/bin/sh", "-c", fmt.Sprintf("echo '%s' > %s", content, filePath))
   828  	return err
   829  }
   830  
   831  // expectFileContentToMatch checks if a given file contains the specified
   832  // content, else fails.
   833  func expectFileContentToMatch(namespace, podName, filePath, content string) {
   834  	_, err := e2ekubectl.RunKubectl(namespace, "exec", podName,
   835  		"--", "/bin/sh", "-c", fmt.Sprintf("grep '%s' %s", content, filePath))
   836  	framework.ExpectNoError(err, fmt.Sprintf("failed to match content of file: %q on the pod: %q", filePath, podName))
   837  }
   838  
   839  // expectFileContentsToMatch checks if the given contents match the ones present
   840  // in corresponding files on respective Pods, else fails.
   841  func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []string, contents []string) {
   842  	for i, pod := range pods {
   843  		podName := pod.Name
   844  		filePath := filePaths[i]
   845  		ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName))
   846  		expectFileContentToMatch(namespace, podName, filePath, contents[i])
   847  	}
   848  }
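
         // A possible write-then-verify flow over a set of pods (file paths and
         // contents are placeholders):
         //
         //	for i, pod := range pods {
         //		framework.ExpectNoError(writeContentToPodFile(namespace, pod.Name, filePaths[i], contents[i]))
         //	}
         //	expectFileContentsToMatch(namespace, pods, filePaths, contents)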