sigs.k8s.io/cluster-api-provider-aws@v1.5.5/test/e2e/suites/unmanaged/helpers_test.go

//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package unmanaged

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/efs"
	"github.com/aws/aws-sdk-go/service/elb"
	"github.com/blang/semver"
	"github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apimachinerytypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/utils/pointer"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
	"sigs.k8s.io/cluster-api-provider-aws/test/e2e/shared"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util/conditions"
)

type statefulSetInfo struct {
	name                      string
	namespace                 string
	replicas                  int32
	selector                  map[string]string
	storageClassName          string
	volumeName                string
	svcName                   string
	svcPort                   int32
	svcPortName               string
	containerName             string
	containerImage            string
	containerPort             int32
	podTerminationGracePeriod int64
	volMountPath              string
	isInTreeCSI               bool
}
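
// A hypothetical, minimal statefulSetInfo value (names and image are
// illustrative, not taken from the suite) showing how the fields above are
// typically populated before calling createStatefulSet:
//
//	info := statefulSetInfo{
//		name:                      "nginx-statefulset",
//		namespace:                 metav1.NamespaceDefault,
//		replicas:                  2,
//		selector:                  map[string]string{"app": "nginx"},
//		storageClassName:          "aws-ebs-volumes",
//		volumeName:                "nginx-volumes",
//		svcName:                   "nginx-svc",
//		svcPort:                   80,
//		svcPortName:               "web",
//		containerName:             "nginx",
//		containerImage:            "k8s.gcr.io/nginx-slim:0.8",
//		containerPort:             80,
//		podTerminationGracePeriod: 30,
//		volMountPath:              "/usr/share/nginx/html",
//		isInTreeCSI:               false,
//	}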

// GetAWSClusterByName returns the AWSCluster object backing the Cluster with
// the given name and namespace.
func GetAWSClusterByName(ctx context.Context, namespace, name string) (*infrav1.AWSCluster, error) {
	cluster := &clusterv1.Cluster{}
	key := crclient.ObjectKey{
		Namespace: namespace,
		Name:      name,
	}
	if err := e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, key, cluster); err != nil {
		return nil, err
	}

	awsCluster := &infrav1.AWSCluster{}
	awsClusterKey := crclient.ObjectKey{
		Namespace: namespace,
		Name:      cluster.Spec.InfrastructureRef.Name,
	}
	err := e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, awsClusterKey, awsCluster)
	return awsCluster, err
}
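
// A minimal usage sketch, with a hypothetical cluster name. The helper
// resolves the Cluster first and then follows its infrastructureRef:
//
//	awsCluster, err := GetAWSClusterByName(ctx, namespace.Name, "test-cluster")
//	Expect(err).NotTo(HaveOccurred())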

func createCluster(ctx context.Context, configCluster clusterctl.ConfigClusterInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) (*clusterv1.Cluster, []*clusterv1.MachineDeployment, *controlplanev1.KubeadmControlPlane) {
	clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
		ClusterProxy:                 e2eCtx.Environment.BootstrapClusterProxy,
		ConfigCluster:                configCluster,
		WaitForClusterIntervals:      e2eCtx.E2EConfig.GetIntervals("", "wait-cluster"),
		WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals("", "wait-control-plane"),
		WaitForMachineDeployments:    e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes"),
	}, result)

	return result.Cluster, result.MachineDeployments, result.ControlPlane
}

func defaultConfigCluster(clusterName, namespace string) clusterctl.ConfigClusterInput {
	return clusterctl.ConfigClusterInput{
		LogFolder:                filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
		ClusterctlConfigPath:     e2eCtx.Environment.ClusterctlConfigPath,
		KubeconfigPath:           e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(),
		InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
		Flavor:                   clusterctl.DefaultFlavor,
		Namespace:                namespace,
		ClusterName:              clusterName,
		KubernetesVersion:        e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion),
		ControlPlaneMachineCount: pointer.Int64Ptr(1),
		WorkerMachineCount:       pointer.Int64Ptr(0),
	}
}
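
// The two helpers above compose: build the default config (one control-plane
// node, zero workers), tweak it as needed, then apply it and wait. A minimal
// sketch, with a hypothetical cluster name:
//
//	configCluster := defaultConfigCluster("test-cluster", namespace.Name)
//	configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
//	result := &clusterctl.ApplyClusterTemplateAndWaitResult{}
//	cluster, machineDeployments, controlPlane := createCluster(ctx, configCluster, result)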

func createLBService(svcNamespace string, svcName string, k8sclient crclient.Client) string {
	shared.Byf("Creating service of type Load Balancer with name: %s under namespace: %s", svcName, svcNamespace)
	svcSpec := corev1.ServiceSpec{
		Type: corev1.ServiceTypeLoadBalancer,
		Ports: []corev1.ServicePort{
			{
				Port:     80,
				Protocol: corev1.ProtocolTCP,
			},
		},
		Selector: map[string]string{
			"app": "nginx",
		},
	}
	createService(svcName, svcNamespace, nil, svcSpec, k8sclient)
	// this sleep is required for the service to get updated with ingress details
	time.Sleep(15 * time.Second)
	svcCreated := &corev1.Service{}
	err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: svcNamespace, Name: svcName}, svcCreated)
	Expect(err).NotTo(HaveOccurred())
	elbName := ""
	if lbs := len(svcCreated.Status.LoadBalancer.Ingress); lbs > 0 {
		ingressHostname := svcCreated.Status.LoadBalancer.Ingress[0].Hostname
		elbName = strings.Split(ingressHostname, "-")[0]
	}
	shared.Byf("Created Load Balancer service and ELB name is: %s", elbName)

	return elbName
}
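
// createLBService returns the ELB's short name (the first "-"-separated
// segment of the ingress hostname), which is the form the AWS ELB API
// expects. A typical lifecycle, sketched with a hypothetical service name and
// workload-cluster client:
//
//	elbName := createLBService(metav1.NamespaceDefault, "test-svc", workloadClient)
//	verifyElbExists(elbName, true)
//	deleteLBService(metav1.NamespaceDefault, "test-svc", workloadClient)
//	verifyElbExists(elbName, false)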

func deleteLBService(svcNamespace string, svcName string, k8sclient crclient.Client) {
	svcSpec := corev1.ServiceSpec{
		Type: corev1.ServiceTypeLoadBalancer,
		Ports: []corev1.ServicePort{
			{
				Port:     80,
				Protocol: corev1.ProtocolTCP,
			},
		},
		Selector: map[string]string{
			"app": "nginx",
		},
	}
	deleteService(svcName, svcNamespace, nil, svcSpec, k8sclient)
}

func createPodTemplateSpec(statefulsetinfo statefulSetInfo) corev1.PodTemplateSpec {
	ginkgo.By("Creating PodTemplateSpec config object")
	podTemplateSpec := corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Name:   statefulsetinfo.name,
			Labels: statefulsetinfo.selector,
		},
		Spec: corev1.PodSpec{
			TerminationGracePeriodSeconds: &statefulsetinfo.podTerminationGracePeriod,
			Containers: []corev1.Container{
				{
					Name:  statefulsetinfo.containerName,
					Image: statefulsetinfo.containerImage,
					Ports: []corev1.ContainerPort{{Name: statefulsetinfo.svcPortName, ContainerPort: statefulsetinfo.containerPort}},
					VolumeMounts: []corev1.VolumeMount{
						{Name: statefulsetinfo.volumeName, MountPath: statefulsetinfo.volMountPath},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: statefulsetinfo.volumeName,
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ClaimName: statefulsetinfo.volumeName},
					},
				},
			},
		},
	}
	return podTemplateSpec
}

func createPVC(statefulsetinfo statefulSetInfo) corev1.PersistentVolumeClaim {
	ginkgo.By("Creating PersistentVolumeClaim config object")
	volClaimTemplate := corev1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: statefulsetinfo.volumeName,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
			StorageClassName: &statefulsetinfo.storageClassName,
			Resources: corev1.ResourceRequirements{
				Requests: corev1.ResourceList{
					corev1.ResourceStorage: resource.MustParse("4Gi"),
				},
			},
		},
	}
	return volClaimTemplate
}

func createService(svcName string, svcNamespace string, labels map[string]string, serviceSpec corev1.ServiceSpec, k8sClient crclient.Client) {
	svcToCreate := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: svcNamespace,
			Name:      svcName,
		},
		Spec: serviceSpec,
	}
	if len(labels) > 0 {
		svcToCreate.ObjectMeta.Labels = labels
	}
	Expect(k8sClient.Create(context.TODO(), &svcToCreate)).NotTo(HaveOccurred())
}

func deleteService(svcName string, svcNamespace string, labels map[string]string, serviceSpec corev1.ServiceSpec, k8sClient crclient.Client) {
	svcToDelete := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: svcNamespace,
			Name:      svcName,
		},
		Spec: serviceSpec,
	}
	if len(labels) > 0 {
		svcToDelete.ObjectMeta.Labels = labels
	}
	Expect(k8sClient.Delete(context.TODO(), &svcToDelete)).NotTo(HaveOccurred())
}

func createStatefulSet(statefulsetinfo statefulSetInfo, k8sclient crclient.Client) {
	ginkgo.By("Creating statefulset")
	svcSpec := corev1.ServiceSpec{
		ClusterIP: "None",
		Ports: []corev1.ServicePort{
			{
				Port: statefulsetinfo.svcPort,
				Name: statefulsetinfo.svcPortName,
			},
		},
		Selector: statefulsetinfo.selector,
	}
	createService(statefulsetinfo.svcName, statefulsetinfo.namespace, statefulsetinfo.selector, svcSpec, k8sclient)
	createStorageClass(statefulsetinfo.isInTreeCSI, statefulsetinfo.storageClassName, k8sclient)
	podTemplateSpec := createPodTemplateSpec(statefulsetinfo)
	volClaimTemplate := createPVC(statefulsetinfo)
	deployStatefulSet(statefulsetinfo, volClaimTemplate, podTemplateSpec, k8sclient)
	waitForStatefulSetRunning(statefulsetinfo, k8sclient)
}

func createStorageClass(isIntree bool, storageClassName string, k8sclient crclient.Client) {
	shared.Byf("Creating StorageClass object with name: %s", storageClassName)
	volExpansion := true
	bindingMode := storagev1.VolumeBindingWaitForFirstConsumer
	azs := shared.GetAvailabilityZones(e2eCtx.AWSSession)

	provisioner := "ebs.csi.aws.com"
	params := map[string]string{
		"csi.storage.k8s.io/fstype": "xfs",
		"type":                      "io1",
		"iopsPerGB":                 "100",
	}
	allowedTopo := []corev1.TopologySelectorTerm{{
		MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
			Key:    shared.StorageClassOutTreeZoneLabel,
			Values: []string{*azs[0].ZoneName},
		}},
	}}
	if isIntree {
		provisioner = "kubernetes.io/aws-ebs"
		params = map[string]string{
			"type": "gp2",
		}

		allowedTopo = nil
	}
	storageClass := &storagev1.StorageClass{}
	if err := k8sclient.Get(context.TODO(), crclient.ObjectKey{
		Name:      storageClassName,
		Namespace: metav1.NamespaceDefault,
	}, storageClass); err != nil {
		if apierrors.IsNotFound(err) {
			storageClass = &storagev1.StorageClass{
				TypeMeta: metav1.TypeMeta{
					APIVersion: "storage.k8s.io/v1",
					Kind:       "StorageClass",
				},
				ObjectMeta: metav1.ObjectMeta{
					Name: storageClassName,
				},
				Parameters:           params,
				Provisioner:          provisioner,
				AllowVolumeExpansion: &volExpansion,
				VolumeBindingMode:    &bindingMode,
				AllowedTopologies:    allowedTopo,
			}
			Expect(k8sclient.Create(context.TODO(), storageClass)).NotTo(HaveOccurred())
		}
	}
}
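
// createStorageClass is idempotent: it only creates the StorageClass when the
// initial Get returns NotFound. The isIntree flag switches between the
// out-of-tree EBS CSI driver (ebs.csi.aws.com, io1, pinned to the first
// availability zone) and the legacy in-tree provisioner
// (kubernetes.io/aws-ebs, gp2). A minimal sketch, with hypothetical class
// names:
//
//	createStorageClass(false, "ebs-csi-io1", workloadClient) // out-of-tree CSI
//	createStorageClass(true, "aws-ebs-gp2", workloadClient)  // in-tree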

func deleteCluster(ctx context.Context, cluster *clusterv1.Cluster) {
	framework.DeleteCluster(ctx, framework.DeleteClusterInput{
		Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
		Cluster: cluster,
	})

	framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
		Getter:  e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
		Cluster: cluster,
	}, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...)
}

func deleteMachine(namespace *corev1.Namespace, md *clusterv1.MachineDeployment) {
	machineList := &clusterv1.MachineList{}
	selector, err := metav1.LabelSelectorAsMap(&md.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())

	bootstrapClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
	err = bootstrapClient.List(context.TODO(), machineList, crclient.InNamespace(namespace.Name), crclient.MatchingLabels(selector))
	Expect(err).NotTo(HaveOccurred())

	Expect(len(machineList.Items)).ToNot(Equal(0))
	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace.Name,
			Name:      machineList.Items[0].Name,
		},
	}
	Expect(bootstrapClient.Delete(context.TODO(), machine)).To(Succeed())
}

func deleteRetainedVolumes(awsVolIDs []*string) {
	ginkgo.By("Deleting dynamically provisioned volumes")
	ec2Client := ec2.New(e2eCtx.AWSSession)
	for _, volumeID := range awsVolIDs {
		input := &ec2.DeleteVolumeInput{
			VolumeId: aws.String(*volumeID),
		}
		_, err := ec2Client.DeleteVolume(input)
		Expect(err).NotTo(HaveOccurred())
		shared.Byf("Deleted dynamically provisioned volume with ID: %s", *volumeID)
	}
}

func deployStatefulSet(statefulsetinfo statefulSetInfo, volClaimTemp corev1.PersistentVolumeClaim, podTemplate corev1.PodTemplateSpec, k8sclient crclient.Client) {
	shared.Byf("Deploying Statefulset with name: %s under namespace: %s", statefulsetinfo.name, statefulsetinfo.namespace)
	statefulset := appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: statefulsetinfo.name, Namespace: statefulsetinfo.namespace},
		Spec: appsv1.StatefulSetSpec{
			ServiceName:          statefulsetinfo.svcName,
			Replicas:             &statefulsetinfo.replicas,
			Selector:             &metav1.LabelSelector{MatchLabels: statefulsetinfo.selector},
			Template:             podTemplate,
			VolumeClaimTemplates: []corev1.PersistentVolumeClaim{volClaimTemp},
		},
	}
	err := k8sclient.Create(context.TODO(), &statefulset)
	Expect(err).NotTo(HaveOccurred())
}

func getEvents(namespace string) *corev1.EventList {
	eventsList := &corev1.EventList{}
	if err := e2eCtx.Environment.BootstrapClusterProxy.GetClient().List(context.TODO(), eventsList, crclient.InNamespace(namespace), crclient.MatchingLabels{}); err != nil {
		fmt.Fprintf(ginkgo.GinkgoWriter, "Got error while fetching events of namespace: %s, %s \n", namespace, err.Error())
	}

	return eventsList
}

func getSubnetID(filterKey, filterValue, clusterName string) *string {
	var subnetOutput *ec2.DescribeSubnetsOutput
	var err error

	ec2Client := ec2.New(e2eCtx.AWSSession)
	subnetInput := &ec2.DescribeSubnetsInput{
		Filters: []*ec2.Filter{
			{
				Name: aws.String(filterKey),
				Values: []*string{
					aws.String(filterValue),
				},
			},
			{
				Name:   aws.String("tag-key"),
				Values: aws.StringSlice([]string{"sigs.k8s.io/cluster-api-provider-aws/cluster/" + clusterName}),
			},
		},
	}

	Eventually(func() int {
		subnetOutput, err = ec2Client.DescribeSubnets(subnetInput)
		Expect(err).NotTo(HaveOccurred())
		return len(subnetOutput.Subnets)
	}, e2eCtx.E2EConfig.GetIntervals("", "wait-infra-subnets")...).Should(Equal(1))

	return subnetOutput.Subnets[0].SubnetId
}
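
// getSubnetID polls until exactly one subnet owned by the cluster matches the
// extra filter, so it is safe to call while the infrastructure is still
// converging. A minimal sketch using EC2's availability-zone filter (the
// filter key and value here are illustrative):
//
//	subnetID := getSubnetID("availability-zone", "us-west-2a", clusterName)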

func getVolumeIds(info statefulSetInfo, k8sclient crclient.Client) []*string {
	ginkgo.By("Retrieving IDs of dynamically provisioned volumes.")
	statefulset := &appsv1.StatefulSet{}
	err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: info.name}, statefulset)
	Expect(err).NotTo(HaveOccurred())
	podSelector, err := metav1.LabelSelectorAsMap(statefulset.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	pvcList := &corev1.PersistentVolumeClaimList{}
	err = k8sclient.List(context.TODO(), pvcList, crclient.InNamespace(info.namespace), crclient.MatchingLabels(podSelector))
	Expect(err).NotTo(HaveOccurred())
	volIDs := make([]*string, len(pvcList.Items))
	for i, pvc := range pvcList.Items {
		volName := pvc.Spec.VolumeName
		volDescription := &corev1.PersistentVolume{}
		err = k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: volName}, volDescription)
		Expect(err).NotTo(HaveOccurred())

		url := ""
		// The out-of-tree EBS CSI driver stores the volume ID in .Spec.PersistentVolumeSource.CSI,
		// while the in-tree provisioner uses .Spec.PersistentVolumeSource.AWSElasticBlockStore.
		if volDescription.Spec.PersistentVolumeSource.CSI != nil {
			url = volDescription.Spec.PersistentVolumeSource.CSI.VolumeHandle
		} else if volDescription.Spec.PersistentVolumeSource.AWSElasticBlockStore != nil {
			str := strings.Split(volDescription.Spec.PersistentVolumeSource.AWSElasticBlockStore.VolumeID, "vol-")
			url = "vol-" + str[1]
		}
		volIDs[i] = &url
	}
	return volIDs
}
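
// The pointers returned by getVolumeIds feed directly into the EC2-level
// helpers below, e.g. to check that volumes retained by a reclaim policy
// survive and can be cleaned up afterwards:
//
//	volIDs := getVolumeIds(info, workloadClient)
//	verifyVolumesExists(volIDs)
//	// ... delete the workload cluster ...
//	deleteRetainedVolumes(volIDs)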

// isErrorEventExists returns true if every AWSMachine belonging to the given
// MachineDeployment has an event in eList matching eventReason and errorMsg.
func isErrorEventExists(namespace, machineDeploymentName, eventReason, errorMsg string, eList *corev1.EventList) bool {
	k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
	machineDeployment := &clusterv1.MachineDeployment{}
	if err := k8sClient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: namespace, Name: machineDeploymentName}, machineDeployment); err != nil {
		fmt.Fprintf(ginkgo.GinkgoWriter, "Got error while getting machinedeployment %s \n", machineDeploymentName)
		return false
	}

	selector, err := metav1.LabelSelectorAsMap(&machineDeployment.Spec.Selector)
	if err != nil {
		fmt.Fprintf(ginkgo.GinkgoWriter, "Got error while reading labels of machinedeployment: %s, %s \n", machineDeploymentName, err.Error())
		return false
	}

	awsMachineList := &infrav1.AWSMachineList{}
	if err := k8sClient.List(context.TODO(), awsMachineList, crclient.InNamespace(namespace), crclient.MatchingLabels(selector)); err != nil {
		fmt.Fprintf(ginkgo.GinkgoWriter, "Got error while getting awsmachines of machinedeployment: %s, %s \n", machineDeploymentName, err.Error())
		return false
	}

	eventMachinesCnt := 0
	for _, awsMachine := range awsMachineList.Items {
		for _, event := range eList.Items {
			if strings.Contains(event.Name, awsMachine.Name) && event.Reason == eventReason && strings.Contains(event.Message, errorMsg) {
				eventMachinesCnt++
				break
			}
		}
	}
	return len(awsMachineList.Items) == eventMachinesCnt
}

func getAWSMachinesForDeployment(namespace string, machineDeployment clusterv1.MachineDeployment) *infrav1.AWSMachineList {
	k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
	selector, err := metav1.LabelSelectorAsMap(&machineDeployment.Spec.Selector)
	Expect(err).NotTo(HaveOccurred())
	awsMachineList := &infrav1.AWSMachineList{}
	Expect(k8sClient.List(context.TODO(), awsMachineList, crclient.InNamespace(namespace), crclient.MatchingLabels(selector))).NotTo(HaveOccurred())
	return awsMachineList
}

func makeAWSMachineTemplate(namespace, name, instanceType string, az, subnetID *string) *infrav1.AWSMachineTemplate {
	awsMachine := &infrav1.AWSMachineTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: infrav1.AWSMachineTemplateSpec{
			Template: infrav1.AWSMachineTemplateResource{
				Spec: infrav1.AWSMachineSpec{
					InstanceType:       instanceType,
					IAMInstanceProfile: "nodes.cluster-api-provider-aws.sigs.k8s.io",
					SSHKeyName:         pointer.StringPtr(os.Getenv("AWS_SSH_KEY_NAME")),
				},
			},
		},
	}
	if az != nil {
		awsMachine.Spec.Template.Spec.FailureDomain = az
	}

	if subnetID != nil {
		resRef := &infrav1.AWSResourceReference{
			ID: subnetID,
		}
		awsMachine.Spec.Template.Spec.Subnet = resRef
	}

	return awsMachine
}

func makeJoinBootstrapConfigTemplate(namespace, name string) *bootstrapv1.KubeadmConfigTemplate {
	return &bootstrapv1.KubeadmConfigTemplate{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: bootstrapv1.KubeadmConfigTemplateSpec{
			Template: bootstrapv1.KubeadmConfigTemplateResource{
				Spec: bootstrapv1.KubeadmConfigSpec{
					JoinConfiguration: &bootstrapv1.JoinConfiguration{
						NodeRegistration: bootstrapv1.NodeRegistrationOptions{
							Name:             "{{ ds.meta_data.local_hostname }}",
							KubeletExtraArgs: map[string]string{"cloud-provider": "aws"},
						},
					},
				},
			},
		},
	}
}

func makeMachineDeployment(namespace, mdName, clusterName string, replicas int32) *clusterv1.MachineDeployment {
	return &clusterv1.MachineDeployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      mdName,
			Namespace: namespace,
			Labels: map[string]string{
				"cluster.x-k8s.io/cluster-name": clusterName,
				"nodepool":                      mdName,
			},
		},
		Spec: clusterv1.MachineDeploymentSpec{
			Replicas: &replicas,
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{
					"cluster.x-k8s.io/cluster-name": clusterName,
					"nodepool":                      mdName,
				},
			},
			ClusterName: clusterName,
			Template: clusterv1.MachineTemplateSpec{
				ObjectMeta: clusterv1.ObjectMeta{
					Labels: map[string]string{
						"cluster.x-k8s.io/cluster-name": clusterName,
						"nodepool":                      mdName,
					},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName: clusterName,
					Bootstrap: clusterv1.Bootstrap{
						ConfigRef: &corev1.ObjectReference{
							Kind:       "KubeadmConfigTemplate",
							APIVersion: bootstrapv1.GroupVersion.String(),
							Name:       mdName,
							Namespace:  namespace,
						},
					},
					InfrastructureRef: corev1.ObjectReference{
						Kind:       "AWSMachineTemplate",
						APIVersion: infrav1.GroupVersion.String(),
						Name:       mdName,
						Namespace:  namespace,
					},
					Version: pointer.StringPtr(e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion)),
				},
			},
		},
	}
}
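
// The three make* helpers above produce the trio of objects a worker pool
// needs: an AWSMachineTemplate, a KubeadmConfigTemplate, and a
// MachineDeployment that references both by the shared name. A minimal
// sketch, with a hypothetical name and instance type:
//
//	k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
//	Expect(k8sClient.Create(ctx, makeAWSMachineTemplate(ns, "md-1", "t3.large", nil, nil))).To(Succeed())
//	Expect(k8sClient.Create(ctx, makeJoinBootstrapConfigTemplate(ns, "md-1"))).To(Succeed())
//	Expect(k8sClient.Create(ctx, makeMachineDeployment(ns, "md-1", clusterName, 1))).To(Succeed())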

func assertSpotInstanceType(instanceID string) {
	shared.Byf("Finding EC2 spot instance with ID: %s", instanceID)
	ec2Client := ec2.New(e2eCtx.AWSSession)
	input := &ec2.DescribeInstancesInput{
		InstanceIds: []*string{
			// strip any provider-ID prefix (everything up to the last "/") to get the bare instance ID
			aws.String(instanceID[strings.LastIndex(instanceID, "/")+1:]),
		},
		Filters: []*ec2.Filter{{Name: aws.String("instance-lifecycle"), Values: aws.StringSlice([]string{"spot"})}},
	}

	result, err := ec2Client.DescribeInstances(input)
	Expect(err).To(BeNil())
	Expect(len(result.Reservations)).To(Equal(1))
	Expect(len(result.Reservations[0].Instances)).To(Equal(1))
}

func terminateInstance(instanceID string) {
	shared.Byf("Terminating EC2 instance with ID: %s", instanceID)
	ec2Client := ec2.New(e2eCtx.AWSSession)
	input := &ec2.TerminateInstancesInput{
		InstanceIds: []*string{
			aws.String(instanceID[strings.LastIndex(instanceID, "/")+1:]),
		},
	}

	result, err := ec2Client.TerminateInstances(input)
	Expect(err).To(BeNil())
	Expect(len(result.TerminatingInstances)).To(Equal(1))
	termCode := int64(32) // EC2 instance state code 32 is "shutting-down"
	Expect(*result.TerminatingInstances[0].CurrentState.Code).To(Equal(termCode))
}

func verifyElbExists(elbName string, exists bool) {
	shared.Byf("Verifying ELB with name %s present", elbName)
	elbClient := elb.New(e2eCtx.AWSSession)
	input := &elb.DescribeLoadBalancersInput{
		LoadBalancerNames: []*string{
			aws.String(elbName),
		},
	}
	elbsOutput, err := elbClient.DescribeLoadBalancers(input)
	if exists {
		Expect(err).NotTo(HaveOccurred())
		Expect(len(elbsOutput.LoadBalancerDescriptions)).To(Equal(1))
		shared.Byf("ELB with name %s exists", elbName)
	} else {
		aerr, ok := err.(awserr.Error)
		Expect(ok).To(BeTrue())
		Expect(aerr.Code()).To(Equal(elb.ErrCodeAccessPointNotFoundException))
		shared.Byf("ELB with name %s doesn't exist", elbName)
	}
}

func verifyVolumesExists(awsVolumeIds []*string) {
	ginkgo.By("Ensuring dynamically provisioned volumes exist")
	ec2Client := ec2.New(e2eCtx.AWSSession)
	input := &ec2.DescribeVolumesInput{
		VolumeIds: awsVolumeIds,
	}
	_, err := ec2Client.DescribeVolumes(input)
	Expect(err).NotTo(HaveOccurred())
}

func waitForStatefulSetRunning(info statefulSetInfo, k8sclient crclient.Client) {
	shared.Byf("Ensuring Statefulset(%s) is running", info.name)
	statefulset := &appsv1.StatefulSet{}
	Eventually(
		func() (bool, error) {
			if err := k8sclient.Get(context.TODO(), apimachinerytypes.NamespacedName{Namespace: info.namespace, Name: info.name}, statefulset); err != nil {
				return false, err
			}
			return *statefulset.Spec.Replicas == statefulset.Status.ReadyReplicas, nil
		}, 10*time.Minute, 30*time.Second,
	).Should(BeTrue())
}

// LatestCIReleaseForVersion returns the latest CI build of the minor release
// following searchVersion, by fetching the dl.k8s.io latest-<major>.<minor+1>.txt marker.
func LatestCIReleaseForVersion(searchVersion string) (string, error) {
	ciVersionURL := "https://dl.k8s.io/ci/latest-%d.%d.txt"
	tagPrefix := "v"
	searchSemVer, err := semver.Make(strings.TrimPrefix(searchVersion, tagPrefix))
	if err != nil {
		return "", err
	}
	searchSemVer.Minor++
	resp, err := http.Get(fmt.Sprintf(ciVersionURL, searchSemVer.Major, searchSemVer.Minor))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}

	return strings.TrimSpace(string(b)), nil
}
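
// A minimal usage sketch; the returned tag is whatever the dl.k8s.io marker
// file currently contains, so the value shown here is purely illustrative:
//
//	next, err := LatestCIReleaseForVersion("v1.24.0") // fetches latest-1.25.txt
//	Expect(err).NotTo(HaveOccurred())
//	// next now holds a CI build tag such as "v1.25.0-alpha.0.123+abcdef012345"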

type conditionAssertion struct {
	conditionType clusterv1.ConditionType
	status        corev1.ConditionStatus
	severity      clusterv1.ConditionSeverity
	reason        string
}

func expectAWSClusterConditions(m *infrav1.AWSCluster, expected []conditionAssertion) {
	Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions")
	for _, c := range expected {
		actual := conditions.Get(m, c.conditionType)
		Expect(actual).To(Not(BeNil()))
		Expect(actual.Type).To(Equal(c.conditionType))
		Expect(actual.Status).To(Equal(c.status))
		Expect(actual.Severity).To(Equal(c.severity))
		Expect(actual.Reason).To(Equal(c.reason))
	}
}
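
// A minimal sketch of asserting on an AWSCluster fetched with
// GetAWSClusterByName above; the condition chosen here is illustrative (for a
// True condition, severity and reason are empty):
//
//	expectAWSClusterConditions(awsCluster, []conditionAssertion{
//		{infrav1.LoadBalancerReadyCondition, corev1.ConditionTrue, "", ""},
//	})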

func createEFS() *efs.FileSystemDescription {
	efs, err := shared.CreateEFS(e2eCtx, string(uuid.NewUUID()))
	Expect(err).NotTo(HaveOccurred())
	Eventually(func() (string, error) {
		state, err := shared.GetEFSState(e2eCtx, aws.StringValue(efs.FileSystemId))
		return aws.StringValue(state), err
	}, 2*time.Minute, 5*time.Second).Should(Equal("available"))
	return efs
}

func createSecurityGroupForEFS(clusterName string, vpc *ec2.Vpc) *ec2.CreateSecurityGroupOutput {
	securityGroup, err := shared.CreateSecurityGroup(e2eCtx, clusterName+"-efs-sg", "security group for EFS Access", *(vpc.VpcId))
	Expect(err).NotTo(HaveOccurred())
	nameFilter := &ec2.Filter{
		Name:   aws.String("tag:Name"),
		Values: aws.StringSlice([]string{clusterName + "-node"}),
	}
	nodeSecurityGroups, err := shared.GetSecurityGroupByFilters(e2eCtx, []*ec2.Filter{
		nameFilter,
	})
	Expect(err).NotTo(HaveOccurred())
	Expect(len(nodeSecurityGroups)).To(Equal(1))
	// Allow NFS (TCP 2049) from the cluster's node security group.
	_, err = shared.CreateSecurityGroupIngressRuleWithSourceSG(e2eCtx, aws.StringValue(securityGroup.GroupId), "tcp", 2049, aws.StringValue(nodeSecurityGroups[0].GroupId))
	Expect(err).NotTo(HaveOccurred())
	return securityGroup
}

func createMountTarget(efs *efs.FileSystemDescription, securityGroup *ec2.CreateSecurityGroupOutput, vpc *ec2.Vpc) *efs.MountTargetDescription {
	mt, err := shared.CreateMountTargetOnEFS(e2eCtx, aws.StringValue(efs.FileSystemId), aws.StringValue(vpc.VpcId), aws.StringValue(securityGroup.GroupId))
	Expect(err).NotTo(HaveOccurred())
	Eventually(func() (string, error) {
		state, err := shared.GetMountTargetState(e2eCtx, *mt.MountTargetId)
		return aws.StringValue(state), err
	}, 5*time.Minute, 10*time.Second).Should(Equal("available"))
	return mt
}

func deleteMountTarget(mountTarget *efs.MountTargetDescription) {
	_, err := shared.DeleteMountTarget(e2eCtx, *mountTarget.MountTargetId)
	Expect(err).NotTo(HaveOccurred())
	Eventually(func(g Gomega) {
		_, err = shared.GetMountTarget(e2eCtx, *mountTarget.MountTargetId)
		g.Expect(err).To(HaveOccurred())
		aerr, ok := err.(awserr.Error)
		g.Expect(ok).To(BeTrue())
		g.Expect(aerr.Code()).To(Equal(efs.ErrCodeMountTargetNotFound))
	}, 5*time.Minute, 10*time.Second).Should(Succeed())
}
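
// The EFS helpers compose into a single provisioning sequence: create the
// file system, open NFS access from the cluster's nodes, expose a mount
// target, and tear it down again when the spec finishes. A minimal sketch,
// assuming clusterName and the cluster's VPC have already been resolved:
//
//	fs := createEFS()
//	sg := createSecurityGroupForEFS(clusterName, vpc)
//	mt := createMountTarget(fs, sg, vpc)
//	defer deleteMountTarget(mt)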

// example taken from aws-efs-csi-driver (https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml)
func createEFSStorageClass(storageClassName string, clusterClient crclient.Client, efs *efs.FileSystemDescription) {
	storageClass := &storagev1.StorageClass{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "storage.k8s.io/v1",
			Kind:       "StorageClass",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: storageClassName,
		},
		MountOptions: []string{"tls"},
		Parameters: map[string]string{
			"provisioningMode": "efs-ap",
			"fileSystemId":     aws.StringValue(efs.FileSystemId),
			"directoryPerms":   "700",
			"gidRangeStart":    "1000",
			"gidRangeEnd":      "2000",
		},
		Provisioner: "efs.csi.aws.com",
	}
	Expect(clusterClient.Create(context.TODO(), storageClass)).NotTo(HaveOccurred())
}

// example taken from aws-efs-csi-driver (https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/dynamic_provisioning/specs/pod.yaml)
func createPVCForEFS(storageClassName string, clusterClient crclient.Client) {
	pvc := &corev1.PersistentVolumeClaim{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "PersistentVolumeClaim",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "efs-claim",
			Namespace: metav1.NamespaceDefault,
		},
		Spec: corev1.PersistentVolumeClaimSpec{
			AccessModes: []corev1.PersistentVolumeAccessMode{
				corev1.ReadWriteMany,
			},
			StorageClassName: &storageClassName,
			Resources: corev1.ResourceRequirements{
				Requests: map[corev1.ResourceName]resource.Quantity{
					corev1.ResourceStorage: *resource.NewQuantity(5*1024*1024*1024, resource.BinarySI), // 5Gi
				},
			},
		},
	}
	Expect(clusterClient.Create(context.TODO(), pvc)).NotTo(HaveOccurred())
}

// example taken from aws-efs-csi-driver (https://github.com/kubernetes-sigs/aws-efs-csi-driver/blob/master/examples/kubernetes/dynamic_provisioning/specs/pod.yaml)
func createPodWithEFSMount(clusterClient crclient.Client) {
	pod := &corev1.Pod{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "v1",
			Kind:       "Pod",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "efs-app",
			Namespace: metav1.NamespaceDefault,
		},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{
				{
					Name:    "app",
					Image:   "centos",
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", "while true; do echo $(date -u) >> /data/out; sleep 5; done"},
					VolumeMounts: []corev1.VolumeMount{
						{
							Name:      "persistent-storage",
							MountPath: "/data",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				{
					Name: "persistent-storage",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: "efs-claim",
						},
					},
				},
			},
		},
	}
	Expect(clusterClient.Create(context.TODO(), pod)).NotTo(HaveOccurred())
}