sigs.k8s.io/cluster-api-provider-aws@v1.5.5/test/e2e/suites/unmanaged/unmanaged_functional_test.go

     1  //go:build e2e
     2  // +build e2e
     3  
     4  /*
     5  Copyright 2020 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11  	http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package unmanaged
    21  
    22  import (
    23  	"context"
    24  	"fmt"
    25  	"os"
    26  	"path/filepath"
    27  	"strings"
    28  	"time"
    29  
    30  	"github.com/aws/aws-sdk-go/service/ec2"
    31  	"github.com/blang/semver"
    32  	"github.com/gofrs/flock"
    33  	"github.com/onsi/ginkgo"
    34  	"github.com/onsi/ginkgo/config"
    35  	. "github.com/onsi/gomega"
    36  	corev1 "k8s.io/api/core/v1"
    37  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    38  	"k8s.io/utils/pointer"
    39  	"sigs.k8s.io/controller-runtime/pkg/client"
    40  
    41  	infrav1 "sigs.k8s.io/cluster-api-provider-aws/api/v1beta1"
    42  	"sigs.k8s.io/cluster-api-provider-aws/exp/instancestate"
    43  	"sigs.k8s.io/cluster-api-provider-aws/test/e2e/shared"
    44  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    45  	"sigs.k8s.io/cluster-api/test/framework"
    46  	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
    47  	"sigs.k8s.io/cluster-api/util"
    48  )
    49  
    50  var _ = ginkgo.Context("[unmanaged] [functional]", func() {
    51  	var (
    52  		ctx               context.Context
    53  		result            *clusterctl.ApplyClusterTemplateAndWaitResult
    54  		requiredResources *shared.TestResource
    55  	)
    56  
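         	// Give each spec a fresh context and an empty ApplyClusterTemplateAndWaitResult so state does not leak between specs.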
    57  	ginkgo.BeforeEach(func() {
    58  		ctx = context.TODO()
    59  		result = &clusterctl.ApplyClusterTemplateAndWaitResult{}
    60  	})
    61  
    62  	ginkgo.Describe("Workload cluster with EFS driver", func() {
    63  		ginkgo.It("should pass dynamic provisioning test", func() {
    64  			specName := "functional-efs-support"
    65  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1}
    66  			requiredResources.WriteRequestedResources(e2eCtx, "efs-support-test")
    67  
    68  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
    69  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
    70  
    71  			Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
    72  			Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion))
    73  			shared.CreateAWSClusterControllerIdentity(e2eCtx.Environment.BootstrapClusterProxy.GetClient())
    74  
    75  			clusterName := fmt.Sprintf("cluster-%s", util.RandomString(6))
    76  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
    77  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
    78  			configCluster.Flavor = shared.EFSSupport
    79  			configCluster.ControlPlaneMachineCount = pointer.Int64Ptr(1)
    80  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
    81  			cluster, _, _ := createCluster(ctx, configCluster, result)
    82  			defer deleteCluster(ctx, cluster)
    83  			clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterName).GetClient()
    84  
    85  			ginkgo.By("Setting up EFS in AWS")
    86  			efs := createEFS()
    87  			defer shared.DeleteEFS(e2eCtx, *efs.FileSystemId)
    88  			vpc, err := shared.GetVPCByName(e2eCtx, clusterName+"-vpc")
    89  			Expect(err).NotTo(HaveOccurred())
    90  			securityGroup := createSecurityGroupForEFS(clusterName, vpc)
    91  			defer shared.DeleteSecurityGroup(e2eCtx, *securityGroup.GroupId)
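         			// The mount target, guarded by the security group created above, is what allows nodes in the cluster VPC to reach the EFS file system.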
    92  			mountTarget := createMountTarget(efs, securityGroup, vpc)
    93  			defer deleteMountTarget(mountTarget)
    94  
     95  			// Run the EFS dynamic provisioning example (https://github.com/kubernetes-sigs/aws-efs-csi-driver/tree/master/examples/kubernetes/dynamic_provisioning).
    96  			ginkgo.By("Deploying efs dynamic provisioning resources")
    97  			storageClassName := "efs-sc"
    98  			createEFSStorageClass(storageClassName, clusterClient, efs)
    99  			createPVCForEFS(storageClassName, clusterClient)
   100  			createPodWithEFSMount(clusterClient)
   101  
   102  			ginkgo.By("Waiting for pod to be in running state")
    103  			// Verify that the pod is running.
   104  			framework.WaitForPodListCondition(ctx, framework.WaitForPodListConditionInput{
   105  				Lister: clusterClient,
   106  				ListOptions: &client.ListOptions{
   107  					Namespace: metav1.NamespaceDefault,
   108  				},
   109  				Condition: framework.PhasePodCondition(corev1.PodRunning),
   110  			})
   111  			ginkgo.By("PASSED!")
   112  		})
   113  	})
   114  
   115  	ginkgo.Describe("GPU-enabled cluster test", func() {
   116  		ginkgo.It("should create cluster with single worker", func() {
   117  			specName := "functional-gpu-cluster"
    118  			// Change the multiplier for EC2GPU if the GPU type is changed; g4dn.xlarge uses 2 vCPUs.
   119  			requiredResources = &shared.TestResource{EC2GPU: 2 * 2, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1}
   120  			requiredResources.WriteRequestedResources(e2eCtx, "gpu-test")
   121  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   122  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   123  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   124  
   125  			ginkgo.By("Creating cluster with a single worker")
   126  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   127  
   128  			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
   129  				ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
   130  				ConfigCluster: clusterctl.ConfigClusterInput{
   131  					LogFolder:                filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
   132  					ClusterctlConfigPath:     e2eCtx.Environment.ClusterctlConfigPath,
   133  					KubeconfigPath:           e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(),
   134  					InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
   135  					Flavor:                   shared.GPUFlavor,
   136  					Namespace:                namespace.Name,
   137  					ClusterName:              clusterName,
   138  					KubernetesVersion:        e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion),
   139  					ControlPlaneMachineCount: pointer.Int64Ptr(1),
   140  					WorkerMachineCount:       pointer.Int64Ptr(1),
   141  				},
   142  				WaitForClusterIntervals:      e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"),
   143  				WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"),
   144  				WaitForMachineDeployments:    e2eCtx.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
    145  				// The nvidia-gpu flavor creates a config map as part of a ClusterResourceSet that exceeds the annotation size limit when applied with kubectl apply.
    146  				// This is because kubectl stores the entire config map in the `last-applied-configuration` annotation for tracking.
    147  				// The workaround is to use server-side apply by passing the `--server-side` flag to kubectl apply.
    148  				// More on server-side apply: https://kubernetes.io/docs/reference/using-api/server-side-apply/
   149  				Args: []string{"--server-side"},
   150  			}, result)
   151  
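         			// Run the shared GPU validation spec against the newly created cluster.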
   152  			shared.AWSGPUSpec(ctx, e2eCtx, shared.AWSGPUSpecInput{
   153  				BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
   154  				NamespaceName:         namespace.Name,
   155  				ClusterName:           clusterName,
   156  				SkipCleanup:           false,
   157  			})
   158  			ginkgo.By("PASSED!")
   159  		})
   160  	})
   161  
   162  	ginkgo.Describe("Multitenancy test", func() {
   163  		ginkgo.It("should create cluster with nested assumed role", func() {
    164  			// Set up a namespace to host objects for this spec and create a watcher for namespace events.
   165  			specName := "functional-multitenancy-nested"
   166  			requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1}
   167  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   168  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   169  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   170  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   171  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
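         			// Export the multitenancy role details as environment variables so the nested multitenancy flavor templates can reference them.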
   172  			Expect(shared.SetMultitenancyEnvVars(e2eCtx.AWSSession)).To(Succeed())
   173  			ginkgo.By("Creating cluster")
   174  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   175  			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
   176  				ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy,
   177  				ConfigCluster: clusterctl.ConfigClusterInput{
   178  					LogFolder:                filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
   179  					ClusterctlConfigPath:     e2eCtx.Environment.ClusterctlConfigPath,
   180  					KubeconfigPath:           e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(),
   181  					InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
   182  					Flavor:                   shared.NestedMultitenancyFlavor,
   183  					Namespace:                namespace.Name,
   184  					ClusterName:              clusterName,
   185  					KubernetesVersion:        e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion),
   186  					ControlPlaneMachineCount: pointer.Int64Ptr(1),
   187  					WorkerMachineCount:       pointer.Int64Ptr(0),
   188  				},
   189  				WaitForClusterIntervals:      e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster"),
   190  				WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals(specName, "wait-control-plane"),
   191  			}, result)
   192  
   193  			// Check if bastion host is up and running
   194  			awsCluster, err := GetAWSClusterByName(ctx, namespace.Name, clusterName)
   195  			Expect(err).To(BeNil())
   196  			Expect(awsCluster.Status.Bastion.State).To(Equal(infrav1.InstanceStateRunning))
   197  			expectAWSClusterConditions(awsCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionTrue, "", ""}})
   198  			ginkgo.By("PASSED!")
   199  		})
   200  	})
   201  
    202  	// TODO: @sedefsavas: Requires env var logic to be removed
   203  	ginkgo.PDescribe("[Serial] Upgrade to main branch Kubernetes", func() {
   204  		ginkgo.Context("in same namespace", func() {
   205  			ginkgo.It("should create the clusters", func() {
   206  				specName := "upgrade-to-main-branch-k8s"
   207  				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 3, VPC: 1, ClassicLB: 1, EIP: 3}
   208  				requiredResources.WriteRequestedResources(e2eCtx, "upgrade-to-master-test")
   209  				Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   210  				defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   211  				namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   212  				defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   213  				ginkgo.By("Creating first cluster with single control plane")
   214  				cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
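         				// USE_CI_ARTIFACTS=true makes the cluster templates install Kubernetes from CI builds rather than released artifacts.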
   215  				shared.SetEnvVar("USE_CI_ARTIFACTS", "true", false)
   216  				tagPrefix := "v"
   217  				searchSemVer, err := semver.Make(strings.TrimPrefix(e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), tagPrefix))
   218  				Expect(err).NotTo(HaveOccurred())
   219  
   220  				shared.SetEnvVar(shared.KubernetesVersion, "v"+searchSemVer.String(), false)
   221  				configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
   222  
   223  				configCluster.Flavor = shared.UpgradeToMain
   224  				configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   225  				createCluster(ctx, configCluster, result)
   226  
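         				// Resolve the most recent CI build of the same release series to use as the upgrade target.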
    227  				kubernetesUpgradeVersion, err := LatestCIReleaseForVersion("v" + searchSemVer.String())
    228  				Expect(err).NotTo(HaveOccurred())
    229  				configCluster.KubernetesVersion = kubernetesUpgradeVersion
   230  				configCluster.Flavor = "upgrade-ci-artifacts"
   231  				cluster2, md, kcp := createCluster(ctx, configCluster, result)
   232  
   233  				ginkgo.By(fmt.Sprintf("Waiting for Kubernetes versions of machines in MachineDeployment %s/%s to be upgraded from %s to %s",
    234  					md[0].Namespace, md[0].Name, e2eCtx.E2EConfig.GetVariable(shared.KubernetesVersion), kubernetesUpgradeVersion))
   235  
   236  				framework.WaitForMachineDeploymentMachinesToBeUpgraded(ctx, framework.WaitForMachineDeploymentMachinesToBeUpgradedInput{
   237  					Lister:                   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   238  					Cluster:                  cluster2,
   239  					MachineCount:             int(*md[0].Spec.Replicas),
    240  					KubernetesUpgradeVersion: kubernetesUpgradeVersion,
   241  					MachineDeployment:        *md[0],
   242  				}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
   243  
   244  				ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
   245  				framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
   246  					Lister:                   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   247  					Cluster:                  cluster2,
   248  					MachineCount:             int(*kcp.Spec.Replicas),
    249  					KubernetesUpgradeVersion: kubernetesUpgradeVersion,
   250  				}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-machine-upgrade")...)
   251  
   252  				ginkgo.By("Deleting the Clusters")
   253  				shared.SetEnvVar("USE_CI_ARTIFACTS", "false", false)
   254  				deleteCluster(ctx, cluster2)
   255  			})
   256  		})
   257  	})
   258  
   259  	ginkgo.Describe("CSI=in-tree CCM=in-tree AWSCSIMigration=off: upgrade to v1.23", func() {
   260  		ginkgo.It("should create volumes dynamically with external cloud provider", func() {
   261  			specName := "csimigration-off-upgrade"
   262  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4}
   263  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   264  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   265  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   266  			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
   267  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   268  
   269  			ginkgo.By("Creating first cluster with single control plane")
   270  			cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   271  			configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
   272  			configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
   273  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   274  			createCluster(ctx, configCluster, result)
   275  
    276  			// Create a StatefulSet with a PVC and confirm it works with the in-tree provider.
   277  			nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
   278  
   279  			ginkgo.By("Deploying StatefulSet on infra")
   280  			clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
   281  
   282  			createStatefulSet(nginxStatefulsetInfo, clusterClient)
   283  			awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient)
   284  			verifyVolumesExists(awsVolIds)
   285  
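         			// Re-applying the template for the same cluster name with a newer Kubernetes version and the csimigration-off flavor upgrades the existing cluster in place.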
    286  			kubernetesUpgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
    287  			configCluster.KubernetesVersion = kubernetesUpgradeVersion
   288  			configCluster.Flavor = "csimigration-off"
   289  
   290  			cluster2, _, kcp := createCluster(ctx, configCluster, result)
   291  
   292  			ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
   293  			framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
   294  				Lister:                   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   295  				Cluster:                  cluster2,
   296  				MachineCount:             int(*kcp.Spec.Replicas),
    297  				KubernetesUpgradeVersion: kubernetesUpgradeVersion,
   298  			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
   299  
   300  			ginkgo.By("Creating the LB service")
   301  			lbServiceName := "test-svc-" + util.RandomString(6)
   302  			elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   303  			verifyElbExists(elbName, true)
   304  
   305  			ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
   306  			waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
   307  
   308  			nginxStatefulsetInfo2 := createStatefulSetInfo(true, "postupgrade")
   309  
   310  			ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
   311  			createStatefulSet(nginxStatefulsetInfo2, clusterClient)
   312  			awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient)
   313  			verifyVolumesExists(awsVolIds)
   314  
   315  			ginkgo.By("Deleting LB service")
   316  			deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   317  
   318  			ginkgo.By("Deleting the Clusters")
   319  			deleteCluster(ctx, cluster2)
   320  
   321  			ginkgo.By("Deleting retained dynamically provisioned volumes")
   322  			deleteRetainedVolumes(awsVolIds)
   323  			ginkgo.By("PASSED!")
   324  		})
   325  	})
   326  
   327  	ginkgo.Describe("CSI=external CCM=in-tree AWSCSIMigration=on: upgrade to v1.23", func() {
   328  		ginkgo.It("should create volumes dynamically with external cloud provider", func() {
   329  			specName := "only-csi-external-upgrade"
   330  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4}
   331  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   332  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   333  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   334  			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
   335  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   336  			ginkgo.By("Creating first cluster with single control plane")
   337  			cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   338  
   339  			configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
   340  			configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
   341  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   342  			createCluster(ctx, configCluster, result)
   343  
    344  			// Create a StatefulSet with a PVC and confirm it works with the in-tree provider.
   345  			nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
   346  
   347  			ginkgo.By("Deploying StatefulSet on infra")
   348  			clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
   349  
   350  			createStatefulSet(nginxStatefulsetInfo, clusterClient)
   351  			awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient)
   352  			verifyVolumesExists(awsVolIds)
   353  
    354  			kubernetesUpgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
    355  
    356  			configCluster.KubernetesVersion = kubernetesUpgradeVersion
   357  			configCluster.Flavor = "external-csi"
   358  
   359  			cluster2, _, kcp := createCluster(ctx, configCluster, result)
   360  
   361  			ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
   362  			framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
   363  				Lister:                   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   364  				Cluster:                  cluster2,
   365  				MachineCount:             int(*kcp.Spec.Replicas),
    366  				KubernetesUpgradeVersion: kubernetesUpgradeVersion,
   367  			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
   368  
   369  			ginkgo.By("Creating the LB service")
   370  			lbServiceName := "test-svc-" + util.RandomString(6)
   371  			elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   372  			verifyElbExists(elbName, true)
   373  
   374  			ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
   375  			waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
   376  
   377  			nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade")
   378  
   379  			ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
   380  			createStatefulSet(nginxStatefulsetInfo2, clusterClient)
   381  			awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient)
   382  			verifyVolumesExists(awsVolIds)
   383  
   384  			ginkgo.By("Deleting LB service")
   385  			deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   386  
   387  			ginkgo.By("Deleting the Clusters")
   388  			deleteCluster(ctx, cluster2)
   389  
   390  			ginkgo.By("Deleting retained dynamically provisioned volumes")
   391  			deleteRetainedVolumes(awsVolIds)
   392  			ginkgo.By("PASSED!")
   393  		})
   394  	})
   395  
   396  	ginkgo.Describe("CSI=external CCM=external AWSCSIMigration=on: upgrade to v1.23", func() {
   397  		ginkgo.It("should create volumes dynamically with external cloud provider", func() {
   398  			specName := "csi-ccm-external-upgrade"
   399  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 1, VolumeGP2: 4}
   400  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   401  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   402  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   403  			namespace := shared.SetupNamespace(ctx, specName, e2eCtx)
   404  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   405  
   406  			ginkgo.By("Creating first cluster with single control plane")
   407  			cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   408  			configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
   409  			configCluster.KubernetesVersion = e2eCtx.E2EConfig.GetVariable(shared.PreCSIKubernetesVer)
   410  
   411  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   412  			createCluster(ctx, configCluster, result)
   413  
    414  			// Create a StatefulSet with a PVC and confirm it works with the in-tree provider.
   415  			nginxStatefulsetInfo := createStatefulSetInfo(true, "intree")
   416  
   417  			ginkgo.By("Deploying StatefulSet on infra")
   418  			clusterClient := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, cluster1Name).GetClient()
   419  
   420  			createStatefulSet(nginxStatefulsetInfo, clusterClient)
   421  			awsVolIds := getVolumeIds(nginxStatefulsetInfo, clusterClient)
   422  			verifyVolumesExists(awsVolIds)
   423  
    424  			kubernetesUpgradeVersion := e2eCtx.E2EConfig.GetVariable(shared.PostCSIKubernetesVer)
    425  			configCluster.KubernetesVersion = kubernetesUpgradeVersion
   426  			configCluster.Flavor = "external-cloud-provider"
   427  
   428  			cluster2, _, kcp := createCluster(ctx, configCluster, result)
   429  
   430  			ginkgo.By("Waiting for control-plane machines to have the upgraded kubernetes version")
   431  			framework.WaitForControlPlaneMachinesToBeUpgraded(ctx, framework.WaitForControlPlaneMachinesToBeUpgradedInput{
   432  				Lister:                   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   433  				Cluster:                  cluster2,
   434  				MachineCount:             int(*kcp.Spec.Replicas),
    435  				KubernetesUpgradeVersion: kubernetesUpgradeVersion,
   436  			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-contolplane-upgrade")...)
   437  
   438  			ginkgo.By("Creating the LB service")
   439  			lbServiceName := "test-svc-" + util.RandomString(6)
   440  			elbName := createLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   441  			verifyElbExists(elbName, true)
   442  
   443  			ginkgo.By("Checking v1.22 StatefulSet still healthy after the upgrade")
   444  			waitForStatefulSetRunning(nginxStatefulsetInfo, clusterClient)
   445  
   446  			nginxStatefulsetInfo2 := createStatefulSetInfo(false, "postupgrade")
   447  
   448  			ginkgo.By("Deploying StatefulSet on infra when K8s >= 1.23")
   449  			createStatefulSet(nginxStatefulsetInfo2, clusterClient)
   450  			awsVolIds = getVolumeIds(nginxStatefulsetInfo2, clusterClient)
   451  			verifyVolumesExists(awsVolIds)
   452  
   453  			ginkgo.By("Deleting LB service")
   454  			deleteLBService(metav1.NamespaceDefault, lbServiceName, clusterClient)
   455  
   456  			ginkgo.By("Deleting the Clusters")
   457  			deleteCluster(ctx, cluster2)
   458  
   459  			ginkgo.By("Deleting retained dynamically provisioned volumes")
   460  			deleteRetainedVolumes(awsVolIds)
   461  			ginkgo.By("PASSED!")
   462  		})
   463  	})
   464  
   465  	ginkgo.Describe("Workload cluster with AWS SSM Parameter as the Secret Backend", func() {
   466  		ginkgo.It("should be creatable and deletable", func() {
   467  			specName := "functional-test-ssm-parameter-store"
   468  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
   469  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   470  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   471  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   472  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   473  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   474  
   475  			ginkgo.By("Creating a cluster")
   476  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   477  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
   478  			configCluster.ControlPlaneMachineCount = pointer.Int64Ptr(1)
   479  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   480  			configCluster.Flavor = shared.SSMFlavor
   481  			_, md, _ := createCluster(ctx, configCluster, result)
   482  
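         			// With SSM Parameter Store as the secret backend, the cluster should still come up with exactly one worker and one control plane machine.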
   483  			workerMachines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
   484  				Lister:            e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   485  				ClusterName:       clusterName,
   486  				Namespace:         namespace.Name,
   487  				MachineDeployment: *md[0],
   488  			})
   489  			controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{
   490  				Lister:      e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   491  				ClusterName: clusterName,
   492  				Namespace:   namespace.Name,
   493  			})
   494  			Expect(len(workerMachines)).To(Equal(1))
   495  			Expect(len(controlPlaneMachines)).To(Equal(1))
   496  		})
   497  	})
   498  
   499  	ginkgo.Describe("MachineDeployment misconfigurations", func() {
   500  		ginkgo.It("MachineDeployment misconfigurations", func() {
   501  			specName := "functional-test-md-misconfigurations"
   502  			requiredResources = &shared.TestResource{EC2Normal: 1 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
   503  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   504  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   505  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   506  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   507  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   508  			ginkgo.By("Creating a cluster")
   509  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   510  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
   511  			_, _, _ = createCluster(ctx, configCluster, result)
   512  
   513  			ginkgo.By("Creating Machine Deployment with invalid subnet ID")
   514  			md1Name := clusterName + "-md-1"
   515  			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
   516  				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   517  				MachineDeployment:       makeMachineDeployment(namespace.Name, md1Name, clusterName, 1),
   518  				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, md1Name),
   519  				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, md1Name, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), nil, pointer.StringPtr("invalid-subnet")),
   520  			})
   521  
   522  			ginkgo.By("Looking for failure event to be reported")
   523  			Eventually(func() bool {
   524  				eventList := getEvents(namespace.Name)
   525  				subnetError := "Failed to create instance: failed to run instance: InvalidSubnetID.NotFound: " +
   526  					"The subnet ID '%s' does not exist"
   527  				return isErrorEventExists(namespace.Name, md1Name, "FailedCreate", fmt.Sprintf(subnetError, "invalid-subnet"), eventList)
   528  			}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue())
   529  
   530  			ginkgo.By("Creating Machine Deployment in non-configured Availability Zone")
   531  			md2Name := clusterName + "-md-2"
    532  			// By default, the first availability zone is used for cluster resources. This step attempts to create a machine deployment in the second availability zone, which has no configured subnets.
   533  			invalidAz := shared.GetAvailabilityZones(e2eCtx.AWSSession)[1].ZoneName
   534  			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
   535  				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   536  				MachineDeployment:       makeMachineDeployment(namespace.Name, md2Name, clusterName, 1),
   537  				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, md2Name),
   538  				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, md2Name, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), invalidAz, nil),
   539  			})
   540  
   541  			ginkgo.By("Looking for failure event to be reported")
   542  			Eventually(func() bool {
   543  				eventList := getEvents(namespace.Name)
   544  				azError := "Failed to create instance: no subnets available in availability zone \"%s\""
   545  				return isErrorEventExists(namespace.Name, md2Name, "FailedCreate", fmt.Sprintf(azError, *invalidAz), eventList)
   546  			}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...).Should(BeTrue())
   547  		})
   548  	})
   549  
   550  	ginkgo.Describe("Workload cluster in multiple AZs", func() {
    551  		ginkgo.It("should be creatable and deletable", func() {
   552  			specName := "functional-test-multi-az"
   553  			requiredResources = &shared.TestResource{EC2Normal: 3 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
   554  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   555  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   556  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   557  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   558  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   559  			ginkgo.By("Creating a cluster")
   560  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   561  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
   562  			configCluster.ControlPlaneMachineCount = pointer.Int64Ptr(3)
   563  			configCluster.Flavor = shared.MultiAzFlavor
   564  			cluster, _, _ := createCluster(ctx, configCluster, result)
   565  
   566  			ginkgo.By("Adding worker nodes to additional subnets")
   567  			mdName1 := clusterName + "-md-1"
   568  			mdName2 := clusterName + "-md-2"
   569  			md1 := makeMachineDeployment(namespace.Name, mdName1, clusterName, 1)
   570  			md2 := makeMachineDeployment(namespace.Name, mdName2, clusterName, 1)
   571  			az1 := os.Getenv(shared.AwsAvailabilityZone1)
   572  			az2 := os.Getenv(shared.AwsAvailabilityZone2)
   573  
    574  			// The private subnet CIDRs used below are defined in cluster-template-multi-az.yaml.
   575  			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
   576  				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   577  				MachineDeployment:       md1,
   578  				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName1),
   579  				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName1, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), pointer.StringPtr(az1), getSubnetID("cidr-block", "10.0.0.0/24", clusterName)),
   580  			})
   581  			framework.CreateMachineDeployment(ctx, framework.CreateMachineDeploymentInput{
   582  				Creator:                 e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   583  				MachineDeployment:       md2,
   584  				BootstrapConfigTemplate: makeJoinBootstrapConfigTemplate(namespace.Name, mdName2),
   585  				InfraMachineTemplate:    makeAWSMachineTemplate(namespace.Name, mdName2, e2eCtx.E2EConfig.GetVariable(shared.AwsNodeMachineType), pointer.StringPtr(az2), getSubnetID("cidr-block", "10.0.2.0/24", clusterName)),
   586  			})
   587  
   588  			ginkgo.By("Waiting for new worker nodes to become ready")
   589  			k8sClient := e2eCtx.Environment.BootstrapClusterProxy.GetClient()
   590  			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md1}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
   591  			framework.WaitForMachineDeploymentNodesToExist(ctx, framework.WaitForMachineDeploymentNodesToExistInput{Lister: k8sClient, Cluster: cluster, MachineDeployment: md2}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
   592  		})
   593  	})
   594  
   595  	// TODO @randomvariable: Await more resources
   596  	ginkgo.PDescribe("Multiple workload clusters", func() {
   597  		ginkgo.Context("in different namespaces with machine failures", func() {
   598  			ginkgo.It("should setup namespaces correctly for the two clusters", func() {
   599  				specName := "functional-test-multi-namespace"
   600  				requiredResources = &shared.TestResource{EC2Normal: 4 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6}
   601  				requiredResources.WriteRequestedResources(e2eCtx, specName)
   602  				Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   603  				defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   604  
   605  				ginkgo.By("Creating first cluster with single control plane")
   606  				ns1, cf1 := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
   607  					Creator:   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   608  					ClientSet: e2eCtx.Environment.BootstrapClusterProxy.GetClientSet(),
   609  					Name:      fmt.Sprintf("functional-multi-namespace-1-%s", util.RandomString(6)),
   610  					LogFolder: filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
   611  				})
   612  				e2eCtx.Environment.Namespaces[ns1] = cf1
   613  				ns2, cf2 := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
   614  					Creator:   e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   615  					ClientSet: e2eCtx.Environment.BootstrapClusterProxy.GetClientSet(),
   616  					Name:      fmt.Sprintf("functional-multi-namespace-2-%s", util.RandomString(6)),
   617  					LogFolder: filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", e2eCtx.Environment.BootstrapClusterProxy.GetName()),
   618  				})
   619  				e2eCtx.Environment.Namespaces[ns2] = cf2
   620  
   621  				ginkgo.By("Creating first cluster")
   622  				cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   623  				configCluster := defaultConfigCluster(cluster1Name, ns1.Name)
   624  				configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   625  				configCluster.Flavor = shared.LimitAzFlavor
   626  				cluster1, md1, _ := createCluster(ctx, configCluster, result)
   627  				Expect(len(md1)).To(Equal(1), "Expecting one MachineDeployment")
   628  
   629  				ginkgo.By("Deleting a worker node machine")
   630  				deleteMachine(ns1, md1[0])
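         				// Give the MachineDeployment controller a moment to observe the deletion before verifying it reconciles back to a running state.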
   631  				time.Sleep(10 * time.Second)
   632  
   633  				ginkgo.By("Verifying MachineDeployment is running.")
   634  				framework.DiscoveryAndWaitForMachineDeployments(ctx, framework.DiscoveryAndWaitForMachineDeploymentsInput{Cluster: cluster1, Lister: e2eCtx.Environment.BootstrapClusterProxy.GetClient()}, e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes")...)
   635  
   636  				ginkgo.By("Creating second cluster")
   637  				cluster2Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   638  				configCluster = defaultConfigCluster(cluster2Name, ns2.Name)
   639  				configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   640  				configCluster.Flavor = shared.LimitAzFlavor
   641  				cluster2, md2, _ := createCluster(ctx, configCluster, result)
   642  				Expect(len(md2)).To(Equal(1), "Expecting one MachineDeployment")
   643  
   644  				ginkgo.By("Deleting node directly from infra cloud")
   645  				machines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
   646  					Lister:            e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
    647  					ClusterName:       cluster2Name,
   648  					Namespace:         ns2.Name,
   649  					MachineDeployment: *md2[0],
   650  				})
   651  				Expect(len(machines)).Should(BeNumerically(">", 0))
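         				// Terminate the underlying EC2 instance out of band to simulate an infrastructure failure.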
   652  				terminateInstance(*machines[0].Spec.ProviderID)
   653  
   654  				ginkgo.By("Waiting for AWSMachine to be labelled as terminated")
   655  				Eventually(func() bool {
   656  					machineList := getAWSMachinesForDeployment(ns2.Name, *md2[0])
   657  					labels := machineList.Items[0].GetLabels()
   658  					return labels[instancestate.Ec2InstanceStateLabelKey] == string(infrav1.InstanceStateTerminated)
   659  				}, e2eCtx.E2EConfig.GetIntervals("", "wait-machine-status")...).Should(Equal(true))
   660  
   661  				ginkgo.By("Waiting for machine to reach Failed state")
   662  				statusChecks := []framework.MachineStatusCheck{framework.MachinePhaseCheck(string(clusterv1.MachinePhaseFailed))}
   663  				machineStatusInput := framework.WaitForMachineStatusCheckInput{
   664  					Getter:       e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   665  					Machine:      &machines[0],
   666  					StatusChecks: statusChecks,
   667  				}
   668  				framework.WaitForMachineStatusCheck(ctx, machineStatusInput, e2eCtx.E2EConfig.GetIntervals("", "wait-machine-status")...)
   669  
   670  				ginkgo.By("Deleting the clusters and namespaces")
   671  				deleteCluster(ctx, cluster1)
   672  				deleteCluster(ctx, cluster2)
   673  				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), Name: ns1.Name})
   674  				framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), Name: ns2.Name})
   675  				cf1()
   676  				cf2()
   677  			})
   678  		})
   679  
   680  		ginkgo.Context("Defining clusters in the same namespace", func() {
   681  			specName := "functional-test-multi-cluster-single-namespace"
   682  			ginkgo.It("should create the clusters", func() {
   683  				requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 6}
   684  				requiredResources.WriteRequestedResources(e2eCtx, specName)
   685  				Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   686  				defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   687  				namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   688  				defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   689  				ginkgo.By("Creating first cluster with single control plane")
   690  				cluster1Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   691  				configCluster := defaultConfigCluster(cluster1Name, namespace.Name)
   692  				configCluster.Flavor = shared.LimitAzFlavor
   693  				cluster1, _, _ := createCluster(ctx, configCluster, result)
   694  
   695  				ginkgo.By("Creating second cluster with single control plane")
   696  				cluster2Name := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   697  				configCluster = defaultConfigCluster(cluster2Name, namespace.Name)
   698  				configCluster.Flavor = shared.LimitAzFlavor
   699  				cluster2, _, _ := createCluster(ctx, configCluster, result)
   700  
   701  				ginkgo.By("Deleting the Clusters")
   702  				deleteCluster(ctx, cluster1)
   703  				deleteCluster(ctx, cluster2)
   704  			})
   705  		})
   706  	})
   707  
   708  	ginkgo.Describe("Workload cluster with spot instances", func() {
   709  		ginkgo.It("should be creatable and deletable", func() {
   710  			specName := "functional-test-spot-instances"
   711  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 1, NGW: 1, VPC: 1, ClassicLB: 1, EIP: 3}
   712  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   713  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   714  			defer shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   715  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   716  			defer shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   717  			ginkgo.By("Creating a cluster")
   718  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   719  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
   720  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   721  			configCluster.Flavor = shared.SpotInstancesFlavor
   722  			_, md, _ := createCluster(ctx, configCluster, result)
   723  
   724  			workerMachines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
   725  				Lister:            e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   726  				ClusterName:       clusterName,
   727  				Namespace:         namespace.Name,
   728  				MachineDeployment: *md[0],
   729  			})
   730  			controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{
   731  				Lister:      e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   732  				ClusterName: clusterName,
   733  				Namespace:   namespace.Name,
   734  			})
   735  			Expect(len(workerMachines)).To(Equal(1))
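         			// The worker's provider ID is used to verify that the backing EC2 instance was launched as a spot instance.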
   736  			assertSpotInstanceType(*workerMachines[0].Spec.ProviderID)
   737  			Expect(len(controlPlaneMachines)).To(Equal(1))
   738  		})
   739  	})
   740  
    741  	// This test builds a management cluster using an externally managed VPC and subnets. CAPA still handles security group
    742  	// creation for the management cluster. The workload cluster is created in a peered VPC with a single externally managed security group.
    743  	// A private and a public subnet are created in this VPC to allow egress traffic, but the workload AWSCluster is configured with
    744  	// an internal load balancer and only the private subnet. All applicable resources are restricted to us-west-2a for simplicity.
   745  	ginkgo.PDescribe("External infrastructure, external security groups, VPC peering, internal ELB and private subnet use only", func() {
   746  		var namespace *corev1.Namespace
   747  		var requiredResources *shared.TestResource
   748  		specName := "functional-test-extinfra"
   749  		mgmtClusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   750  		mgmtClusterInfra := new(shared.AWSInfrastructure)
   751  		shared.SetEnvVar("MGMT_CLUSTER_NAME", mgmtClusterName, false)
   752  
   753  		wlClusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
   754  		wlClusterInfra := new(shared.AWSInfrastructure)
   755  
   756  		var cPeering *ec2.VpcPeeringConnection
   757  
   758  		// Some infrastructure creation was moved to a setup node to better organize the test.
   759  		ginkgo.JustBeforeEach(func() {
   760  			requiredResources = &shared.TestResource{EC2Normal: 2 * e2eCtx.Settings.InstanceVCPU, IGW: 2, NGW: 2, VPC: 2, ClassicLB: 2, EIP: 5}
   761  			requiredResources.WriteRequestedResources(e2eCtx, specName)
   762  			Expect(shared.AcquireResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))).To(Succeed())
   763  			namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   764  			ginkgo.By("Creating the management cluster infrastructure")
   765  			mgmtClusterInfra.New(shared.AWSInfrastructureSpec{
   766  				ClusterName:       mgmtClusterName,
   767  				VpcCidr:           "10.0.0.0/23",
   768  				PublicSubnetCidr:  "10.0.0.0/24",
   769  				PrivateSubnetCidr: "10.0.1.0/24",
   770  				AvailabilityZone:  "us-west-2a",
   771  			}, e2eCtx)
   772  			mgmtClusterInfra.CreateInfrastructure()
   773  
   774  			ginkgo.By("Creating the workload cluster infrastructure")
   775  			wlClusterInfra.New(shared.AWSInfrastructureSpec{
   776  				ClusterName:       wlClusterName,
   777  				VpcCidr:           "10.0.2.0/23",
   778  				PublicSubnetCidr:  "10.0.2.0/24",
   779  				PrivateSubnetCidr: "10.0.3.0/24",
   780  				AvailabilityZone:  "us-west-2a",
   781  			}, e2eCtx)
   782  			wlClusterInfra.CreateInfrastructure()
   783  
   784  			ginkgo.By("Creating VPC peerings")
   785  			cPeering, _ = shared.CreatePeering(e2eCtx, mgmtClusterName+"-"+wlClusterName, *mgmtClusterInfra.VPC.VpcId, *wlClusterInfra.VPC.VpcId)
   786  		})
   787  
    788  		// Infrastructure cleanup is done in a setup node so it is not bypassed if there is a test failure in the subject node.
   789  		ginkgo.JustAfterEach(func() {
   790  			shared.ReleaseResources(requiredResources, config.GinkgoConfig.ParallelNode, flock.New(shared.ResourceQuotaFilePath))
   791  			shared.DumpSpecResourcesAndCleanup(ctx, "", namespace, e2eCtx)
   792  			if !e2eCtx.Settings.SkipCleanup {
   793  				ginkgo.By("Deleting peering connection")
   794  				if cPeering != nil && cPeering.VpcPeeringConnectionId != nil {
   795  					shared.DeletePeering(e2eCtx, *cPeering.VpcPeeringConnectionId)
   796  				}
   797  				ginkgo.By("Deleting the workload cluster infrastructure")
   798  				wlClusterInfra.DeleteInfrastructure()
   799  				ginkgo.By("Deleting the management cluster infrastructure")
   800  				mgmtClusterInfra.DeleteInfrastructure()
   801  			}
   802  		})
   803  
   804  		ginkgo.It("should create external clusters in peered VPC and with an internal ELB and only utilize a private subnet", func() {
   805  			ginkgo.By("Validating management infrastructure")
   806  			Expect(mgmtClusterInfra.VPC).NotTo(BeNil())
   807  			Expect(*mgmtClusterInfra.State.VpcState).To(Equal("available"))
   808  			Expect(len(mgmtClusterInfra.Subnets)).To(Equal(2))
   809  			Expect(mgmtClusterInfra.InternetGateway).NotTo(BeNil())
   810  			Expect(mgmtClusterInfra.ElasticIP).NotTo(BeNil())
   811  			Expect(mgmtClusterInfra.NatGateway).NotTo(BeNil())
   812  			Expect(len(mgmtClusterInfra.RouteTables)).To(Equal(2))
   813  
   814  			ginkgo.By("Validating workload infrastructure")
   815  			Expect(wlClusterInfra.VPC).NotTo(BeNil())
   816  			Expect(*wlClusterInfra.State.VpcState).To(Equal("available"))
   817  			Expect(len(wlClusterInfra.Subnets)).To(Equal(2))
   818  			Expect(wlClusterInfra.InternetGateway).NotTo(BeNil())
   819  			Expect(wlClusterInfra.ElasticIP).NotTo(BeNil())
   820  			Expect(wlClusterInfra.NatGateway).NotTo(BeNil())
   821  			Expect(len(wlClusterInfra.RouteTables)).To(Equal(2))
   822  
   823  			ginkgo.By("Validate and accept peering")
   824  			Expect(cPeering).NotTo(BeNil())
   825  			Eventually(func() bool {
   826  				aPeering, err := shared.AcceptPeering(e2eCtx, *cPeering.VpcPeeringConnectionId)
   827  				if err != nil {
   828  					return false
   829  				}
   830  				wlClusterInfra.Peering = aPeering
   831  				return aPeering != nil
   832  			}, 60*time.Second).Should(BeTrue())
   833  
   834  			ginkgo.By("Creating security groups")
   835  			mgmtSG, _ := shared.CreateSecurityGroup(e2eCtx, mgmtClusterName+"-all", mgmtClusterName+"-all", *mgmtClusterInfra.VPC.VpcId)
   836  			Expect(mgmtSG).NotTo(BeNil())
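         			// Open the shared security group to all protocols, ports, and sources for the duration of the test.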
   837  			shared.CreateSecurityGroupIngressRule(e2eCtx, *mgmtSG.GroupId, "all default", "0.0.0.0/0", "-1", -1, -1)
   838  			shared.SetEnvVar("SG_ID", *mgmtSG.GroupId, false)
   839  
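         			// Export the externally managed VPC and subnet IDs so the peered-remote and internal-elb cluster templates can reference them.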
   840  			shared.SetEnvVar("MGMT_VPC_ID", *mgmtClusterInfra.VPC.VpcId, false)
   841  			shared.SetEnvVar("WL_VPC_ID", *wlClusterInfra.VPC.VpcId, false)
   842  			shared.SetEnvVar("MGMT_PUBLIC_SUBNET_ID", *mgmtClusterInfra.State.PublicSubnetID, false)
   843  			shared.SetEnvVar("MGMT_PRIVATE_SUBNET_ID", *mgmtClusterInfra.State.PrivateSubnetID, false)
   844  			shared.SetEnvVar("WL_PRIVATE_SUBNET_ID", *wlClusterInfra.State.PrivateSubnetID, false)
   845  
   846  			ginkgo.By("Creating routes for peerings")
   847  			shared.CreateRoute(e2eCtx, *mgmtClusterInfra.State.PublicRouteTableID, "10.0.2.0/23", nil, nil, cPeering.VpcPeeringConnectionId)
   848  			shared.CreateRoute(e2eCtx, *mgmtClusterInfra.State.PrivateRouteTableID, "10.0.2.0/23", nil, nil, cPeering.VpcPeeringConnectionId)
   849  			shared.CreateRoute(e2eCtx, *wlClusterInfra.State.PublicRouteTableID, "10.0.0.0/23", nil, nil, cPeering.VpcPeeringConnectionId)
   850  			shared.CreateRoute(e2eCtx, *wlClusterInfra.State.PrivateRouteTableID, "10.0.0.0/23", nil, nil, cPeering.VpcPeeringConnectionId)
   851  
   852  			ginkgo.By("Creating a management cluster in a peered VPC")
   853  			mgmtConfigCluster := defaultConfigCluster(mgmtClusterName, namespace.Name)
   854  			mgmtConfigCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   855  			mgmtConfigCluster.Flavor = "peered-remote"
   856  			mgmtCluster, mgmtMD, _ := createCluster(ctx, mgmtConfigCluster, result)
   857  
   858  			mgmtWM := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
   859  				Lister:            e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   860  				ClusterName:       mgmtClusterName,
   861  				Namespace:         namespace.Name,
   862  				MachineDeployment: *mgmtMD[0],
   863  			})
   864  			mgmtCPM := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{
   865  				Lister:      e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   866  				ClusterName: mgmtClusterName,
   867  				Namespace:   namespace.Name,
   868  			})
   869  			Expect(len(mgmtWM)).To(Equal(1))
   870  			Expect(len(mgmtCPM)).To(Equal(1))
   871  
   872  			mgmtClusterProxy := e2eCtx.Environment.BootstrapClusterProxy.GetWorkloadCluster(ctx, mgmtCluster.Namespace, mgmtCluster.Name)
   873  
   874  			shared.Byf("Creating a namespace for hosting the %s test spec", specName)
   875  			mgmtNamespace := framework.CreateNamespace(ctx, framework.CreateNamespaceInput{
   876  				Creator: mgmtClusterProxy.GetClient(),
   877  				Name:    namespace.Name,
   878  			})
   879  
   880  			ginkgo.By("Initializing the management cluster")
   881  			clusterctl.InitManagementClusterAndWatchControllerLogs(ctx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{
   882  				ClusterProxy:            mgmtClusterProxy,
   883  				ClusterctlConfigPath:    e2eCtx.Environment.ClusterctlConfigPath,
   884  				InfrastructureProviders: e2eCtx.E2EConfig.InfrastructureProviders(),
   885  				LogFolder:               filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", mgmtCluster.Name),
   886  			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-controllers")...)
   887  
   888  			ginkgo.By("Ensure API servers are stable before doing the move")
   889  			Consistently(func() error {
   890  				kubeSystem := &corev1.Namespace{}
   891  				return e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem)
   892  			}, "5s", "100ms").Should(BeNil(), "Failed to assert bootstrap API server stability")
   893  			Consistently(func() error {
   894  				kubeSystem := &corev1.Namespace{}
   895  				return mgmtClusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: "kube-system"}, kubeSystem)
   896  			}, "5s", "100ms").Should(BeNil(), "Failed to assert management API server stability")
   897  
   898  			ginkgo.By("Moving the management cluster to be self hosted")
   899  			clusterctl.Move(ctx, clusterctl.MoveInput{
   900  				LogFolder:            filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", "bootstrap"),
   901  				ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
   902  				FromKubeconfigPath:   e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(),
   903  				ToKubeconfigPath:     mgmtClusterProxy.GetKubeconfigPath(),
   904  				Namespace:            namespace.Name,
   905  			})
   906  
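         			// After the move, re-discover the Cluster object from the now self-hosted management cluster.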
   907  			mgmtCluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
   908  				Getter:    mgmtClusterProxy.GetClient(),
   909  				Namespace: mgmtNamespace.Name,
   910  				Name:      mgmtCluster.Name,
   911  			}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster")...)
   912  
   913  			mgmtControlPlane := framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
   914  				Lister:      mgmtClusterProxy.GetClient(),
   915  				ClusterName: mgmtCluster.Name,
   916  				Namespace:   mgmtCluster.Namespace,
   917  			})
   918  			Expect(mgmtControlPlane).ToNot(BeNil())
   919  
   920  			ginkgo.By("Creating a namespace to host the internal-elb spec")
   921  			wlNamespace := framework.CreateNamespace(ctx, framework.CreateNamespaceInput{
   922  				Creator: mgmtClusterProxy.GetClient(),
   923  				Name:    wlClusterName,
   924  			})
   925  
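        			// The "internal-elb" flavor fronts the workload cluster's API server with an
        			// internal load balancer, so the self-hosted management cluster reaches it
        			// over the VPC peering connection rather than the public internet.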
   926  			ginkgo.By("Creating workload cluster with internal ELB")
   927  			wlConfigCluster := defaultConfigCluster(wlClusterName, wlNamespace.Name)
   928  			wlConfigCluster.WorkerMachineCount = pointer.Int64Ptr(1)
   929  			wlConfigCluster.Flavor = "internal-elb"
   930  			wlResult := &clusterctl.ApplyClusterTemplateAndWaitResult{}
   931  			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterctl.ApplyClusterTemplateAndWaitInput{
   932  				ClusterProxy:                 mgmtClusterProxy,
   933  				ConfigCluster:                wlConfigCluster,
   934  				WaitForClusterIntervals:      e2eCtx.E2EConfig.GetIntervals("", "wait-cluster"),
   935  				WaitForControlPlaneIntervals: e2eCtx.E2EConfig.GetIntervals("", "wait-control-plane"),
   936  				WaitForMachineDeployments:    e2eCtx.E2EConfig.GetIntervals("", "wait-worker-nodes"),
   937  			}, wlResult)
   938  
   939  			wlWM := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
   940  				Lister:            mgmtClusterProxy.GetClient(),
   941  				ClusterName:       wlClusterName,
   942  				Namespace:         wlNamespace.Name,
   943  				MachineDeployment: *wlResult.MachineDeployments[0],
   944  			})
   945  			wlCPM := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{
   946  				Lister:      mgmtClusterProxy.GetClient(),
   947  				ClusterName: wlClusterName,
   948  				Namespace:   wlNamespace.Name,
   949  			})
   950  			Expect(len(wlWM)).To(Equal(1))
   951  			Expect(len(wlCPM)).To(Equal(1))
   952  
   953  			ginkgo.By("Deleting the workload cluster")
   954  			shared.DumpSpecResourcesFromProxy(ctx, e2eCtx, wlNamespace, mgmtClusterProxy)
   955  			shared.DumpMachinesFromProxy(ctx, e2eCtx, wlNamespace, mgmtClusterProxy)
   956  			if !e2eCtx.Settings.SkipCleanup {
   957  				framework.DeleteCluster(ctx, framework.DeleteClusterInput{
   958  					Deleter: mgmtClusterProxy.GetClient(),
   959  					Cluster: wlResult.Cluster,
   960  				})
   961  
   962  				framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{
   963  					Getter:  mgmtClusterProxy.GetClient(),
   964  					Cluster: wlResult.Cluster,
   965  				}, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...)
   966  
   967  				ginkgo.By("Moving the management cluster back to bootstrap")
   968  				clusterctl.Move(ctx, clusterctl.MoveInput{
   969  					LogFolder:            filepath.Join(e2eCtx.Settings.ArtifactFolder, "clusters", mgmtCluster.Name),
   970  					ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath,
   971  					FromKubeconfigPath:   mgmtClusterProxy.GetKubeconfigPath(),
   972  					ToKubeconfigPath:     e2eCtx.Environment.BootstrapClusterProxy.GetKubeconfigPath(),
   973  					Namespace:            namespace.Name,
   974  				})
   975  
   976  				mgmtCluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{
   977  					Getter:    e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   978  					Namespace: mgmtNamespace.Name,
   979  					Name:      mgmtCluster.Name,
   980  				}, e2eCtx.E2EConfig.GetIntervals(specName, "wait-cluster")...)
   981  
   982  				mgmtControlPlane = framework.GetKubeadmControlPlaneByCluster(ctx, framework.GetKubeadmControlPlaneByClusterInput{
   983  					Lister:      e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
   984  					ClusterName: mgmtCluster.Name,
   985  					Namespace:   mgmtCluster.Namespace,
   986  				})
   987  				Expect(mgmtControlPlane).ToNot(BeNil())
   988  
   989  				ginkgo.By("Deleting the management cluster")
   990  				deleteCluster(ctx, mgmtCluster)
   991  			}
   992  		})
   993  	})
   994  
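        	// Exercises the Ignition bootstrap flavor which, per the spec name, stages
        	// bootstrap user data in an AWS S3 bucket rather than inlining it as EC2 user data.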
   995  	ginkgo.Describe("Workload cluster with AWS S3 and Ignition parameter", func() {
   996  		ginkgo.It("should be creatable and deletable", func() {
   997  			specName := "functional-test-ignition"
   998  			namespace := shared.SetupSpecNamespace(ctx, specName, e2eCtx)
   999  			ginkgo.By("Creating a cluster")
  1000  			clusterName := fmt.Sprintf("%s-%s", specName, util.RandomString(6))
  1001  			configCluster := defaultConfigCluster(clusterName, namespace.Name)
  1002  			configCluster.ControlPlaneMachineCount = pointer.Int64Ptr(1)
  1003  			configCluster.WorkerMachineCount = pointer.Int64Ptr(1)
  1004  			configCluster.Flavor = shared.IgnitionFlavor
  1005  			_, md, _ := createCluster(ctx, configCluster, result)
  1006  
  1007  			workerMachines := framework.GetMachinesByMachineDeployments(ctx, framework.GetMachinesByMachineDeploymentsInput{
  1008  				Lister:            e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
  1009  				ClusterName:       clusterName,
  1010  				Namespace:         namespace.Name,
  1011  				MachineDeployment: *md[0],
  1012  			})
  1013  			controlPlaneMachines := framework.GetControlPlaneMachinesByCluster(ctx, framework.GetControlPlaneMachinesByClusterInput{
  1014  				Lister:      e2eCtx.Environment.BootstrapClusterProxy.GetClient(),
  1015  				ClusterName: clusterName,
  1016  				Namespace:   namespace.Name,
  1017  			})
  1018  			Expect(len(workerMachines)).To(Equal(1))
  1019  			Expect(len(controlPlaneMachines)).To(Equal(1))
  1020  		})
  1021  	})
  1022  })
  1023  
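        // createStatefulSetInfo returns the statefulSetInfo used by the storage specs in
        // this file: a two-replica nginx StatefulSet plus the associated service, storage
        // class, and volume names, all derived from the given prefix. isIntreeCSI flags
        // whether the in-tree AWS EBS provisioner (rather than the external CSI driver)
        // is expected to provision the volumes.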
  1024  func createStatefulSetInfo(isIntreeCSI bool, prefix string) statefulSetInfo {
  1025  	return statefulSetInfo{
  1026  		name:                      fmt.Sprintf("%s%s", prefix, "-nginx-statefulset"),
  1027  		namespace:                 metav1.NamespaceDefault,
  1028  		replicas:                  int32(2),
  1029  		selector:                  map[string]string{"app": fmt.Sprintf("%s%s", prefix, "-nginx")},
  1030  		storageClassName:          fmt.Sprintf("%s%s", prefix, "-aws-ebs-volumes"),
  1031  		volumeName:                fmt.Sprintf("%s%s", prefix, "-volumes"),
  1032  		svcName:                   fmt.Sprintf("%s%s", prefix, "-svc"),
  1033  		svcPort:                   int32(80),
  1034  		svcPortName:               fmt.Sprintf("%s%s", prefix, "-web"),
  1035  		containerName:             fmt.Sprintf("%s%s", prefix, "-nginx"),
  1036  		containerImage:            "registry.k8s.io/nginx-slim:0.8",
  1037  		containerPort:             int32(80),
  1038  		podTerminationGracePeriod: int64(30),
  1039  		volMountPath:              "/usr/share/nginx/html",
  1040  		isInTreeCSI:               isIntreeCSI,
  1041  	}
  1042  }