sigs.k8s.io/cluster-api-provider-azure@v1.14.3/test/e2e/azure_test.go

//go:build e2e
// +build e2e

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"time"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3"
	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util"
)
var _ = Describe("Workload cluster creation", func() {
	var (
		ctx                    = context.TODO()
		specName               = "create-workload-cluster"
		namespace              *corev1.Namespace
		cancelWatches          context.CancelFunc
		result                 *clusterctl.ApplyClusterTemplateAndWaitResult
		clusterName            string
		clusterNamePrefix      string
		additionalCleanup      func()
		specTimes              = map[string]time.Time{}
		skipResourceGroupCheck = false
	)

	BeforeEach(func() {
		logCheckpoint(specTimes)

		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
		Expect(e2eConfig).NotTo(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
		Expect(bootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
		Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
		Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.KubernetesVersion))

		// CLUSTER_NAME and CLUSTER_NAMESPACE allow testing against an existing cluster.
		// If CLUSTER_NAMESPACE is set, don't generate a new prefix; otherwise the
		// correct namespace won't be found and a new cluster will be created instead.
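		// A minimal sketch of pointing the suite at an existing cluster (hypothetical values):
		//
		//   export CLUSTER_NAMESPACE="capz-e2e-abc123"
		//   export CLUSTER_NAME="capz-e2e-abc123-ha"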
		clusterNameSpace := os.Getenv("CLUSTER_NAMESPACE")
		if clusterNameSpace == "" {
			clusterNamePrefix = fmt.Sprintf("capz-e2e-%s", util.RandomString(6))
		} else {
			clusterNamePrefix = clusterNameSpace
		}

		// Set up a namespace to host objects for this spec, and create a watcher for the namespace events.
		var err error
		namespace, cancelWatches, err = setupSpecNamespace(ctx, clusterNamePrefix, bootstrapClusterProxy, artifactFolder)
		Expect(err).NotTo(HaveOccurred())

		result = new(clusterctl.ApplyClusterTemplateAndWaitResult)

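		// Ensure the cluster identity secret referenced by the AzureClusterIdentity exists: it holds
		// the service principal's client secret, and the clusterctl move-hierarchy label below makes
		// it travel with the cluster when "clusterctl move" pivots to a self-hosted management cluster.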
		spClientSecret := os.Getenv(AzureClientSecret)
		secret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "cluster-identity-secret",
				Namespace: defaultNamespace,
				Labels: map[string]string{
					clusterctlv1.ClusterctlMoveHierarchyLabel: "true",
				},
			},
			Type: corev1.SecretTypeOpaque,
			Data: map[string][]byte{"clientSecret": []byte(spClientSecret)},
		}
		_, err = bootstrapClusterProxy.GetClientSet().CoreV1().Secrets(defaultNamespace).Get(ctx, secret.Name, metav1.GetOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			Expect(err).NotTo(HaveOccurred())
		}
		if err != nil {
			Logf("Creating cluster identity secret \"%s\"", secret.Name)
			err = bootstrapClusterProxy.GetClient().Create(ctx, secret)
			if !apierrors.IsAlreadyExists(err) {
				Expect(err).NotTo(HaveOccurred())
			}
		} else {
			Logf("Using existing cluster identity secret")
		}

		identityName := e2eConfig.GetVariable(ClusterIdentityName)
		Expect(os.Setenv(ClusterIdentityName, identityName)).To(Succeed())
		Expect(os.Setenv(ClusterIdentityNamespace, defaultNamespace)).To(Succeed())
		Expect(os.Setenv(ClusterIdentitySecretName, "cluster-identity-secret")).To(Succeed())
		Expect(os.Setenv(ClusterIdentitySecretNamespace, defaultNamespace)).To(Succeed())
		additionalCleanup = nil
	})

	AfterEach(func() {
		if result.Cluster == nil {
			// The cluster failed to come up. Make a best-effort attempt to find it so that
			// logs for the failed bootstrapping can still be collected.
			_ = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace.Name}, result.Cluster)
		}

		CheckTestBeforeCleanup()

		cleanInput := cleanupInput{
			SpecName:               specName,
			Cluster:                result.Cluster,
			ClusterProxy:           bootstrapClusterProxy,
			Namespace:              namespace,
			CancelWatches:          cancelWatches,
			IntervalsGetter:        e2eConfig.GetIntervals,
			SkipCleanup:            skipCleanup,
			SkipLogCollection:      skipLogCollection,
			AdditionalCleanup:      additionalCleanup,
			ArtifactFolder:         artifactFolder,
			SkipResourceGroupCheck: skipResourceGroupCheck,
		}
		dumpSpecResourcesAndCleanup(ctx, cleanInput)
		Expect(os.Unsetenv(AzureResourceGroup)).To(Succeed())
		Expect(os.Unsetenv(AzureCustomVnetResourceGroup)).To(Succeed())
		Expect(os.Unsetenv(AzureVNetName)).To(Succeed())
		Expect(os.Unsetenv(ClusterIdentityName)).To(Succeed())
		Expect(os.Unsetenv(ClusterIdentityNamespace)).To(Succeed())
		Expect(os.Unsetenv(ClusterIdentitySecretName)).To(Succeed())
		Expect(os.Unsetenv(ClusterIdentitySecretNamespace)).To(Succeed())

		Expect(os.Unsetenv("WINDOWS_WORKER_MACHINE_COUNT")).To(Succeed())
		Expect(os.Unsetenv("K8S_FEATURE_GATES")).To(Succeed())

		logCheckpoint(specTimes)
	})

	if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" {
		// This spec expects a user-assigned identity with a Contributor role assignment, named "cloud-provider-user-identity",
		// in a "capz-ci" resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
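		// For example, to point this spec at your own identity and resource group (hypothetical values):
		//
		//   export USER_IDENTITY="my-user-identity"
		//   export CI_RG="my-identity-rg"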
		Context("Creating a private cluster [OPTIONAL]", func() {
			It("Creates a public management cluster in a custom vnet", func() {
				clusterName = getClusterName(clusterNamePrefix, "public-custom-vnet")
				By("Creating a custom virtual network", func() {
					Expect(os.Setenv(AzureCustomVNetName, "custom-vnet")).To(Succeed())
					Expect(os.Setenv(AzureCustomVnetResourceGroup, clusterName+"-vnetrg")).To(Succeed())
					additionalCleanup = SetupExistingVNet(ctx,
						"10.0.0.0/16",
						map[string]string{fmt.Sprintf("%s-controlplane-subnet", os.Getenv(AzureCustomVNetName)): "10.0.0.0/24"},
						map[string]string{fmt.Sprintf("%s-node-subnet", os.Getenv(AzureCustomVNetName)): "10.0.1.0/24"},
						fmt.Sprintf("%s-azure-bastion-subnet", os.Getenv(AzureCustomVNetName)),
						"10.0.2.0/24",
					)
				})

				clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
					specName,
					withFlavor("custom-vnet"),
					withNamespace(namespace.Name),
					withClusterName(clusterName),
					withControlPlaneMachineCount(1),
					withWorkerMachineCount(1),
					withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
						WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
					}),
					withPostMachinesProvisioned(func() {
						EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
							return DaemonsetsSpecInput{
								BootstrapClusterProxy: bootstrapClusterProxy,
								Namespace:             namespace,
								ClusterName:           clusterName,
							}
						})
					}),
				), result)

				By("Creating a private cluster from the management cluster", func() {
					AzurePrivateClusterSpec(ctx, func() AzurePrivateClusterSpecInput {
						return AzurePrivateClusterSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
							ClusterctlConfigPath:  clusterctlConfigPath,
							E2EConfig:             e2eConfig,
							ArtifactFolder:        artifactFolder,
							SkipCleanup:           skipCleanup,
							CancelWatches:         cancelWatches,
						}
					})
				})

				By("PASSED!")
			})
		})
	} else {
		fmt.Fprintln(GinkgoWriter, "INFO: skipping test that requires pushing container images to an external repository")
	}

	Context("Creating a highly available cluster [REQUIRED]", func() {
		It("With 3 control-plane nodes and 2 Linux and 2 Windows worker nodes", func() {
			clusterName = getClusterName(clusterNamePrefix, "ha")

			// Opt into using windows with prow template
			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "2")).To(Succeed())

			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(3),
				withWorkerMachineCount(2),
				withControlPlaneInterval(specName, "wait-control-plane-ha"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Verifying security rules are deleted on the Azure side", func() {
				AzureSecurityGroupsSpec(ctx, func() AzureSecurityGroupsSpecInput {
					return AzureSecurityGroupsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						Cluster:               result.Cluster,
						WaitForUpdate:         e2eConfig.GetIntervals(specName, "wait-nsg-update"),
					}
				})
			})

			By("Validating failure domains", func() {
				AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput {
					return AzureFailureDomainsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Cluster:               result.Cluster,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Creating an accessible load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("Validating network policies", func() {
				AzureNetPolSpec(ctx, func() AzureNetPolSpecInput {
					return AzureNetPolSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("Creating an accessible load balancer for windows", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
						Windows:               true,
					}
				})
			})

			By("PASSED!")
		})
	})

	When("Creating a highly available cluster with Azure CNI v1 [REQUIRED]", Label("Azure CNI v1"), func() {
		It("can create 3 control-plane nodes and 2 Linux worker nodes", func() {
			clusterName = getClusterName(clusterNamePrefix, "azcni-v1")

			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)), // use the Azure CNI v1 manifest referenced by the e2e config
				withFlavor("azure-cni-v1"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(3),
				withWorkerMachineCount(2),
				withControlPlaneInterval(specName, "wait-control-plane-ha"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("can verify VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("can validate failure domains", func() {
				AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput {
					return AzureFailureDomainsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Cluster:               result.Cluster,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("can create an accessible load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})
		})
	})

	Context("Creating a Flatcar cluster [OPTIONAL]", func() {
		It("With Flatcar control-plane and worker nodes", func() {
			clusterName = getClusterName(clusterNamePrefix, "flatcar")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("flatcar"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withKubernetesVersion(e2eConfig.GetVariable(FlatcarKubernetesVersion)),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("can create and access a load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})
		})
	})

	Context("Creating a cluster with spot vms [OPTIONAL]", func() {
		It("With spot vm machine deployments", func() {
			clusterName = getClusterName(clusterNamePrefix, "spot")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("spot"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("can create and access a load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})
		})
	})

	Context("Creating an ipv6 control-plane cluster [REQUIRED]", func() {
		It("With ipv6 worker node", func() {
			clusterName = getClusterName(clusterNamePrefix, "ipv6")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("ipv6"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(3),
				withWorkerMachineCount(1),
				withControlPlaneInterval(specName, "wait-control-plane-ha"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Creating an accessible ipv6 load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
						// Setting IPFamilies to ipv6 is not required for single-stack IPv6 clusters: the
						// clusterIP is automatically assigned an IPv6 address. We set it anyway so that
						// the same test code works for both single-stack and dual-stack IPv6 clusters.
						IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
					}
				})
			})

			By("PASSED!")
		})
	})

	Context("Creating a VMSS cluster [REQUIRED]", func() {
		It("with a single control plane node and an AzureMachinePool with 2 Linux and 2 Windows worker nodes", func() {
			clusterName = getClusterName(clusterNamePrefix, "vmss")

			// Opt into using windows with prow template
			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "2")).To(Succeed())

			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("machine-pool"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(2),
				withMachineDeploymentInterval(specName, ""),
				withControlPlaneInterval(specName, "wait-control-plane"),
				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Creating an accessible load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("Creating an accessible load balancer for windows", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
						Windows:               true,
					}
				})
			})

			By("PASSED!")
		})
	})

	// ci-e2e.sh and Prow CI skip this test by default, since N-series GPUs are relatively expensive
	// and may require specific quota limits on the subscription.
	// To include this test, set `GINKGO_SKIP=""`.
	// You can override the default SKU `Standard_NV12s_v3` and `Premium_LRS` storage by setting
	// the `AZURE_GPU_NODE_MACHINE_TYPE` and `AZURE_GPU_NODE_STORAGE_TYPE` environment variables.
	// See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/ for pricing.
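	// For example, to opt in and use a different GPU SKU (hypothetical values):
	//
	//   export GINKGO_SKIP=""
	//   export AZURE_GPU_NODE_MACHINE_TYPE="Standard_NC6s_v3"
	//   export AZURE_GPU_NODE_STORAGE_TYPE="Standard_LRS"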
	Context("Creating a GPU-enabled cluster [OPTIONAL]", func() {
		It("with a single control plane node and 1 node", func() {
			clusterName = getClusterName(clusterNamePrefix, "gpu")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("nvidia-gpu"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withMachineDeploymentInterval(specName, "wait-gpu-nodes"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
					InstallGPUOperator(ctx, func() GPUOperatorSpecInput {
						return GPUOperatorSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Running a GPU-based calculation", func() {
				AzureGPUSpec(ctx, func() AzureGPUSpecInput {
					return AzureGPUSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("PASSED!")
		})
	})

	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
	Context("Creating a cluster with VMSS flex machinepools [OPTIONAL]", func() {
		It("with 1 control plane node and 1 machinepool", func() {
			clusterName = getClusterName(clusterNamePrefix, "flex")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("machine-pool-flex"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withKubernetesVersion("v1.26.1"),
				withMachineDeploymentInterval(specName, ""),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
				withControlPlaneInterval(specName, "wait-control-plane"),
			), result)

			By("Verifying machinepool can scale out and in", func() {
				AzureMachinePoolsSpec(ctx, func() AzureMachinePoolsSpecInput {
					return AzureMachinePoolsSpecInput{
						Cluster:               result.Cluster,
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						WaitIntervals:         e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
					}
				})
			})

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Creating an accessible load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("PASSED!")
		})
	})

	// You can override the default SKU `Standard_D2s_v3` by setting the
	// `AZURE_AKS_NODE_MACHINE_TYPE` environment variable.
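	// For example (hypothetical value):
	//
	//   export AZURE_AKS_NODE_MACHINE_TYPE="Standard_D4s_v3"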
	Context("Creating an AKS cluster for control plane tests [Managed Kubernetes]", func() {
		It("with a single control plane node and 1 node", func() {
			clusterName = getClusterName(clusterNamePrefix, aksClusterNameSuffix)
			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
			Expect(err).NotTo(HaveOccurred())
			Byf("Upgrading from k8s version %s", kubernetesVersionUpgradeFrom)
			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
			Expect(err).NotTo(HaveOccurred())
			Byf("Upgrading to k8s version %s", kubernetesVersion)

			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("aks"),
				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withKubernetesVersion(kubernetesVersionUpgradeFrom),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withMachineDeploymentInterval(specName, ""),
				withMachinePoolInterval(specName, "wait-worker-nodes"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
				}),
			), result)

			By("adding an AKS marketplace extension", func() {
				AKSMarketplaceExtensionSpec(ctx, func() AKSMarketplaceExtensionSpecInput {
					return AKSMarketplaceExtensionSpecInput{
						Cluster:       result.Cluster,
						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("attaching the cluster to azure fleet", func() {
				AKSFleetsMemberSpec(ctx, func() AKSFleetsMemberInput {
					return AKSFleetsMemberInput{
						Cluster:       result.Cluster,
						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("Upgrading the Kubernetes version of the cluster", func() {
				AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
					return AKSUpgradeSpecInput{
						Cluster:                    result.Cluster,
						MachinePools:               result.MachinePools,
						KubernetesVersionUpgradeTo: kubernetesVersion,
						WaitForControlPlane:        e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
						WaitForMachinePools:        e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
					}
				})
			})

			By("modifying the azure cluster-autoscaler settings", func() {
				AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
					return AKSAzureClusterAutoscalerSettingsSpecInput{
						Cluster:       result.Cluster,
						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
					}
				})
			})
		})
	})

	Context("Creating an AKS cluster for node pool tests [Managed Kubernetes]", func() {
		It("with a single control plane node and 1 node", func() {
			clusterName = getClusterName(clusterNamePrefix, "pool")
			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
			Expect(err).NotTo(HaveOccurred())

			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("aks"),
				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withKubernetesVersion(kubernetesVersion),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withMachineDeploymentInterval(specName, ""),
				withMachinePoolInterval(specName, "wait-worker-nodes"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
				}),
			), result)

			By("Exercising machine pools", func() {
				AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
					return AKSMachinePoolSpecInput{
						Cluster:       result.Cluster,
						MachinePools:  result.MachinePools,
						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("creating a machine pool with public IP addresses from a prefix", func() {
				// This test is also currently serving as the canonical
				// "create/delete node pool" test. Eventually, that should be
				// made more distinct from this public IP prefix test.
				AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
					return AKSPublicIPPrefixSpecInput{
						Cluster:           result.Cluster,
						KubernetesVersion: kubernetesVersion,
						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
					}
				})
			})

			By("creating a machine pool with spot max price and scale down mode", func() {
				AKSSpotSpec(ctx, func() AKSSpotSpecInput {
					return AKSSpotSpecInput{
						Cluster:           result.Cluster,
						KubernetesVersion: kubernetesVersion,
						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
					}
				})
			})

			By("modifying nodepool autoscaling configuration", func() {
				AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
					return AKSAutoscaleSpecInput{
						Cluster:       result.Cluster,
						MachinePool:   result.MachinePools[0],
						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("modifying additionalTags configuration", func() {
				AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
					return AKSAdditionalTagsSpecInput{
						Cluster:       result.Cluster,
						MachinePools:  result.MachinePools,
						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("modifying node labels configuration", func() {
				AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
					return AKSNodeLabelsSpecInput{
						Cluster:       result.Cluster,
						MachinePools:  result.MachinePools,
						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("modifying taints configuration", func() {
				AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
					return AKSNodeTaintsSpecInput{
						Cluster:       result.Cluster,
						MachinePools:  result.MachinePools,
						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})

			By("creating a byo nodepool", func() {
				AKSBYONodeSpec(ctx, func() AKSBYONodeSpecInput {
					return AKSBYONodeSpecInput{
						Cluster:             result.Cluster,
						KubernetesVersion:   kubernetesVersion,
						WaitIntervals:       e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
						ExpectedWorkerNodes: result.ExpectedWorkerNodes(),
					}
				})
			})

			By("modifying custom patches", func() {
				AKSPatchSpec(ctx, func() AKSPatchSpecInput {
					return AKSPatchSpecInput{
						Cluster:       result.Cluster,
						MachinePools:  result.MachinePools,
						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
					}
				})
			})
		})
	})

	Context("Creating an AKS cluster using ClusterClass [Managed Kubernetes]", func() {
		It("with a single control plane node and 1 node", func() {
			// Use "default" as the clusterclass name so test infra can find the clusterclass template
			Expect(os.Setenv("CLUSTER_CLASS_NAME", "default")).To(Succeed())

			// Use "cc" as the spec name because the NAT gateway public IP name would otherwise exceed the length limit.
			clusterName = getClusterName(clusterNamePrefix, "cc")
			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
			Expect(err).NotTo(HaveOccurred())
			Byf("Upgrading from k8s version %s", kubernetesVersionUpgradeFrom)
			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
			Expect(err).NotTo(HaveOccurred())
			Byf("Upgrading to k8s version %s", kubernetesVersion)

			// Create a cluster using the cluster class created above
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("aks-clusterclass"),
				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withKubernetesVersion(kubernetesVersionUpgradeFrom),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withMachineDeploymentInterval(specName, ""),
				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
				}),
			), result)

			By("Performing ClusterClass operations on the cluster", func() {
				AKSClusterClassSpec(ctx, func() AKSClusterClassInput {
					return AKSClusterClassInput{
						Cluster:                    result.Cluster,
						MachinePool:                result.MachinePools[0],
						WaitIntervals:              e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
						WaitUpgradeIntervals:       e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
						KubernetesVersionUpgradeTo: kubernetesVersion,
					}
				})
			})
		})
	})

	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
	Context("Creating a dual-stack cluster [OPTIONAL]", func() {
		It("With dual-stack worker node", func() {
			By("using user-assigned identity")
			clusterName = getClusterName(clusterNamePrefix, "dual-stack")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withClusterProxy(bootstrapClusterProxy),
				withFlavor("dual-stack"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(3),
				withWorkerMachineCount(1),
				withControlPlaneInterval(specName, "wait-control-plane-ha"),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			// A dual-stack external IP for dual-stack clusters is not yet supported;
			// the first IP family in ipFamilies is used for the primary clusterIP, and
			// cloud-provider picks the elb/ilb IP family based on that primary clusterIP.
			By("Creating an accessible ipv4 load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
						IPFamilies:            []corev1.IPFamily{corev1.IPv4Protocol},
					}
				})
			})

			By("Creating an accessible ipv6 load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
						IPFamilies:            []corev1.IPFamily{corev1.IPv6Protocol},
					}
				})
			})

			By("PASSED!")
		})
	})

	Context("Creating clusters using clusterclass [OPTIONAL]", func() {
		It("with a single control plane node, one Linux worker node, and one Windows worker node", func() {
			// Use "ci-default" as the clusterclass name so test infra can find the clusterclass template
			Expect(os.Setenv("CLUSTER_CLASS_NAME", "ci-default")).To(Succeed())

			// Use "cc" as the spec name because the NAT gateway public IP name would otherwise exceed the length limit.
			clusterName = getClusterName(clusterNamePrefix, "cc")

			// Opt into using windows with prow template
			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "1")).To(Succeed())

			// Create a cluster using the cluster class created above
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("topology"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("PASSED!")
		})
	})

	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
	// You can also override the default SKUs `Standard_DS2_v2` (control plane) and `Standard_DS4_v2` (workers)
	// by setting the `AZURE_EDGEZONE_CONTROL_PLANE_MACHINE_TYPE` and `AZURE_EDGEZONE_NODE_MACHINE_TYPE` environment variables.
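	// For example (hypothetical values):
	//
	//   export AZURE_EDGEZONE_CONTROL_PLANE_MACHINE_TYPE="Standard_DS3_v2"
	//   export AZURE_EDGEZONE_NODE_MACHINE_TYPE="Standard_DS5_v2"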
	Context("Creating clusters on public MEC [OPTIONAL]", func() {
		It("with 1 control plane node and 1 worker node", func() {
			By("using user-assigned identity")
			clusterName = getClusterName(clusterNamePrefix, "edgezone")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("edgezone"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(1),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying that the extendedLocation property on the Azure VMs matches the extendedLocation in the edgezone flavor yaml", func() {
				AzureEdgeZoneClusterSpec(ctx, func() AzureEdgeZoneClusterSpecInput {
					return AzureEdgeZoneClusterSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						E2EConfig:             e2eConfig,
					}
				})
			})

			By("PASSED!")
		})
	})

	// Workload identity test
	Context("Creating a cluster that uses workload identity [OPTIONAL]", func() {
		It("with 1 control plane node and 2 worker nodes", func() {
			By("using workload-identity")
			clusterName = getClusterName(clusterNamePrefix, "azwi")
			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
				specName,
				withFlavor("workload-identity"),
				withNamespace(namespace.Name),
				withClusterName(clusterName),
				withControlPlaneMachineCount(1),
				withWorkerMachineCount(2),
				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
				}),
				withPostMachinesProvisioned(func() {
					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
						return DaemonsetsSpecInput{
							BootstrapClusterProxy: bootstrapClusterProxy,
							Namespace:             namespace,
							ClusterName:           clusterName,
						}
					})
				}),
			), result)

			By("Verifying expected VM extensions are present on the node", func() {
				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
					return AzureVMExtensionsSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
					}
				})
			})

			By("Creating an accessible load balancer", func() {
				AzureLBSpec(ctx, func() AzureLBSpecInput {
					return AzureLBSpecInput{
						BootstrapClusterProxy: bootstrapClusterProxy,
						Namespace:             namespace,
						ClusterName:           clusterName,
						SkipCleanup:           skipCleanup,
					}
				})
			})

			By("Workload identity test PASSED!")
		})
	})
})