sigs.k8s.io/cluster-api-provider-azure@v1.17.0/test/e2e/azure_test.go

     1  //go:build e2e
     2  // +build e2e
     3  
     4  /*
     5  Copyright 2020 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package e2e
    21  
    22  import (
    23  	"context"
    24  	"fmt"
    25  	"os"
    26  	"time"
    27  
    28  	"github.com/Azure/azure-service-operator/v2/pkg/common/config"
    29  	. "github.com/onsi/ginkgo/v2"
    30  	. "github.com/onsi/gomega"
    31  	corev1 "k8s.io/api/core/v1"
    32  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    33  	"k8s.io/apimachinery/pkg/types"
    34  	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
    35  	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
    36  	"sigs.k8s.io/cluster-api/util"
    37  	"sigs.k8s.io/controller-runtime/pkg/client"
    38  )
    39  
    40  var _ = Describe("Workload cluster creation", func() {
    41  	var (
    42  		ctx                    = context.TODO()
    43  		specName               = "create-workload-cluster"
    44  		namespace              *corev1.Namespace
    45  		cancelWatches          context.CancelFunc
    46  		result                 *clusterctl.ApplyClusterTemplateAndWaitResult
    47  		clusterName            string
    48  		clusterNamePrefix      string
    49  		additionalCleanup      func()
    50  		specTimes              = map[string]time.Time{}
    51  		skipResourceGroupCheck = false
    52  	)
    53  
    54  	BeforeEach(func() {
    55  		logCheckpoint(specTimes)
    56  
    57  		Expect(ctx).NotTo(BeNil(), "ctx is required for %s spec", specName)
    58  		Expect(e2eConfig).NotTo(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName)
    59  		Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName)
    60  		Expect(bootstrapClusterProxy).NotTo(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName)
    61  		Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName)
    62  		Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.KubernetesVersion))
    63  
    64  		// CLUSTER_NAME and CLUSTER_NAMESPACE allow testing against existing clusters.
    65  		// If CLUSTER_NAMESPACE is set, don't generate a new prefix; otherwise
    66  		// the correct namespace won't be found and a new cluster will be created.
    67  		clusterNamespace := os.Getenv("CLUSTER_NAMESPACE")
    68  		if clusterNamespace == "" {
    69  			clusterNamePrefix = fmt.Sprintf("capz-e2e-%s", util.RandomString(6))
    70  		} else {
    71  			clusterNamePrefix = clusterNamespace
    72  		}
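        		// For example (hypothetical values), to re-run specs against an existing
        		// cluster instead of creating a new one:
        		//
        		//	export CLUSTER_NAMESPACE=capz-e2e-x2k9q1
        		//	export CLUSTER_NAME=capz-e2e-x2k9q1-ha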
    73  
    74  		// Set up a namespace to host objects for this spec and create a watcher for its events.
    75  		var err error
    76  		namespace, cancelWatches, err = setupSpecNamespace(ctx, clusterNamePrefix, bootstrapClusterProxy, artifactFolder)
    77  		Expect(err).NotTo(HaveOccurred())
    78  
    79  		result = new(clusterctl.ApplyClusterTemplateAndWaitResult)
    80  
    81  		asoSecretName := e2eConfig.GetVariable("ASO_CREDENTIAL_SECRET_NAME")
    82  		asoSecret := &corev1.Secret{
    83  			ObjectMeta: metav1.ObjectMeta{
    84  				Namespace: namespace.Name,
    85  				Name:      asoSecretName,
    86  			},
    87  			StringData: map[string]string{
    88  				config.AzureSubscriptionID: e2eConfig.GetVariable(AzureSubscriptionID),
    89  				config.AzureTenantID:       e2eConfig.GetVariable(AzureTenantID),
    90  				config.AzureClientID:       e2eConfig.GetVariable(AzureClientIDUserAssignedIdentity),
    91  				config.AuthMode:            e2eConfig.GetVariable("ASO_CREDENTIAL_SECRET_MODE"),
    92  			},
    93  		}
    94  		err = bootstrapClusterProxy.GetClient().Create(ctx, asoSecret)
    95  		Expect(client.IgnoreAlreadyExists(err)).NotTo(HaveOccurred())
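        		// Assuming the ASO config keys resolve to the usual environment-variable names
        		// (AZURE_SUBSCRIPTION_ID, AZURE_TENANT_ID, AZURE_CLIENT_ID, AUTH_MODE), the
        		// Secret created above is equivalent to:
        		//
        		//	apiVersion: v1
        		//	kind: Secret
        		//	metadata:
        		//	  name: <ASO_CREDENTIAL_SECRET_NAME>
        		//	  namespace: <spec namespace>
        		//	stringData:
        		//	  AZURE_SUBSCRIPTION_ID: <subscription id>
        		//	  AZURE_TENANT_ID: <tenant id>
        		//	  AZURE_CLIENT_ID: <user-assigned identity client id>
        		//	  AUTH_MODE: <ASO_CREDENTIAL_SECRET_MODE>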
    96  
    97  		identityName := e2eConfig.GetVariable(ClusterIdentityName)
    98  		Expect(os.Setenv(ClusterIdentityName, identityName)).To(Succeed())
    99  		Expect(os.Setenv(ClusterIdentityNamespace, defaultNamespace)).To(Succeed())
   100  		additionalCleanup = nil
   101  	})
   102  
   103  	AfterEach(func() {
   104  		if result.Cluster == nil {
   105  			// The cluster failed to come up; try to find it anyway so logs for the failed bootstrapping can still be collected.
   106  			_ = bootstrapClusterProxy.GetClient().Get(ctx, types.NamespacedName{Name: clusterName, Namespace: namespace.Name}, result.Cluster)
   107  		}
   108  
   109  		CheckTestBeforeCleanup()
   110  
   111  		cleanInput := cleanupInput{
   112  			SpecName:               specName,
   113  			Cluster:                result.Cluster,
   114  			ClusterProxy:           bootstrapClusterProxy,
   115  			Namespace:              namespace,
   116  			CancelWatches:          cancelWatches,
   117  			IntervalsGetter:        e2eConfig.GetIntervals,
   118  			SkipCleanup:            skipCleanup,
   119  			SkipLogCollection:      skipLogCollection,
   120  			AdditionalCleanup:      additionalCleanup,
   121  			ArtifactFolder:         artifactFolder,
   122  			SkipResourceGroupCheck: skipResourceGroupCheck,
   123  		}
   124  		dumpSpecResourcesAndCleanup(ctx, cleanInput)
   125  		Expect(os.Unsetenv(AzureResourceGroup)).To(Succeed())
   126  		Expect(os.Unsetenv(AzureCustomVnetResourceGroup)).To(Succeed())
   127  		Expect(os.Unsetenv(AzureVNetName)).To(Succeed())
   128  		Expect(os.Unsetenv(ClusterIdentityName)).To(Succeed())
   129  		Expect(os.Unsetenv(ClusterIdentityNamespace)).To(Succeed())
   130  
   131  		Expect(os.Unsetenv("WINDOWS_WORKER_MACHINE_COUNT")).To(Succeed())
   132  		Expect(os.Unsetenv("K8S_FEATURE_GATES")).To(Succeed())
   133  
   134  		logCheckpoint(specTimes)
   135  	})
   136  
   137  	if os.Getenv("USE_LOCAL_KIND_REGISTRY") != "true" {
   138  		// This spec expects a user-assigned identity named "cloud-provider-user-identity", with a Contributor role assignment, in a
   139  		// "capz-ci" resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
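        		// For example (hypothetical values):
        		//
        		//	export USER_IDENTITY=my-user-identity
        		//	export CI_RG=my-identity-rg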
   140  		Context("Creating a private cluster [OPTIONAL]", func() {
   141  			It("Creates a public management cluster in a custom vnet", func() {
   142  				clusterName = getClusterName(clusterNamePrefix, "public-custom-vnet")
   143  				By("Creating a custom virtual network", func() {
   144  					Expect(os.Setenv(AzureCustomVNetName, "custom-vnet")).To(Succeed())
   145  					Expect(os.Setenv(AzureCustomVnetResourceGroup, clusterName+"-vnetrg")).To(Succeed())
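        					// The call below carves the 10.0.0.0/16 VNet into one /24 per role. With
        					// AzureCustomVNetName set to "custom-vnet" above, the resulting subnets are
        					// custom-vnet-controlplane-subnet (10.0.0.0/24), custom-vnet-node-subnet
        					// (10.0.1.0/24), and custom-vnet-azure-bastion-subnet (10.0.2.0/24).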
   146  					additionalCleanup = SetupExistingVNet(ctx,
   147  						"10.0.0.0/16",
   148  						map[string]string{fmt.Sprintf("%s-controlplane-subnet", os.Getenv(AzureCustomVNetName)): "10.0.0.0/24"},
   149  						map[string]string{fmt.Sprintf("%s-node-subnet", os.Getenv(AzureCustomVNetName)): "10.0.1.0/24"},
   150  						fmt.Sprintf("%s-azure-bastion-subnet", os.Getenv(AzureCustomVNetName)),
   151  						"10.0.2.0/24",
   152  					)
   153  				})
   154  
   155  				clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   156  					specName,
   157  					withFlavor("custom-vnet"),
   158  					withNamespace(namespace.Name),
   159  					withClusterName(clusterName),
   160  					withControlPlaneMachineCount(1),
   161  					withWorkerMachineCount(1),
   162  					withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   163  						WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   164  					}),
   165  					withPostMachinesProvisioned(func() {
   166  						EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   167  							return DaemonsetsSpecInput{
   168  								BootstrapClusterProxy: bootstrapClusterProxy,
   169  								Namespace:             namespace,
   170  								ClusterName:           clusterName,
   171  							}
   172  						})
   173  					}),
   174  				), result)
   175  
   176  				By("Creating a private cluster from the management cluster", func() {
   177  					AzurePrivateClusterSpec(ctx, func() AzurePrivateClusterSpecInput {
   178  						return AzurePrivateClusterSpecInput{
   179  							BootstrapClusterProxy: bootstrapClusterProxy,
   180  							Namespace:             namespace,
   181  							ClusterName:           clusterName,
   182  							ClusterctlConfigPath:  clusterctlConfigPath,
   183  							E2EConfig:             e2eConfig,
   184  							ArtifactFolder:        artifactFolder,
   185  							SkipCleanup:           skipCleanup,
   186  							CancelWatches:         cancelWatches,
   187  						}
   188  					})
   189  				})
   190  
   191  				By("PASSED!")
   192  			})
   193  		})
   194  	} else {
   195  		fmt.Fprintln(GinkgoWriter, "INFO: skipping test that requires pushing container images to an external repository")
   196  	}
   197  
   198  	Context("Creating a highly available cluster [REQUIRED]", func() {
   199  		It("With 3 control-plane nodes and 2 Linux and 2 Windows worker nodes", func() {
   200  			clusterName = getClusterName(clusterNamePrefix, "ha")
   201  
   202  			// Opt in to using Windows workers with the prow template.
   203  			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "2")).To(Succeed())
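        			// withWorkerMachineCount(2) below provisions the Linux workers; this variable
        			// adds the two Windows workers named in the spec description.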
   204  
   205  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   206  				specName,
   207  				withNamespace(namespace.Name),
   208  				withClusterName(clusterName),
   209  				withControlPlaneMachineCount(3),
   210  				withWorkerMachineCount(2),
   211  				withControlPlaneInterval(specName, "wait-control-plane-ha"),
   212  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   213  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   214  				}),
   215  				withPostMachinesProvisioned(func() {
   216  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   217  						return DaemonsetsSpecInput{
   218  							BootstrapClusterProxy: bootstrapClusterProxy,
   219  							Namespace:             namespace,
   220  							ClusterName:           clusterName,
   221  						}
   222  					})
   223  				}),
   224  			), result)
   225  
   226  			By("Verifying expected VM extensions are present on the node", func() {
   227  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   228  					return AzureVMExtensionsSpecInput{
   229  						BootstrapClusterProxy: bootstrapClusterProxy,
   230  						Namespace:             namespace,
   231  						ClusterName:           clusterName,
   232  					}
   233  				})
   234  			})
   235  
   236  			By("Verifying security rules are deleted on the Azure side", func() {
   237  				AzureSecurityGroupsSpec(ctx, func() AzureSecurityGroupsSpecInput {
   238  					return AzureSecurityGroupsSpecInput{
   239  						BootstrapClusterProxy: bootstrapClusterProxy,
   240  						Namespace:             namespace,
   241  						ClusterName:           clusterName,
   242  						Cluster:               result.Cluster,
   243  						WaitForUpdate:         e2eConfig.GetIntervals(specName, "wait-nsg-update"),
   244  					}
   245  				})
   246  			})
   247  
   248  			By("Validating failure domains", func() {
   249  				AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput {
   250  					return AzureFailureDomainsSpecInput{
   251  						BootstrapClusterProxy: bootstrapClusterProxy,
   252  						Cluster:               result.Cluster,
   253  						Namespace:             namespace,
   254  						ClusterName:           clusterName,
   255  					}
   256  				})
   257  			})
   258  
   259  			By("Creating an accessible load balancer", func() {
   260  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   261  					return AzureLBSpecInput{
   262  						BootstrapClusterProxy: bootstrapClusterProxy,
   263  						Namespace:             namespace,
   264  						ClusterName:           clusterName,
   265  						SkipCleanup:           skipCleanup,
   266  					}
   267  				})
   268  			})
   269  
   270  			By("Validating network policies", func() {
   271  				AzureNetPolSpec(ctx, func() AzureNetPolSpecInput {
   272  					return AzureNetPolSpecInput{
   273  						BootstrapClusterProxy: bootstrapClusterProxy,
   274  						Namespace:             namespace,
   275  						ClusterName:           clusterName,
   276  						SkipCleanup:           skipCleanup,
   277  					}
   278  				})
   279  			})
   280  
   281  			By("Creating an accessible load balancer for windows", func() {
   282  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   283  					return AzureLBSpecInput{
   284  						BootstrapClusterProxy: bootstrapClusterProxy,
   285  						Namespace:             namespace,
   286  						ClusterName:           clusterName,
   287  						SkipCleanup:           skipCleanup,
   288  						Windows:               true,
   289  					}
   290  				})
   291  			})
   292  
   293  			By("PASSED!")
   294  		})
   295  	})
   296  
   297  	When("Creating a highly available cluster with Azure CNI v1 [REQUIRED]", Label("Azure CNI v1"), func() {
   298  		It("can create 3 control-plane nodes and 2 Linux worker nodes", func() {
   299  			clusterName = getClusterName(clusterNamePrefix, "azcni-v1")
   300  
   301  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   302  				specName,
   303  				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)), // requires the AzureCNIv1Manifest variable in the e2e config
   304  				withFlavor("azure-cni-v1"),
   305  				withNamespace(namespace.Name),
   306  				withClusterName(clusterName),
   307  				withControlPlaneMachineCount(3),
   308  				withWorkerMachineCount(2),
   309  				withControlPlaneInterval(specName, "wait-control-plane-ha"),
   310  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   311  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   312  				}),
   313  				withPostMachinesProvisioned(func() {
   314  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   315  						return DaemonsetsSpecInput{
   316  							BootstrapClusterProxy: bootstrapClusterProxy,
   317  							Namespace:             namespace,
   318  							ClusterName:           clusterName,
   319  						}
   320  					})
   321  				}),
   322  			), result)
   323  
   324  			By("can verify VM extensions are present on the node", func() {
   325  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   326  					return AzureVMExtensionsSpecInput{
   327  						BootstrapClusterProxy: bootstrapClusterProxy,
   328  						Namespace:             namespace,
   329  						ClusterName:           clusterName,
   330  					}
   331  				})
   332  			})
   333  
   334  			By("can validate failure domains", func() {
   335  				AzureFailureDomainsSpec(ctx, func() AzureFailureDomainsSpecInput {
   336  					return AzureFailureDomainsSpecInput{
   337  						BootstrapClusterProxy: bootstrapClusterProxy,
   338  						Cluster:               result.Cluster,
   339  						Namespace:             namespace,
   340  						ClusterName:           clusterName,
   341  					}
   342  				})
   343  			})
   344  
   345  			By("can create an accessible load balancer", func() {
   346  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   347  					return AzureLBSpecInput{
   348  						BootstrapClusterProxy: bootstrapClusterProxy,
   349  						Namespace:             namespace,
   350  						ClusterName:           clusterName,
   351  						SkipCleanup:           skipCleanup,
   352  					}
   353  				})
   354  			})
   355  		})
   356  	})
   357  
   358  	Context("Creating a Flatcar cluster [OPTIONAL]", func() {
   359  		It("With Flatcar control-plane and worker nodes", func() {
   360  			clusterName = getClusterName(clusterNamePrefix, "flatcar")
   361  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   362  				specName,
   363  				withFlavor("flatcar"),
   364  				withNamespace(namespace.Name),
   365  				withClusterName(clusterName),
   366  				withKubernetesVersion(e2eConfig.GetVariable(FlatcarKubernetesVersion)),
   367  				withControlPlaneMachineCount(1),
   368  				withWorkerMachineCount(1),
   369  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   370  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   371  				}),
   372  				withPostMachinesProvisioned(func() {
   373  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   374  						return DaemonsetsSpecInput{
   375  							BootstrapClusterProxy: bootstrapClusterProxy,
   376  							Namespace:             namespace,
   377  							ClusterName:           clusterName,
   378  						}
   379  					})
   380  				}),
   381  			), result)
   382  
   383  			By("can create and access a load balancer", func() {
   384  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   385  					return AzureLBSpecInput{
   386  						BootstrapClusterProxy: bootstrapClusterProxy,
   387  						Namespace:             namespace,
   388  						ClusterName:           clusterName,
   389  						SkipCleanup:           skipCleanup,
   390  					}
   391  				})
   392  			})
   393  		})
   394  	})
   395  
   396  	Context("Creating a cluster with spot vms [OPTIONAL]", func() {
   397  		It("With spot vm machine deployments", func() {
   398  			clusterName = getClusterName(clusterNamePrefix, "spot")
   399  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   400  				specName,
   401  				withFlavor("spot"),
   402  				withNamespace(namespace.Name),
   403  				withClusterName(clusterName),
   404  				withControlPlaneMachineCount(1),
   405  				withWorkerMachineCount(1),
   406  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   407  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   408  				}),
   409  				withPostMachinesProvisioned(func() {
   410  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   411  						return DaemonsetsSpecInput{
   412  							BootstrapClusterProxy: bootstrapClusterProxy,
   413  							Namespace:             namespace,
   414  							ClusterName:           clusterName,
   415  						}
   416  					})
   417  				}),
   418  			), result)
   419  
   420  			By("can create and access a load balancer", func() {
   421  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   422  					return AzureLBSpecInput{
   423  						BootstrapClusterProxy: bootstrapClusterProxy,
   424  						Namespace:             namespace,
   425  						ClusterName:           clusterName,
   426  						SkipCleanup:           skipCleanup,
   427  					}
   428  				})
   429  			})
   430  		})
   431  	})
   432  
   433  	Context("Creating an ipv6 control-plane cluster [REQUIRED]", func() {
   434  		It("With ipv6 worker node", func() {
   435  			clusterName = getClusterName(clusterNamePrefix, "ipv6")
   436  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   437  				specName,
   438  				withFlavor("ipv6"),
   439  				withNamespace(namespace.Name),
   440  				withClusterName(clusterName),
   441  				withControlPlaneMachineCount(3),
   442  				withWorkerMachineCount(1),
   443  				withControlPlaneInterval(specName, "wait-control-plane-ha"),
   444  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   445  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   446  				}),
   447  				withPostMachinesProvisioned(func() {
   448  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   449  						return DaemonsetsSpecInput{
   450  							BootstrapClusterProxy: bootstrapClusterProxy,
   451  							Namespace:             namespace,
   452  							ClusterName:           clusterName,
   453  						}
   454  					})
   455  				}),
   456  			), result)
   457  
   458  			By("Verifying expected VM extensions are present on the node", func() {
   459  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   460  					return AzureVMExtensionsSpecInput{
   461  						BootstrapClusterProxy: bootstrapClusterProxy,
   462  						Namespace:             namespace,
   463  						ClusterName:           clusterName,
   464  					}
   465  				})
   466  			})
   467  
   468  			By("Creating an accessible ipv6 load balancer", func() {
   469  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   470  					return AzureLBSpecInput{
   471  						BootstrapClusterProxy: bootstrapClusterProxy,
   472  						Namespace:             namespace,
   473  						ClusterName:           clusterName,
   474  						SkipCleanup:           skipCleanup,
   475  						// Setting IPFamilies to ipv6 is not required for single-stack IPv6 clusters: the clusterIP
   476  						// is automatically assigned an IPv6 address. However, setting it here lets us reuse the
   477  						// same test code for both single-stack and dual-stack IPv6 clusters.
   478  						IPFamilies: []corev1.IPFamily{corev1.IPv6Protocol},
   479  					}
   480  				})
   481  			})
   482  
   483  			By("PASSED!")
   484  		})
   485  	})
   486  
   487  	Context("Creating a VMSS cluster [REQUIRED]", func() {
   488  		It("with a single control plane node and an AzureMachinePool with 2 Linux and 2 Windows worker nodes", func() {
   489  			clusterName = getClusterName(clusterNamePrefix, "vmss")
   490  
   491  			// Opt in to using Windows workers with the prow template.
   492  			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "2")).To(Succeed())
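        			// As in the HA spec, withWorkerMachineCount(2) below provides the Linux machine
        			// pool instances; this variable adds the two Windows machine pool instances.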
   493  
   494  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   495  				specName,
   496  				withFlavor("machine-pool"),
   497  				withNamespace(namespace.Name),
   498  				withClusterName(clusterName),
   499  				withControlPlaneMachineCount(1),
   500  				withWorkerMachineCount(2),
   501  				withMachineDeploymentInterval(specName, ""),
   502  				withControlPlaneInterval(specName, "wait-control-plane"),
   503  				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
   504  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   505  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   506  				}),
   507  				withPostMachinesProvisioned(func() {
   508  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   509  						return DaemonsetsSpecInput{
   510  							BootstrapClusterProxy: bootstrapClusterProxy,
   511  							Namespace:             namespace,
   512  							ClusterName:           clusterName,
   513  						}
   514  					})
   515  				}),
   516  			), result)
   517  
   518  			By("Verifying expected VM extensions are present on the node", func() {
   519  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   520  					return AzureVMExtensionsSpecInput{
   521  						BootstrapClusterProxy: bootstrapClusterProxy,
   522  						Namespace:             namespace,
   523  						ClusterName:           clusterName,
   524  					}
   525  				})
   526  			})
   527  
   528  			By("Creating an accessible load balancer", func() {
   529  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   530  					return AzureLBSpecInput{
   531  						BootstrapClusterProxy: bootstrapClusterProxy,
   532  						Namespace:             namespace,
   533  						ClusterName:           clusterName,
   534  						SkipCleanup:           skipCleanup,
   535  					}
   536  				})
   537  			})
   538  
   539  			By("Creating an accessible load balancer for windows", func() {
   540  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   541  					return AzureLBSpecInput{
   542  						BootstrapClusterProxy: bootstrapClusterProxy,
   543  						Namespace:             namespace,
   544  						ClusterName:           clusterName,
   545  						SkipCleanup:           skipCleanup,
   546  						Windows:               true,
   547  					}
   548  				})
   549  			})
   550  
   551  			By("PASSED!")
   552  		})
   553  	})
   554  
   555  	// ci-e2e.sh and Prow CI skip this test by default, since N-series GPUs are relatively expensive
   556  	// and may require specific quota limits on the subscription.
   557  	// To include this test, set `GINKGO_SKIP=""`.
   558  	// You can override the default `Standard_NV12s_v3` SKU and `Premium_LRS` storage type by setting
   559  	// the `AZURE_GPU_NODE_MACHINE_TYPE` and `AZURE_GPU_NODE_STORAGE_TYPE` environment variables.
   560  	// See https://azure.microsoft.com/en-us/pricing/details/virtual-machines/linux/ for pricing.
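        	// For example (hypothetical values), to run this spec with a different SKU:
        	//
        	//	export AZURE_GPU_NODE_MACHINE_TYPE=Standard_NC6s_v3
        	//	export AZURE_GPU_NODE_STORAGE_TYPE=Standard_LRS
        	//	export GINKGO_SKIP=""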
   561  	Context("Creating a GPU-enabled cluster [OPTIONAL]", func() {
   562  		It("with a single control plane node and 1 node", func() {
   563  			Skip("Skipping since the e2e subscription has no quota for GPU SKUs")
   564  			clusterName = getClusterName(clusterNamePrefix, "gpu")
   565  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   566  				specName,
   567  				withFlavor("nvidia-gpu"),
   568  				withNamespace(namespace.Name),
   569  				withClusterName(clusterName),
   570  				withControlPlaneMachineCount(1),
   571  				withWorkerMachineCount(1),
   572  				withMachineDeploymentInterval(specName, "wait-gpu-nodes"),
   573  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   574  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   575  				}),
   576  				withPostMachinesProvisioned(func() {
   577  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   578  						return DaemonsetsSpecInput{
   579  							BootstrapClusterProxy: bootstrapClusterProxy,
   580  							Namespace:             namespace,
   581  							ClusterName:           clusterName,
   582  						}
   583  					})
   584  					InstallGPUOperator(ctx, func() GPUOperatorSpecInput {
   585  						return GPUOperatorSpecInput{
   586  							BootstrapClusterProxy: bootstrapClusterProxy,
   587  							Namespace:             namespace,
   588  							ClusterName:           clusterName,
   589  						}
   590  					})
   591  				}),
   592  			), result)
   593  
   594  			By("Verifying expected VM extensions are present on the node", func() {
   595  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   596  					return AzureVMExtensionsSpecInput{
   597  						BootstrapClusterProxy: bootstrapClusterProxy,
   598  						Namespace:             namespace,
   599  						ClusterName:           clusterName,
   600  					}
   601  				})
   602  			})
   603  
   604  			By("Running a GPU-based calculation", func() {
   605  				AzureGPUSpec(ctx, func() AzureGPUSpecInput {
   606  					return AzureGPUSpecInput{
   607  						BootstrapClusterProxy: bootstrapClusterProxy,
   608  						Namespace:             namespace,
   609  						ClusterName:           clusterName,
   610  						SkipCleanup:           skipCleanup,
   611  					}
   612  				})
   613  			})
   614  
   615  			By("PASSED!")
   616  		})
   617  	})
   618  
   619  	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
   620  	Context("Creating a cluster with VMSS flex machinepools [OPTIONAL]", func() {
   621  		It("with 1 control plane node and 1 machinepool", func() {
   622  			clusterName = getClusterName(clusterNamePrefix, "flex")
   623  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   624  				specName,
   625  				withFlavor("machine-pool-flex"),
   626  				withNamespace(namespace.Name),
   627  				withClusterName(clusterName),
   628  				withControlPlaneMachineCount(1),
   629  				withWorkerMachineCount(1),
   630  				withMachineDeploymentInterval(specName, ""),
   631  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   632  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   633  				}),
   634  				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
   635  				withControlPlaneInterval(specName, "wait-control-plane"),
   636  			), result)
   637  
   638  			By("Verifying machinepool can scale out and in", func() {
   639  				AzureMachinePoolsSpec(ctx, func() AzureMachinePoolsSpecInput {
   640  					return AzureMachinePoolsSpecInput{
   641  						Cluster:               result.Cluster,
   642  						BootstrapClusterProxy: bootstrapClusterProxy,
   643  						Namespace:             namespace,
   644  						ClusterName:           clusterName,
   645  						WaitIntervals:         e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
   646  					}
   647  				})
   648  			})
   649  
   650  			By("Verifying expected VM extensions are present on the node", func() {
   651  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   652  					return AzureVMExtensionsSpecInput{
   653  						BootstrapClusterProxy: bootstrapClusterProxy,
   654  						Namespace:             namespace,
   655  						ClusterName:           clusterName,
   656  					}
   657  				})
   658  			})
   659  
   660  			By("Creating an accessible load balancer", func() {
   661  				AzureLBSpec(ctx, func() AzureLBSpecInput {
   662  					return AzureLBSpecInput{
   663  						BootstrapClusterProxy: bootstrapClusterProxy,
   664  						Namespace:             namespace,
   665  						ClusterName:           clusterName,
   666  						SkipCleanup:           skipCleanup,
   667  					}
   668  				})
   669  			})
   670  
   671  			By("PASSED!")
   672  		})
   673  	})
   674  
   675  	// You can override the default SKU `Standard_D2s_v3` by setting the
   676  	// `AZURE_AKS_NODE_MACHINE_TYPE` environment variable.
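        	// For example (hypothetical value): export AZURE_AKS_NODE_MACHINE_TYPE=Standard_D4s_v3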
   677  	Context("Creating an AKS cluster for control plane tests [Managed Kubernetes]", func() {
   678  		It("with a single control plane node and 1 node", func() {
   679  			clusterName = getClusterName(clusterNamePrefix, aksClusterNameSuffix)
   680  			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
   681  			Expect(err).NotTo(HaveOccurred())
   682  			Byf("Upgrading from k8s version %s", kubernetesVersionUpgradeFrom)
   683  			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
   684  			Expect(err).NotTo(HaveOccurred())
   685  			Byf("Upgrading to k8s version %s", kubernetesVersion)
   686  
   687  			clusterTemplate := createApplyClusterTemplateInput(
   688  				specName,
   689  				withFlavor("aks"),
   690  				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
   691  				withNamespace(namespace.Name),
   692  				withClusterName(clusterName),
   693  				withKubernetesVersion(kubernetesVersionUpgradeFrom),
   694  				withControlPlaneMachineCount(1),
   695  				withWorkerMachineCount(1),
   696  				withMachineDeploymentInterval(specName, ""),
   697  				withMachinePoolInterval(specName, "wait-worker-nodes"),
   698  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   699  					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
   700  					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
   701  				}),
   702  			)
   703  
   704  			clusterctl.ApplyClusterTemplateAndWait(ctx, clusterTemplate, result)
   705  
   706  			// This test should be first to make sure that the template re-applied here matches the current
   707  			// state of the cluster exactly.
   708  			By("orphaning and adopting the cluster", func() {
   709  				AKSAdoptSpec(ctx, func() AKSAdoptSpecInput {
   710  					return AKSAdoptSpecInput{
   711  						ApplyInput:   clusterTemplate,
   712  						ApplyResult:  result,
   713  						Cluster:      result.Cluster,
   714  						MachinePools: result.MachinePools,
   715  					}
   716  				})
   717  			})
   718  
   719  			By("adding an AKS marketplace extension", func() {
   720  				AKSMarketplaceExtensionSpec(ctx, func() AKSMarketplaceExtensionSpecInput {
   721  					return AKSMarketplaceExtensionSpecInput{
   722  						Cluster:       result.Cluster,
   723  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   724  					}
   725  				})
   726  			})
   727  
   728  			By("attaching the cluster to azure fleet", func() {
   729  				AKSFleetsMemberSpec(ctx, func() AKSFleetsMemberInput {
   730  					return AKSFleetsMemberInput{
   731  						Cluster:       result.Cluster,
   732  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   733  					}
   734  				})
   735  			})
   736  
   737  			By("Upgrading the Kubernetes version of the cluster", func() {
   738  				AKSUpgradeSpec(ctx, func() AKSUpgradeSpecInput {
   739  					return AKSUpgradeSpecInput{
   740  						Cluster:                    result.Cluster,
   741  						MachinePools:               result.MachinePools,
   742  						KubernetesVersionUpgradeTo: kubernetesVersion,
   743  						WaitForControlPlane:        e2eConfig.GetIntervals(specName, "wait-machine-upgrade"),
   744  						WaitForMachinePools:        e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
   745  					}
   746  				})
   747  			})
   748  
   749  			By("modifying the azure cluster-autoscaler settings", func() {
   750  				AKSAzureClusterAutoscalerSettingsSpec(ctx, func() AKSAzureClusterAutoscalerSettingsSpecInput {
   751  					return AKSAzureClusterAutoscalerSettingsSpecInput{
   752  						Cluster:       result.Cluster,
   753  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
   754  					}
   755  				})
   756  			})
   757  		})
   758  	})
   759  
   760  	Context("Creating an AKS cluster for node pool tests [Managed Kubernetes]", func() {
   761  		It("with a single control plane node and 1 node", func() {
   762  			clusterName = getClusterName(clusterNamePrefix, "pool")
   763  			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
   764  			Expect(err).NotTo(HaveOccurred())
   765  
   766  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   767  				specName,
   768  				withFlavor("aks"),
   769  				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
   770  				withNamespace(namespace.Name),
   771  				withClusterName(clusterName),
   772  				withKubernetesVersion(kubernetesVersion),
   773  				withControlPlaneMachineCount(1),
   774  				withWorkerMachineCount(1),
   775  				withMachineDeploymentInterval(specName, ""),
   776  				withMachinePoolInterval(specName, "wait-worker-nodes"),
   777  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   778  					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
   779  					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
   780  				}),
   781  			), result)
   782  
   783  			By("Exercising machine pools", func() {
   784  				AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
   785  					return AKSMachinePoolSpecInput{
   786  						Cluster:       result.Cluster,
   787  						MachinePools:  result.MachinePools,
   788  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   789  					}
   790  				})
   791  			})
   792  
   793  			By("creating a machine pool with public IP addresses from a prefix", func() {
   794  				// This test is also currently serving as the canonical
   795  				// "create/delete node pool" test. Eventually, that should be
   796  				// made more distinct from this public IP prefix test.
   797  				AKSPublicIPPrefixSpec(ctx, func() AKSPublicIPPrefixSpecInput {
   798  					return AKSPublicIPPrefixSpecInput{
   799  						Cluster:           result.Cluster,
   800  						KubernetesVersion: kubernetesVersion,
   801  						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
   802  					}
   803  				})
   804  			})
   805  
   806  			By("creating a machine pool with spot max price and scale down mode", func() {
   807  				AKSSpotSpec(ctx, func() AKSSpotSpecInput {
   808  					return AKSSpotSpecInput{
   809  						Cluster:           result.Cluster,
   810  						KubernetesVersion: kubernetesVersion,
   811  						WaitIntervals:     e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
   812  					}
   813  				})
   814  			})
   815  
   816  			By("modifying nodepool autoscaling configuration", func() {
   817  				AKSAutoscaleSpec(ctx, func() AKSAutoscaleSpecInput {
   818  					return AKSAutoscaleSpecInput{
   819  						Cluster:       result.Cluster,
   820  						MachinePool:   result.MachinePools[0],
   821  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   822  					}
   823  				})
   824  			})
   825  
   826  			By("modifying additionalTags configuration", func() {
   827  				AKSAdditionalTagsSpec(ctx, func() AKSAdditionalTagsSpecInput {
   828  					return AKSAdditionalTagsSpecInput{
   829  						Cluster:       result.Cluster,
   830  						MachinePools:  result.MachinePools,
   831  						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   832  					}
   833  				})
   834  			})
   835  
   836  			By("modifying node labels configuration", func() {
   837  				AKSNodeLabelsSpec(ctx, func() AKSNodeLabelsSpecInput {
   838  					return AKSNodeLabelsSpecInput{
   839  						Cluster:       result.Cluster,
   840  						MachinePools:  result.MachinePools,
   841  						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   842  					}
   843  				})
   844  			})
   845  
   846  			By("modifying taints configuration", func() {
   847  				AKSNodeTaintsSpec(ctx, func() AKSNodeTaintsSpecInput {
   848  					return AKSNodeTaintsSpecInput{
   849  						Cluster:       result.Cluster,
   850  						MachinePools:  result.MachinePools,
   851  						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   852  					}
   853  				})
   854  			})
   855  
   856  			By("creating a byo nodepool", func() {
   857  				AKSBYONodeSpec(ctx, func() AKSBYONodeSpecInput {
   858  					return AKSBYONodeSpecInput{
   859  						Cluster:             result.Cluster,
   860  						KubernetesVersion:   kubernetesVersion,
   861  						WaitIntervals:       e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
   862  						ExpectedWorkerNodes: result.ExpectedWorkerNodes(),
   863  					}
   864  				})
   865  			})
   866  
   867  			By("modifying custom patches", func() {
   868  				AKSPatchSpec(ctx, func() AKSPatchSpecInput {
   869  					return AKSPatchSpecInput{
   870  						Cluster:       result.Cluster,
   871  						MachinePools:  result.MachinePools,
   872  						WaitForUpdate: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   873  					}
   874  				})
   875  			})
   876  		})
   877  	})
   878  
   879  	Context("Creating an AKS cluster using ClusterClass [Managed Kubernetes]", func() {
   880  		It("with a single control plane node and 1 node", func() {
   881  			// Use "default" as the clusterclass name so the test infra can find the clusterclass template.
   882  			Expect(os.Setenv("CLUSTER_CLASS_NAME", "default")).To(Succeed())
   883  
   884  			// Use "cc" as the spec name because otherwise the NAT gateway public IP name would exceed the length limit.
   885  			clusterName = getClusterName(clusterNamePrefix, "cc")
   886  			kubernetesVersionUpgradeFrom, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersionUpgradeFrom)
   887  			Expect(err).NotTo(HaveOccurred())
   888  			Byf("Upgrading from k8s version %s", kubernetesVersionUpgradeFrom)
   889  			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
   890  			Expect(err).NotTo(HaveOccurred())
   891  			Byf("Upgrading to k8s version %s", kubernetesVersion)
   892  
   893  			// Create a cluster from the aks-topology flavor, which references the "default" cluster class set above.
   894  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   895  				specName,
   896  				withFlavor("aks-topology"),
   897  				withAzureCNIv1Manifest(e2eConfig.GetVariable(AzureCNIv1Manifest)),
   898  				withNamespace(namespace.Name),
   899  				withClusterName(clusterName),
   900  				withKubernetesVersion(kubernetesVersionUpgradeFrom),
   901  				withControlPlaneMachineCount(1),
   902  				withWorkerMachineCount(1),
   903  				withMachineDeploymentInterval(specName, ""),
   904  				withMachinePoolInterval(specName, "wait-machine-pool-nodes"),
   905  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   906  					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
   907  					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
   908  				}),
   909  			), result)
   910  
   911  			By("Performing ClusterClass operations on the cluster", func() {
   912  				AKSClusterClassSpec(ctx, func() AKSClusterClassInput {
   913  					return AKSClusterClassInput{
   914  						Cluster:                    result.Cluster,
   915  						MachinePool:                result.MachinePools[0],
   916  						WaitIntervals:              e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   917  						WaitUpgradeIntervals:       e2eConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
   918  						KubernetesVersionUpgradeTo: kubernetesVersion,
   919  					}
   920  				})
   921  			})
   922  		})
   923  	})
   924  
   925  	Context("Creating an AKS cluster with the ASO API [Managed Kubernetes]", func() {
   926  		It("with a single control plane node and 1 node", func() {
   927  			clusterName = getClusterName(clusterNamePrefix, "asoapi")
   928  			kubernetesVersion, err := GetAKSKubernetesVersion(ctx, e2eConfig, AKSKubernetesVersion)
   929  			Expect(err).NotTo(HaveOccurred())
   930  
   931  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   932  				specName,
   933  				withFlavor("aks-aso"),
   934  				withNamespace(namespace.Name),
   935  				withClusterName(clusterName),
   936  				withKubernetesVersion(kubernetesVersion),
   937  				withWorkerMachineCount(1),
   938  				withMachinePoolInterval(specName, "wait-worker-nodes"),
   939  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   940  					WaitForControlPlaneInitialized:   WaitForAKSControlPlaneInitialized,
   941  					WaitForControlPlaneMachinesReady: WaitForAKSControlPlaneReady,
   942  				}),
   943  			), result)
   944  
   945  			By("Exercising machine pools", func() {
   946  				AKSMachinePoolSpec(ctx, func() AKSMachinePoolSpecInput {
   947  					return AKSMachinePoolSpecInput{
   948  						Cluster:       result.Cluster,
   949  						MachinePools:  result.MachinePools,
   950  						WaitIntervals: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   951  					}
   952  				})
   953  			})
   954  		})
   955  	})
   956  
   957  	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
   958  	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
   959  	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
   960  	Context("Creating a dual-stack cluster [OPTIONAL]", func() {
   961  		It("With dual-stack worker node", func() {
   962  			By("using user-assigned identity")
   963  			clusterName = getClusterName(clusterNamePrefix, "dual-stack")
   964  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
   965  				specName,
   966  				withClusterProxy(bootstrapClusterProxy),
   967  				withFlavor("dual-stack"),
   968  				withNamespace(namespace.Name),
   969  				withClusterName(clusterName),
   970  				withControlPlaneMachineCount(3),
   971  				withWorkerMachineCount(1),
   972  				withControlPlaneInterval(specName, "wait-control-plane-ha"),
   973  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
   974  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
   975  				}),
   976  				withPostMachinesProvisioned(func() {
   977  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
   978  						return DaemonsetsSpecInput{
   979  							BootstrapClusterProxy: bootstrapClusterProxy,
   980  							Namespace:             namespace,
   981  							ClusterName:           clusterName,
   982  						}
   983  					})
   984  				}),
   985  			), result)
   986  
   987  			By("Verifying expected VM extensions are present on the node", func() {
   988  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
   989  					return AzureVMExtensionsSpecInput{
   990  						BootstrapClusterProxy: bootstrapClusterProxy,
   991  						Namespace:             namespace,
   992  						ClusterName:           clusterName,
   993  					}
   994  				})
   995  			})
   996  
   997  			// A dual-stack external IP for dual-stack clusters is not yet supported.
   998  			// The first IP family in ipFamilies is used for the primary clusterIP, and
   999  			// cloud-provider determines the ELB/ILB IP family from the primary clusterIP.
  1000  			By("Creating an accessible ipv4 load balancer", func() {
  1001  				AzureLBSpec(ctx, func() AzureLBSpecInput {
  1002  					return AzureLBSpecInput{
  1003  						BootstrapClusterProxy: bootstrapClusterProxy,
  1004  						Namespace:             namespace,
  1005  						ClusterName:           clusterName,
  1006  						SkipCleanup:           skipCleanup,
  1007  						IPFamilies:            []corev1.IPFamily{corev1.IPv4Protocol},
  1008  					}
  1009  				})
  1010  			})
  1011  
  1012  			By("Creating an accessible ipv6 load balancer", func() {
  1013  				AzureLBSpec(ctx, func() AzureLBSpecInput {
  1014  					return AzureLBSpecInput{
  1015  						BootstrapClusterProxy: bootstrapClusterProxy,
  1016  						Namespace:             namespace,
  1017  						ClusterName:           clusterName,
  1018  						SkipCleanup:           skipCleanup,
  1019  						IPFamilies:            []corev1.IPFamily{corev1.IPv6Protocol},
  1020  					}
  1021  				})
  1022  			})
  1023  
  1024  			By("PASSED!")
  1025  		})
  1026  	})
  1027  
  1028  	Context("Creating clusters using clusterclass [OPTIONAL]", func() {
  1029  		It("with a single control plane node, one Linux worker node, and one Windows worker node", func() {
  1030  			// Use "ci-default" as the clusterclass name so the test infra can find the clusterclass template.
  1031  			Expect(os.Setenv("CLUSTER_CLASS_NAME", "ci-default")).To(Succeed())
  1032  
  1033  			// Use "cc" as the spec name because otherwise the NAT gateway public IP name would exceed the length limit.
  1034  			clusterName = getClusterName(clusterNamePrefix, "cc")
  1035  
  1036  			// Opt in to using Windows workers with the prow template.
  1037  			Expect(os.Setenv("WINDOWS_WORKER_MACHINE_COUNT", "1")).To(Succeed())
  1038  
  1039  			// Create a cluster from the topology flavor, which references the "ci-default" cluster class set above.
  1040  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
  1041  				specName,
  1042  				withFlavor("topology"),
  1043  				withNamespace(namespace.Name),
  1044  				withClusterName(clusterName),
  1045  				withControlPlaneMachineCount(1),
  1046  				withWorkerMachineCount(1),
  1047  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
  1048  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
  1049  				}),
  1050  				withPostMachinesProvisioned(func() {
  1051  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
  1052  						return DaemonsetsSpecInput{
  1053  							BootstrapClusterProxy: bootstrapClusterProxy,
  1054  							Namespace:             namespace,
  1055  							ClusterName:           clusterName,
  1056  						}
  1057  					})
  1058  				}),
  1059  			), result)
  1060  
  1061  			By("Verifying expected VM extensions are present on the node", func() {
  1062  				AzureVMExtensionsSpec(ctx, func() AzureVMExtensionsSpecInput {
  1063  					return AzureVMExtensionsSpecInput{
  1064  						BootstrapClusterProxy: bootstrapClusterProxy,
  1065  						Namespace:             namespace,
  1066  						ClusterName:           clusterName,
  1067  					}
  1068  				})
  1069  			})
  1070  
  1071  			By("PASSED!")
  1072  		})
  1073  	})
  1074  
  1075  	// ci-e2e.sh and Prow CI skip this test by default. To include this test, set `GINKGO_SKIP=""`.
  1076  	// This spec expects a user-assigned identity named "cloud-provider-user-identity" in a "capz-ci"
  1077  	// resource group. Override these defaults by setting the USER_IDENTITY and CI_RG environment variables.
  1078  	// You can also override the default `Standard_DS2_v2` and `Standard_DS4_v2` SKUs by setting
  1079  	// the `AZURE_EDGEZONE_CONTROL_PLANE_MACHINE_TYPE` and `AZURE_EDGEZONE_NODE_MACHINE_TYPE` environment variables.
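        	// For example (hypothetical values):
        	//
        	//	export AZURE_EDGEZONE_CONTROL_PLANE_MACHINE_TYPE=Standard_DS3_v2
        	//	export AZURE_EDGEZONE_NODE_MACHINE_TYPE=Standard_DS3_v2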
  1080  	Context("Creating clusters on public MEC [OPTIONAL]", func() {
  1081  		It("with 1 control plane node and 1 worker node", func() {
  1082  			Skip("Skipping public MEC test until a new edgezone is available")
  1083  			By("using user-assigned identity")
  1084  			clusterName = getClusterName(clusterNamePrefix, "edgezone")
  1085  			clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
  1086  				specName,
  1087  				withFlavor("edgezone"),
  1088  				withNamespace(namespace.Name),
  1089  				withClusterName(clusterName),
  1090  				withControlPlaneMachineCount(1),
  1091  				withWorkerMachineCount(1),
  1092  				withControlPlaneWaiters(clusterctl.ControlPlaneWaiters{
  1093  					WaitForControlPlaneInitialized: EnsureControlPlaneInitializedNoAddons,
  1094  				}),
  1095  				withPostMachinesProvisioned(func() {
  1096  					EnsureDaemonsets(ctx, func() DaemonsetsSpecInput {
  1097  						return DaemonsetsSpecInput{
  1098  							BootstrapClusterProxy: bootstrapClusterProxy,
  1099  							Namespace:             namespace,
  1100  							ClusterName:           clusterName,
  1101  						}
  1102  					})
  1103  				}),
  1104  			), result)
  1105  
  1106  			By("Verifying that the extendedLocation property on Azure VMs corresponds to the extendedLocation property in the edgezone yaml file", func() {
  1107  				AzureEdgeZoneClusterSpec(ctx, func() AzureEdgeZoneClusterSpecInput {
  1108  					return AzureEdgeZoneClusterSpecInput{
  1109  						BootstrapClusterProxy: bootstrapClusterProxy,
  1110  						Namespace:             namespace,
  1111  						ClusterName:           clusterName,
  1112  						E2EConfig:             e2eConfig,
  1113  					}
  1114  				})
  1115  			})
  1116  
  1117  			By("PASSED!")
  1118  		})
  1119  	})
  1120  })