sigs.k8s.io/cluster-api-provider-azure@v1.14.3/test/e2e/common.go

     1  //go:build e2e
     2  // +build e2e
     3  
     4  /*
     5  Copyright 2020 The Kubernetes Authors.
     6  
     7  Licensed under the Apache License, Version 2.0 (the "License");
     8  you may not use this file except in compliance with the License.
     9  You may obtain a copy of the License at
    10  
    11      http://www.apache.org/licenses/LICENSE-2.0
    12  
    13  Unless required by applicable law or agreed to in writing, software
    14  distributed under the License is distributed on an "AS IS" BASIS,
    15  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    16  See the License for the specific language governing permissions and
    17  limitations under the License.
    18  */
    19  
    20  package e2e
    21  
    22  import (
    23  	"context"
    24  	"fmt"
    25  	"os"
    26  	"os/exec"
    27  	"path"
    28  	"path/filepath"
    29  	"strings"
    30  
    31  	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
    32  	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
    33  	. "github.com/onsi/ginkgo/v2"
    34  	"github.com/onsi/ginkgo/v2/types"
    35  	. "github.com/onsi/gomega"
    36  	corev1 "k8s.io/api/core/v1"
    37  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    38  	"k8s.io/client-go/rest"
    39  	"k8s.io/client-go/tools/clientcmd"
    40  	"k8s.io/utils/ptr"
    41  	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
    42  	"sigs.k8s.io/cluster-api-provider-azure/azure"
    43  	e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
    44  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    45  	kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
    46  	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
    47  	"sigs.k8s.io/cluster-api/test/framework"
    48  	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
    49  	"sigs.k8s.io/cluster-api/util/kubeconfig"
    50  	"sigs.k8s.io/controller-runtime/pkg/client"
    51  )
    52  
    53  // Test suite constants for e2e config variables
    54  const (
    55  	AddonsPath                        = "ADDONS_PATH"
    56  	RedactLogScriptPath               = "REDACT_LOG_SCRIPT"
    57  	AzureLocation                     = "AZURE_LOCATION"
    58  	AzureExtendedLocationType         = "AZURE_EXTENDEDLOCATION_TYPE"
    59  	AzureExtendedLocationName         = "AZURE_EXTENDEDLOCATION_NAME"
    60  	AzureResourceGroup                = "AZURE_RESOURCE_GROUP"
    61  	AzureCustomVnetResourceGroup      = "AZURE_CUSTOM_VNET_RESOURCE_GROUP"
    62  	AzureVNetName                     = "AZURE_VNET_NAME"
    63  	AzureCustomVNetName               = "AZURE_CUSTOM_VNET_NAME"
    64  	AzureInternalLBIP                 = "AZURE_INTERNAL_LB_IP"
    65  	AzureCPSubnetCidr                 = "AZURE_CP_SUBNET_CIDR"
    66  	AzureVNetCidr                     = "AZURE_PRIVATE_VNET_CIDR"
    67  	AzureNodeSubnetCidr               = "AZURE_NODE_SUBNET_CIDR"
    68  	AzureBastionSubnetCidr            = "AZURE_BASTION_SUBNET_CIDR"
    69  	ClusterIdentityName               = "CLUSTER_IDENTITY_NAME"
    70  	ClusterIdentityNamespace          = "CLUSTER_IDENTITY_NAMESPACE"
    71  	ClusterIdentitySecretName         = "AZURE_CLUSTER_IDENTITY_SECRET_NAME"      //nolint:gosec // Not a secret itself, just its name
    72  	ClusterIdentitySecretNamespace    = "AZURE_CLUSTER_IDENTITY_SECRET_NAMESPACE" //nolint:gosec // Not a secret itself, just its name
    73  	AzureClientSecret                 = "AZURE_CLIENT_SECRET"                     //nolint:gosec // Not a secret itself, just its name
    74  	AzureClientID                     = "AZURE_CLIENT_ID"
    75  	AzureSubscriptionID               = "AZURE_SUBSCRIPTION_ID"
    76  	AzureUserIdentity                 = "USER_IDENTITY"
    77  	AzureIdentityResourceGroup        = "CI_RG"
    78  	JobName                           = "JOB_NAME"
    79  	Timestamp                         = "TIMESTAMP"
    80  	AKSKubernetesVersion              = "AKS_KUBERNETES_VERSION"
    81  	AKSKubernetesVersionUpgradeFrom   = "AKS_KUBERNETES_VERSION_UPGRADE_FROM"
    82  	FlatcarKubernetesVersion          = "FLATCAR_KUBERNETES_VERSION"
    83  	FlatcarVersion                    = "FLATCAR_VERSION"
    84  	SecurityScanFailThreshold         = "SECURITY_SCAN_FAIL_THRESHOLD"
    85  	SecurityScanContainer             = "SECURITY_SCAN_CONTAINER"
    86  	CalicoVersion                     = "CALICO_VERSION"
    87  	ManagedClustersResourceType       = "managedClusters"
    88  	capiImagePublisher                = "cncf-upstream"
    89  	capiOfferName                     = "capi"
    90  	capiWindowsOfferName              = "capi-windows"
    91  	aksClusterNameSuffix              = "aks"
    92  	flatcarCAPICommunityGallery       = "flatcar4capi-742ef0cb-dcaa-4ecb-9cb0-bfd2e43dccc0"
    93  	defaultNamespace                  = "default"
    94  	AzureCNIv1Manifest                = "AZURE_CNI_V1_MANIFEST_PATH"
    95  	OldProviderUpgradeVersion         = "OLD_PROVIDER_UPGRADE_VERSION"
    96  	LatestProviderUpgradeVersion      = "LATEST_PROVIDER_UPGRADE_VERSION"
    97  	OldCAPIUpgradeVersion             = "OLD_CAPI_UPGRADE_VERSION"
    98  	LatestCAPIUpgradeVersion          = "LATEST_CAPI_UPGRADE_VERSION"
    99  	OldAddonProviderUpgradeVersion    = "OLD_CAAPH_UPGRADE_VERSION"
   100  	LatestAddonProviderUpgradeVersion = "LATEST_CAAPH_UPGRADE_VERSION"
   101  	KubernetesVersionAPIUpgradeFrom   = "KUBERNETES_VERSION_API_UPGRADE_FROM"
   102  )
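
         // These keys are typically resolved through the e2e config or the environment, for example
         // (illustrative only):
         //
         //	location := e2eConfig.GetVariable(AzureLocation)
         //	rg := os.Getenv(AzureResourceGroup)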
   103  
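         // Byf is a printf-style convenience wrapper around Ginkgo's By.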
   104  func Byf(format string, a ...interface{}) {
   105  	By(fmt.Sprintf(format, a...))
   106  }
   107  
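         // setupSpecNamespace returns the namespace named namespaceName on the cluster behind clusterProxy,
         // creating it if it does not exist, and starts watching its events so they are written under
         // artifactFolder. The returned CancelFunc stops the event watcher.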
   108  func setupSpecNamespace(ctx context.Context, namespaceName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc, error) {
   109  	Byf("Creating namespace %q for hosting the cluster", namespaceName)
   110  	Logf("starting to create namespace for hosting the %q test spec", namespaceName)
   111  	logPath := filepath.Join(artifactFolder, "clusters", clusterProxy.GetName())
   112  	namespace, err := e2e_namespace.Get(ctx, clusterProxy.GetClientSet(), namespaceName)
   113  	if err != nil && !apierrors.IsNotFound(err) {
   114  		return nil, nil, err
   115  	}
   116  
    117  	// The namespace already exists; just wire up an event watcher for it.
   118  	if err == nil {
   119  		Byf("Creating event watcher for existing namespace %q", namespace.Name)
   120  		watchesCtx, cancelWatches := context.WithCancel(ctx)
   121  		go func() {
   122  			defer GinkgoRecover()
   123  			framework.WatchNamespaceEvents(watchesCtx, framework.WatchNamespaceEventsInput{
   124  				ClientSet: clusterProxy.GetClientSet(),
   125  				Name:      namespace.Name,
   126  				LogFolder: logPath,
   127  			})
   128  		}()
   129  
   130  		return namespace, cancelWatches, nil
   131  	}
   132  
   133  	// create and wire up namespace
   134  	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
   135  		Creator:   clusterProxy.GetClient(),
   136  		ClientSet: clusterProxy.GetClientSet(),
   137  		Name:      namespaceName,
   138  		LogFolder: logPath,
   139  	})
   140  
   141  	return namespace, cancelWatches, nil
   142  }
   143  
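         // cleanupInput holds the parameters consumed by dumpSpecResourcesAndCleanup when a spec finishes.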
   144  type cleanupInput struct {
   145  	SpecName               string
   146  	ClusterProxy           framework.ClusterProxy
   147  	ArtifactFolder         string
   148  	Namespace              *corev1.Namespace
   149  	CancelWatches          context.CancelFunc
   150  	Cluster                *clusterv1.Cluster
   151  	IntervalsGetter        func(spec, key string) []interface{}
   152  	SkipCleanup            bool
   153  	SkipLogCollection      bool
   154  	AdditionalCleanup      func()
   155  	SkipResourceGroupCheck bool
   156  }
   157  
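         // dumpSpecResourcesAndCleanup dumps all Cluster API resources and workload cluster logs to the
         // artifact folder and, unless SkipCleanup is set, deletes the clusters and the spec namespace and
         // then verifies that the cluster's Azure resource group no longer exists.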
   158  func dumpSpecResourcesAndCleanup(ctx context.Context, input cleanupInput) {
   159  	defer func() {
   160  		input.CancelWatches()
   161  		redactLogs()
   162  	}()
   163  
   164  	Logf("Dumping all the Cluster API resources in the %q namespace", input.Namespace.Name)
   165  	// Dump all Cluster API related resources to artifacts before deleting them.
   166  	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
   167  		Lister:    input.ClusterProxy.GetClient(),
   168  		Namespace: input.Namespace.Name,
   169  		LogPath:   filepath.Join(input.ArtifactFolder, "clusters", input.ClusterProxy.GetName(), "resources"),
   170  	})
   171  
   172  	if input.Cluster == nil {
   173  		By("Unable to dump workload cluster logs as the cluster is nil")
   174  	} else if !input.SkipLogCollection {
   175  		Byf("Dumping logs from the %q workload cluster", input.Cluster.Name)
   176  		input.ClusterProxy.CollectWorkloadClusterLogs(ctx, input.Cluster.Namespace, input.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", input.Cluster.Name))
   177  	}
   178  
   179  	if input.SkipCleanup {
   180  		return
   181  	}
   182  
   183  	Logf("Deleting all clusters in the %s namespace", input.Namespace.Name)
    184  	// Until https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed, there is a chance that the
    185  	// cluster variable is not set even if the cluster exists, so we call DeleteAllClustersAndWait
    186  	// instead of DeleteClusterAndWait.
   187  	deleteTimeoutConfig := "wait-delete-cluster"
    188  	if input.Cluster != nil && strings.Contains(input.Cluster.Name, aksClusterNameSuffix) {
   189  		deleteTimeoutConfig = "wait-delete-cluster-aks"
   190  	}
   191  	framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
   192  		Client:    input.ClusterProxy.GetClient(),
   193  		Namespace: input.Namespace.Name,
   194  	}, input.IntervalsGetter(input.SpecName, deleteTimeoutConfig)...)
   195  
   196  	Logf("Deleting namespace used for hosting the %q test spec", input.SpecName)
   197  	framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
   198  		Deleter: input.ClusterProxy.GetClient(),
   199  		Name:    input.Namespace.Name,
   200  	})
   201  
   202  	if input.AdditionalCleanup != nil {
   203  		Logf("Running additional cleanup for the %q test spec", input.SpecName)
   204  		input.AdditionalCleanup()
   205  	}
   206  
   207  	Logf("Checking if any resources are left over in Azure for spec %q", input.SpecName)
   208  
   209  	if !input.SkipResourceGroupCheck {
   210  		ExpectResourceGroupToBe404(ctx)
   211  	}
   212  }
   213  
   214  // ExpectResourceGroupToBe404 performs a GET request to Azure to determine if the cluster resource group still exists.
   215  // If it does still exist, it means the cluster was not deleted and is leaking Azure resources.
   216  func ExpectResourceGroupToBe404(ctx context.Context) {
   217  	cred, err := azidentity.NewDefaultAzureCredential(nil)
   218  	Expect(err).NotTo(HaveOccurred())
   219  	groupsClient, err := armresources.NewResourceGroupsClient(getSubscriptionID(Default), cred, nil)
   220  	Expect(err).NotTo(HaveOccurred())
   221  	_, err = groupsClient.Get(ctx, os.Getenv(AzureResourceGroup), nil)
   222  	Expect(azure.ResourceNotFound(err)).To(BeTrue(), "The resource group in Azure still exists. After deleting the cluster all of the Azure resources should also be deleted.")
   223  }
   224  
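         // redactLogs runs the script referenced by the REDACT_LOG_SCRIPT variable to scrub sensitive
         // values from the collected logs.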
   225  func redactLogs() {
   226  	By("Redacting sensitive information from logs")
   227  	Expect(e2eConfig.Variables).To(HaveKey(RedactLogScriptPath))
   228  	//nolint:gosec // Ignore warning about running a command constructed from user input
   229  	cmd := exec.Command(e2eConfig.GetVariable(RedactLogScriptPath))
   230  	if err := cmd.Run(); err != nil {
   231  		LogWarningf("Redact logs command failed: %v", err)
   232  	}
   233  }
   234  
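         // createRestConfig reads the workload cluster kubeconfig from its secret, writes it to tmpdir,
         // and returns a *rest.Config built from it.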
   235  func createRestConfig(ctx context.Context, tmpdir, namespace, clusterName string) *rest.Config {
   236  	cluster := client.ObjectKey{
   237  		Namespace: namespace,
   238  		Name:      clusterName,
   239  	}
   240  	kubeConfigData, err := kubeconfig.FromSecret(ctx, bootstrapClusterProxy.GetClient(), cluster)
   241  	Expect(err).NotTo(HaveOccurred())
   242  
   243  	kubeConfigPath := path.Join(tmpdir, clusterName+".kubeconfig")
   244  	Expect(os.WriteFile(kubeConfigPath, kubeConfigData, 0o600)).To(Succeed())
   245  
   246  	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
   247  	Expect(err).NotTo(HaveOccurred())
   248  
   249  	return config
   250  }
   251  
   252  // EnsureControlPlaneInitialized waits for the cluster KubeadmControlPlane object to be initialized
   253  // and then installs cloud-provider-azure components via Helm.
   254  // Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
   255  // in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
   256  func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
   257  	ensureControlPlaneInitialized(ctx, input, result, true)
   258  }
   259  
    260  // EnsureControlPlaneInitializedNoAddons waits for the cluster KubeadmControlPlane object to be initialized
    261  // without installing cloud-provider-azure or CNI components via Helm.
   262  // Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
   263  // in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
   264  func EnsureControlPlaneInitializedNoAddons(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
   265  	ensureControlPlaneInitialized(ctx, input, result, false)
   266  }
   267  
    268  // ensureControlPlaneInitialized waits for the cluster KubeadmControlPlane object to be initialized
    269  // and then installs cloud-provider-azure components via Helm when installHelmCharts is true.
   270  // Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
   271  // in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
   272  func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult, installHelmCharts bool) {
   273  	getter := input.ClusterProxy.GetClient()
   274  	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
   275  		Getter:    getter,
   276  		Name:      input.ClusterName,
   277  		Namespace: input.Namespace,
   278  	})
   279  	kubeadmControlPlane := &kubeadmv1.KubeadmControlPlane{}
   280  	key := client.ObjectKey{
   281  		Namespace: cluster.Spec.ControlPlaneRef.Namespace,
   282  		Name:      cluster.Spec.ControlPlaneRef.Name,
   283  	}
   284  
   285  	By("Ensuring KubeadmControlPlane is initialized")
   286  	Eventually(func(g Gomega) {
   287  		g.Expect(getter.Get(ctx, key, kubeadmControlPlane)).To(Succeed(), "Failed to get KubeadmControlPlane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)
   288  		g.Expect(kubeadmControlPlane.Status.Initialized).To(BeTrue(), "KubeadmControlPlane is not yet initialized")
   289  	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "KubeadmControlPlane object %s/%s was not initialized in time", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)
   290  
   291  	By("Ensuring API Server is reachable before applying Helm charts")
   292  	Eventually(func(g Gomega) {
   293  		ns := &corev1.Namespace{}
   294  		clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.Namespace, input.ClusterName)
   295  		g.Expect(clusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: kubesystem}, ns)).To(Succeed(), "Failed to get kube-system namespace")
   296  	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time")
   297  
   298  	_, hasWindows := cluster.Labels["cni-windows"]
   299  
   300  	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != infrav1.AzureNetworkPluginName {
    301  		// There is a co-dependency between cloud-provider and CNI, so we install both together when cloud-provider is external.
   302  		EnsureCNIAndCloudProviderAzureHelmChart(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
   303  	} else {
   304  		EnsureCNI(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
   305  	}
   306  	controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
   307  	EnsureAzureDiskCSIDriverHelmChart(ctx, input, installHelmCharts, hasWindows)
   308  	result.ControlPlane = controlPlane
   309  }
   310  
    311  // CheckTestBeforeCleanup checks whether the currently running Ginkgo spec has failed and prints
    312  // a status message before cleanup.
   313  func CheckTestBeforeCleanup() {
   314  	if CurrentSpecReport().State.Is(types.SpecStateFailureStates) {
   315  		Logf("FAILED!")
   316  	}
   317  	Logf("Cleaning up after \"%s\" spec", CurrentSpecReport().FullText())
   318  }
   319  
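         // discoveryAndWaitForControlPlaneInitialized discovers the KubeadmControlPlane owned by
         // result.Cluster and waits for it to be initialized, returning the discovered object.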
   320  func discoveryAndWaitForControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) *kubeadmv1.KubeadmControlPlane {
   321  	return framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{
   322  		Lister:  input.ClusterProxy.GetClient(),
   323  		Cluster: result.Cluster,
   324  	}, input.WaitForControlPlaneIntervals...)
   325  }
   326  
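         // createApplyClusterTemplateInput builds an ApplyClusterTemplateAndWaitInput with default values
         // for specName and then applies the given functional options (the with* helpers below) on top.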
   327  func createApplyClusterTemplateInput(specName string, changes ...func(*clusterctl.ApplyClusterTemplateAndWaitInput)) clusterctl.ApplyClusterTemplateAndWaitInput {
   328  	input := clusterctl.ApplyClusterTemplateAndWaitInput{
   329  		ClusterProxy: bootstrapClusterProxy,
   330  		ConfigCluster: clusterctl.ConfigClusterInput{
   331  			LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
   332  			ClusterctlConfigPath:     clusterctlConfigPath,
   333  			KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
   334  			InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
   335  			Flavor:                   clusterctl.DefaultFlavor,
   336  			Namespace:                "default",
   337  			ClusterName:              "cluster",
   338  			KubernetesVersion:        e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
   339  			ControlPlaneMachineCount: ptr.To[int64](1),
   340  			WorkerMachineCount:       ptr.To[int64](1),
   341  		},
   342  		WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
   343  		WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
   344  		WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
   345  		WaitForMachinePools:          e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
   346  		CNIManifestPath:              "",
   347  	}
   348  	for _, change := range changes {
   349  		change(&input)
   350  	}
   351  
   352  	return input
   353  }
   354  
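         // A typical spec composes the with* functional options along these lines (illustrative sketch
         // only; the spec name, flavor, and cluster name are placeholders, not values defined here):
         //
         //	clusterctl.ApplyClusterTemplateAndWait(ctx, createApplyClusterTemplateInput(
         //		"my-spec",
         //		withFlavor("ipv6"),
         //		withNamespace(namespace.Name),
         //		withClusterName("capz-e2e-cluster"),
         //		withControlPlaneMachineCount(3),
         //		withWorkerMachineCount(2),
         //	), result)
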
   355  func withClusterProxy(proxy framework.ClusterProxy) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   356  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   357  		input.ClusterProxy = proxy
   358  	}
   359  }
   360  
   361  func withFlavor(flavor string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   362  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   363  		input.ConfigCluster.Flavor = flavor
   364  	}
   365  }
   366  
   367  func withNamespace(namespace string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   368  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   369  		input.ConfigCluster.Namespace = namespace
   370  	}
   371  }
   372  
   373  func withClusterName(clusterName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   374  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   375  		input.ConfigCluster.ClusterName = clusterName
   376  	}
   377  }
   378  
   379  func withKubernetesVersion(version string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   380  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   381  		input.ConfigCluster.KubernetesVersion = version
   382  	}
   383  }
   384  
   385  func withControlPlaneMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   386  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   387  		input.ConfigCluster.ControlPlaneMachineCount = ptr.To[int64](count)
   388  	}
   389  }
   390  
   391  func withWorkerMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   392  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   393  		input.ConfigCluster.WorkerMachineCount = ptr.To[int64](count)
   394  	}
   395  }
   396  
   397  func withClusterInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   398  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   399  		if intervalName != "" {
   400  			input.WaitForClusterIntervals = e2eConfig.GetIntervals(specName, intervalName)
   401  		}
   402  	}
   403  }
   404  
   405  func withControlPlaneInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   406  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   407  		if intervalName != "" {
   408  			input.WaitForControlPlaneIntervals = e2eConfig.GetIntervals(specName, intervalName)
   409  		}
   410  	}
   411  }
   412  
   413  func withMachineDeploymentInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   414  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   415  		if intervalName != "" {
   416  			input.WaitForMachineDeployments = e2eConfig.GetIntervals(specName, intervalName)
   417  		}
   418  	}
   419  }
   420  
   421  func withMachinePoolInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   422  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   423  		if intervalName != "" {
   424  			input.WaitForMachinePools = e2eConfig.GetIntervals(specName, intervalName)
   425  		}
   426  	}
   427  }
   428  
   429  func withControlPlaneWaiters(waiters clusterctl.ControlPlaneWaiters) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   430  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   431  		input.ControlPlaneWaiters = waiters
   432  	}
   433  }
   434  
   435  func withPostMachinesProvisioned(postMachinesProvisioned func()) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   436  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   437  		input.PostMachinesProvisioned = postMachinesProvisioned
   438  	}
   439  }
   440  
   441  func withAzureCNIv1Manifest(manifestPath string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
   442  	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
   443  		input.CNIManifestPath = manifestPath
   444  	}
   445  }