sigs.k8s.io/cluster-api-provider-azure@v1.17.0/test/e2e/common.go

//go:build e2e
// +build e2e

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strings"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources"
	. "github.com/onsi/ginkgo/v2"
	"github.com/onsi/ginkgo/v2/types"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/utils/ptr"
	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
	"sigs.k8s.io/cluster-api-provider-azure/azure"
	e2e_namespace "sigs.k8s.io/cluster-api-provider-azure/test/e2e/kubernetes/namespace"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	kubeadmv1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	capi_e2e "sigs.k8s.io/cluster-api/test/e2e"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util/kubeconfig"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Test suite constants for e2e config variables.
const (
	AddonsPath                        = "ADDONS_PATH"
	RedactLogScriptPath               = "REDACT_LOG_SCRIPT"
	AzureLocation                     = "AZURE_LOCATION"
	AzureExtendedLocationType         = "AZURE_EXTENDEDLOCATION_TYPE"
	AzureExtendedLocationName         = "AZURE_EXTENDEDLOCATION_NAME"
	AzureResourceGroup                = "AZURE_RESOURCE_GROUP"
	AzureCustomVnetResourceGroup      = "AZURE_CUSTOM_VNET_RESOURCE_GROUP"
	AzureVNetName                     = "AZURE_VNET_NAME"
	AzureCustomVNetName               = "AZURE_CUSTOM_VNET_NAME"
	AzureInternalLBIP                 = "AZURE_INTERNAL_LB_IP"
	AzureCPSubnetCidr                 = "AZURE_CP_SUBNET_CIDR"
	AzureVNetCidr                     = "AZURE_PRIVATE_VNET_CIDR"
	AzureNodeSubnetCidr               = "AZURE_NODE_SUBNET_CIDR"
	AzureBastionSubnetCidr            = "AZURE_BASTION_SUBNET_CIDR"
	ClusterIdentityName               = "CLUSTER_IDENTITY_NAME"
	ClusterIdentityNamespace          = "CLUSTER_IDENTITY_NAMESPACE"
	AzureClientID                     = "AZURE_CLIENT_ID"
	AzureClientIDUserAssignedIdentity = "AZURE_CLIENT_ID_USER_ASSIGNED_IDENTITY"
	AzureSubscriptionID               = "AZURE_SUBSCRIPTION_ID"
	AzureTenantID                     = "AZURE_TENANT_ID"
	AzureUserIdentity                 = "USER_IDENTITY"
	AzureIdentityResourceGroup        = "CI_RG"
	JobName                           = "JOB_NAME"
	Timestamp                         = "TIMESTAMP"
	AKSKubernetesVersion              = "AKS_KUBERNETES_VERSION"
	AKSKubernetesVersionUpgradeFrom   = "AKS_KUBERNETES_VERSION_UPGRADE_FROM"
	FlatcarKubernetesVersion          = "FLATCAR_KUBERNETES_VERSION"
	FlatcarVersion                    = "FLATCAR_VERSION"
	SecurityScanFailThreshold         = "SECURITY_SCAN_FAIL_THRESHOLD"
	SecurityScanContainer             = "SECURITY_SCAN_CONTAINER"
	CalicoVersion                     = "CALICO_VERSION"
	ManagedClustersResourceType       = "managedClusters"
	capiImagePublisher                = "cncf-upstream"
	capiOfferName                     = "capi"
	capiWindowsOfferName              = "capi-windows"
	aksClusterNameSuffix              = "aks"
	flatcarCAPICommunityGallery       = "flatcar4capi-742ef0cb-dcaa-4ecb-9cb0-bfd2e43dccc0"
	defaultNamespace                  = "default"
	AzureCNIv1Manifest                = "AZURE_CNI_V1_MANIFEST_PATH"
	OldProviderUpgradeVersion         = "OLD_PROVIDER_UPGRADE_VERSION"
	LatestProviderUpgradeVersion      = "LATEST_PROVIDER_UPGRADE_VERSION"
	OldCAPIUpgradeVersion             = "OLD_CAPI_UPGRADE_VERSION"
	LatestCAPIUpgradeVersion          = "LATEST_CAPI_UPGRADE_VERSION"
	OldAddonProviderUpgradeVersion    = "OLD_CAAPH_UPGRADE_VERSION"
	LatestAddonProviderUpgradeVersion = "LATEST_CAAPH_UPGRADE_VERSION"
	KubernetesVersionAPIUpgradeFrom   = "KUBERNETES_VERSION_API_UPGRADE_FROM"
)
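
// exampleGetLocation is a minimal usage sketch, not part of the upstream file: the
// constants above are keys into the e2e config variables and are resolved through
// the suite-level e2eConfig that the helpers below already rely on.
func exampleGetLocation() string {
	// Fail fast if the variable is missing, then return its configured value.
	Expect(e2eConfig.Variables).To(HaveKey(AzureLocation))
	return e2eConfig.GetVariable(AzureLocation)
}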

func Byf(format string, a ...interface{}) {
	By(fmt.Sprintf(format, a...))
}

func setupSpecNamespace(ctx context.Context, namespaceName string, clusterProxy framework.ClusterProxy, artifactFolder string) (*corev1.Namespace, context.CancelFunc, error) {
	Byf("Creating namespace %q for hosting the cluster", namespaceName)
	Logf("starting to create namespace for hosting the %q test spec", namespaceName)
	logPath := filepath.Join(artifactFolder, "clusters", clusterProxy.GetName())
	namespace, err := e2e_namespace.Get(ctx, clusterProxy.GetClientSet(), namespaceName)
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, nil, err
	}

	// The namespace already exists, so just wire up the event watcher.
	if err == nil {
		Byf("Creating event watcher for existing namespace %q", namespace.Name)
		watchesCtx, cancelWatches := context.WithCancel(ctx)
		go func() {
			defer GinkgoRecover()
			framework.WatchNamespaceEvents(watchesCtx, framework.WatchNamespaceEventsInput{
				ClientSet: clusterProxy.GetClientSet(),
				Name:      namespace.Name,
				LogFolder: logPath,
			})
		}()

		return namespace, cancelWatches, nil
	}

	// Otherwise, create the namespace and wire up the event watcher.
	namespace, cancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{
		Creator:   clusterProxy.GetClient(),
		ClientSet: clusterProxy.GetClientSet(),
		Name:      namespaceName,
		LogFolder: logPath,
	})

	return namespace, cancelWatches, nil
}
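
// exampleSetupSpec is an illustrative sketch, not part of the upstream file: a spec's
// setup typically creates (or adopts) its namespace against the bootstrap cluster and
// keeps the returned cancel func for the cleanup call made in AfterEach.
func exampleSetupSpec(ctx context.Context, specName string) (*corev1.Namespace, context.CancelFunc) {
	namespace, cancelWatches, err := setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder)
	Expect(err).NotTo(HaveOccurred())
	return namespace, cancelWatches
}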

type cleanupInput struct {
	SpecName               string
	ClusterProxy           framework.ClusterProxy
	ArtifactFolder         string
	Namespace              *corev1.Namespace
	CancelWatches          context.CancelFunc
	Cluster                *clusterv1.Cluster
	IntervalsGetter        func(spec, key string) []interface{}
	SkipCleanup            bool
	SkipLogCollection      bool
	AdditionalCleanup      func()
	SkipResourceGroupCheck bool
}

func dumpSpecResourcesAndCleanup(ctx context.Context, input cleanupInput) {
	defer func() {
		input.CancelWatches()
		redactLogs()
	}()

	Logf("Dumping all the Cluster API resources in the %q namespace", input.Namespace.Name)
	// Dump all Cluster API related resources to artifacts before deleting them.
	framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{
		Lister:    input.ClusterProxy.GetClient(),
		Namespace: input.Namespace.Name,
		LogPath:   filepath.Join(input.ArtifactFolder, "clusters", input.ClusterProxy.GetName(), "resources"),
	})

	if input.Cluster == nil {
		By("Unable to dump workload cluster logs as the cluster is nil")
	} else if !input.SkipLogCollection {
		Byf("Dumping logs from the %q workload cluster", input.Cluster.Name)
		input.ClusterProxy.CollectWorkloadClusterLogs(ctx, input.Cluster.Namespace, input.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", input.Cluster.Name))
	}

	if input.SkipCleanup {
		return
	}

	Logf("Deleting all clusters in the %q namespace", input.Namespace.Name)
	// Until https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in a future iteration,
	// there is a chance that the cluster variable is not set even though the cluster exists, so we call
	// DeleteAllClustersAndWait instead of DeleteClusterAndWait.
	deleteTimeoutConfig := "wait-delete-cluster"
	if input.Cluster != nil && strings.Contains(input.Cluster.Name, aksClusterNameSuffix) {
		deleteTimeoutConfig = "wait-delete-cluster-aks"
	}
	framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{
		Client:    input.ClusterProxy.GetClient(),
		Namespace: input.Namespace.Name,
	}, input.IntervalsGetter(input.SpecName, deleteTimeoutConfig)...)

	Logf("Deleting namespace used for hosting the %q test spec", input.SpecName)
	framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{
		Deleter: input.ClusterProxy.GetClient(),
		Name:    input.Namespace.Name,
	})

	if input.AdditionalCleanup != nil {
		Logf("Running additional cleanup for the %q test spec", input.SpecName)
		input.AdditionalCleanup()
	}

	Logf("Checking if any resources are left over in Azure for spec %q", input.SpecName)

	if !input.SkipResourceGroupCheck {
		ExpectResourceGroupToBe404(ctx)
	}
}
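
// exampleCleanup is an illustrative sketch of a typical AfterEach for a spec in this
// suite; it is not part of the upstream file. The spec name literal and the tracked
// namespace/cancelWatches/cluster variables are placeholders chosen for illustration.
func exampleCleanup(ctx context.Context, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster) {
	dumpSpecResourcesAndCleanup(ctx, cleanupInput{
		SpecName:        "example-spec",
		ClusterProxy:    bootstrapClusterProxy,
		ArtifactFolder:  artifactFolder,
		Namespace:       namespace,
		CancelWatches:   cancelWatches,
		Cluster:         cluster,
		IntervalsGetter: e2eConfig.GetIntervals,
		SkipCleanup:     false, // real specs wire this to the suite's skip-cleanup setting
	})
}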

// ExpectResourceGroupToBe404 performs a GET request to Azure to determine if the cluster resource group still exists.
// If it does still exist, it means the cluster was not deleted and is leaking Azure resources.
func ExpectResourceGroupToBe404(ctx context.Context) {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	Expect(err).NotTo(HaveOccurred())
	groupsClient, err := armresources.NewResourceGroupsClient(getSubscriptionID(Default), cred, nil)
	Expect(err).NotTo(HaveOccurred())
	_, err = groupsClient.Get(ctx, os.Getenv(AzureResourceGroup), nil)
	Expect(azure.ResourceNotFound(err)).To(BeTrue(), "The resource group in Azure still exists. After deleting the cluster all of the Azure resources should also be deleted.")
}

func redactLogs() {
	By("Redacting sensitive information from logs")
	Expect(e2eConfig.Variables).To(HaveKey(RedactLogScriptPath))
	//nolint:gosec // Ignore warning about running a command constructed from user input
	cmd := exec.Command(e2eConfig.GetVariable(RedactLogScriptPath))
	if err := cmd.Run(); err != nil {
		LogWarningf("Redact logs command failed: %v", err)
	}
}

func createRestConfig(ctx context.Context, tmpdir, namespace, clusterName string) *rest.Config {
	cluster := client.ObjectKey{
		Namespace: namespace,
		Name:      clusterName,
	}
	kubeConfigData, err := kubeconfig.FromSecret(ctx, bootstrapClusterProxy.GetClient(), cluster)
	Expect(err).NotTo(HaveOccurred())

	kubeConfigPath := path.Join(tmpdir, clusterName+".kubeconfig")
	Expect(os.WriteFile(kubeConfigPath, kubeConfigData, 0o600)).To(Succeed())

	config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
	Expect(err).NotTo(HaveOccurred())

	return config
}
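
// exampleWorkloadClusterClient is an illustrative sketch, not part of the upstream
// file: the *rest.Config returned above can be turned into a controller-runtime client
// for ad-hoc reads against the workload cluster. Callers are assumed to own tmpdir
// (for example, a directory created via os.MkdirTemp).
func exampleWorkloadClusterClient(ctx context.Context, tmpdir, namespace, clusterName string) client.Client {
	config := createRestConfig(ctx, tmpdir, namespace, clusterName)
	c, err := client.New(config, client.Options{})
	Expect(err).NotTo(HaveOccurred())
	return c
}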

// EnsureControlPlaneInitialized waits for the cluster KubeadmControlPlane object to be initialized
// and then installs cloud-provider-azure components via Helm.
// Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
// in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
func EnsureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
	ensureControlPlaneInitialized(ctx, input, result, true)
}

// EnsureControlPlaneInitializedNoAddons waits for the cluster KubeadmControlPlane object to be initialized
// without installing the cloud-provider-azure and CNI add-ons via Helm.
// Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
// in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
func EnsureControlPlaneInitializedNoAddons(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
	ensureControlPlaneInitialized(ctx, input, result, false)
}
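
// exampleControlPlaneWaiters is an illustrative sketch, not part of the upstream file,
// of how the waiters above are typically plugged into a spec's input as
// ControlPlaneWaiters (consumed via the withControlPlaneWaiters helper defined later
// in this file). The exact ControlPlaneWaiters field layout is assumed from the
// cluster-api test framework's clusterctl package.
func exampleControlPlaneWaiters() clusterctl.ControlPlaneWaiters {
	return clusterctl.ControlPlaneWaiters{
		WaitForControlPlaneInitialized: EnsureControlPlaneInitialized,
	}
}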

// ensureControlPlaneInitialized waits for the cluster KubeadmControlPlane object to be initialized
// and then, when installHelmCharts is true, installs cloud-provider-azure and CNI components via Helm.
// Fulfills the clusterctl.Waiter type so that it can be used as ApplyClusterTemplateAndWaitInput data
// in the flow of a clusterctl.ApplyClusterTemplateAndWait E2E test scenario.
func ensureControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult, installHelmCharts bool) {
	getter := input.ClusterProxy.GetClient()
	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
		Getter:    getter,
		Name:      input.ClusterName,
		Namespace: input.Namespace,
	})
	kubeadmControlPlane := &kubeadmv1.KubeadmControlPlane{}
	key := client.ObjectKey{
		Namespace: cluster.Spec.ControlPlaneRef.Namespace,
		Name:      cluster.Spec.ControlPlaneRef.Name,
	}

	By("Ensuring KubeadmControlPlane is initialized")
	Eventually(func(g Gomega) {
		g.Expect(getter.Get(ctx, key, kubeadmControlPlane)).To(Succeed(), "Failed to get KubeadmControlPlane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)
		g.Expect(kubeadmControlPlane.Status.Initialized).To(BeTrue(), "KubeadmControlPlane is not yet initialized")
	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "KubeadmControlPlane object %s/%s was not initialized in time", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)

	By("Ensuring API Server is reachable before applying Helm charts")
	Eventually(func(g Gomega) {
		ns := &corev1.Namespace{}
		clusterProxy := input.ClusterProxy.GetWorkloadCluster(ctx, input.Namespace, input.ClusterName)
		g.Expect(clusterProxy.GetClient().Get(ctx, client.ObjectKey{Name: kubesystem}, ns)).To(Succeed(), "Failed to get kube-system namespace")
	}, input.WaitForControlPlaneIntervals...).Should(Succeed(), "API Server was not reachable in time")

	_, hasWindows := cluster.Labels["cni-windows"]

	if kubeadmControlPlane.Spec.KubeadmConfigSpec.ClusterConfiguration.ControllerManager.ExtraArgs["cloud-provider"] != infrav1.AzureNetworkPluginName {
		// There is a co-dependency between cloud-provider and CNI, so we install both together when the cloud provider is external.
		EnsureCNIAndCloudProviderAzureHelmChart(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
	} else {
		EnsureCNI(ctx, input, installHelmCharts, cluster.Spec.ClusterNetwork.Pods.CIDRBlocks, hasWindows)
	}
	controlPlane := discoveryAndWaitForControlPlaneInitialized(ctx, input, result)
	EnsureAzureDiskCSIDriverHelmChart(ctx, input, installHelmCharts, hasWindows)
	result.ControlPlane = controlPlane
}

// CheckTestBeforeCleanup checks whether the currently running Ginkgo test has failed, and prints
// a status message before cleanup begins.
func CheckTestBeforeCleanup() {
	if CurrentSpecReport().State.Is(types.SpecStateFailureStates) {
		Logf("FAILED!")
	}
	Logf("Cleaning up after \"%s\" spec", CurrentSpecReport().FullText())
}

func discoveryAndWaitForControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) *kubeadmv1.KubeadmControlPlane {
	return framework.DiscoveryAndWaitForControlPlaneInitialized(ctx, framework.DiscoveryAndWaitForControlPlaneInitializedInput{
		Lister:  input.ClusterProxy.GetClient(),
		Cluster: result.Cluster,
	}, input.WaitForControlPlaneIntervals...)
}

func createApplyClusterTemplateInput(specName string, changes ...func(*clusterctl.ApplyClusterTemplateAndWaitInput)) clusterctl.ApplyClusterTemplateAndWaitInput {
	input := clusterctl.ApplyClusterTemplateAndWaitInput{
		ClusterProxy: bootstrapClusterProxy,
		ConfigCluster: clusterctl.ConfigClusterInput{
			LogFolder:                filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()),
			ClusterctlConfigPath:     clusterctlConfigPath,
			KubeconfigPath:           bootstrapClusterProxy.GetKubeconfigPath(),
			InfrastructureProvider:   clusterctl.DefaultInfrastructureProvider,
			Flavor:                   clusterctl.DefaultFlavor,
			Namespace:                "default",
			ClusterName:              "cluster",
			KubernetesVersion:        e2eConfig.GetVariable(capi_e2e.KubernetesVersion),
			ControlPlaneMachineCount: ptr.To[int64](1),
			WorkerMachineCount:       ptr.To[int64](1),
		},
		WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
		WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
		WaitForMachineDeployments:    e2eConfig.GetIntervals(specName, "wait-worker-nodes"),
		WaitForMachinePools:          e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
		CNIManifestPath:              "",
	}
	for _, change := range changes {
		change(&input)
	}

	return input
}

func withClusterProxy(proxy framework.ClusterProxy) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ClusterProxy = proxy
	}
}

func withFlavor(flavor string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.Flavor = flavor
	}
}

func withNamespace(namespace string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.Namespace = namespace
	}
}

func withClusterName(clusterName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.ClusterName = clusterName
	}
}

func withKubernetesVersion(version string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.KubernetesVersion = version
	}
}

func withControlPlaneMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.ControlPlaneMachineCount = ptr.To[int64](count)
	}
}

func withWorkerMachineCount(count int64) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ConfigCluster.WorkerMachineCount = ptr.To[int64](count)
	}
}

func withClusterInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		if intervalName != "" {
			input.WaitForClusterIntervals = e2eConfig.GetIntervals(specName, intervalName)
		}
	}
}

func withControlPlaneInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		if intervalName != "" {
			input.WaitForControlPlaneIntervals = e2eConfig.GetIntervals(specName, intervalName)
		}
	}
}

func withMachineDeploymentInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		if intervalName != "" {
			input.WaitForMachineDeployments = e2eConfig.GetIntervals(specName, intervalName)
		}
	}
}

func withMachinePoolInterval(specName string, intervalName string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		if intervalName != "" {
			input.WaitForMachinePools = e2eConfig.GetIntervals(specName, intervalName)
		}
	}
}

func withControlPlaneWaiters(waiters clusterctl.ControlPlaneWaiters) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.ControlPlaneWaiters = waiters
	}
}

func withPostMachinesProvisioned(postMachinesProvisioned func()) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.PostMachinesProvisioned = postMachinesProvisioned
	}
}

func withAzureCNIv1Manifest(manifestPath string) func(*clusterctl.ApplyClusterTemplateAndWaitInput) {
	return func(input *clusterctl.ApplyClusterTemplateAndWaitInput) {
		input.CNIManifestPath = manifestPath
	}
}
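
// exampleApplyClusterTemplateInput is an illustrative sketch combining the helpers
// above; it is not part of the upstream file. The spec name and machine counts are
// placeholders chosen for illustration, and the control-plane waiter wiring reuses
// the exampleControlPlaneWaiters sketch defined earlier in this file.
func exampleApplyClusterTemplateInput(namespace, clusterName string) clusterctl.ApplyClusterTemplateAndWaitInput {
	return createApplyClusterTemplateInput(
		"example-spec",
		withNamespace(namespace),
		withClusterName(clusterName),
		withFlavor(clusterctl.DefaultFlavor),
		withControlPlaneMachineCount(3),
		withWorkerMachineCount(2),
		withControlPlaneWaiters(exampleControlPlaneWaiters()),
	)
}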