sigs.k8s.io/cluster-api-provider-azure@v1.17.0/test/e2e/aks.go

//go:build e2e
// +build e2e

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"

	asocontainerservicev1 "github.com/Azure/azure-service-operator/v2/api/containerservice/v1api20231001"
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	infrav1alpha "sigs.k8s.io/cluster-api-provider-azure/api/v1alpha1"
	infrav1 "sigs.k8s.io/cluster-api-provider-azure/api/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// DiscoverAndWaitForAKSControlPlaneInput contains the fields required for checking the status of the Azure managed control plane.
type DiscoverAndWaitForAKSControlPlaneInput struct {
	Lister  framework.Lister   // Lister lists the CAPZ machine pool objects.
	Getter  framework.Getter   // Getter fetches the owning MachinePools.
	Cluster *clusterv1.Cluster // Cluster is the workload cluster being checked.
}

// WaitForAKSControlPlaneInitialized waits for the Azure managed control plane to be initialized.
// This will be invoked by the Cluster API e2e framework.
func WaitForAKSControlPlaneInitialized(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
	client := input.ClusterProxy.GetClient()
	cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{
		Getter:    client,
		Name:      input.ClusterName,
		Namespace: input.Namespace,
	})

	DiscoverAndWaitForAKSControlPlaneInitialized(ctx, DiscoverAndWaitForAKSControlPlaneInput{
		Lister:  client,
		Getter:  client,
		Cluster: result.Cluster,
	}, input.WaitForControlPlaneIntervals...)
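	// InstallCNIManifest needs the cluster's service CIDR blocks, so it is only invoked
	// when the cluster network defines them.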
	if cluster.Spec.ClusterNetwork != nil && cluster.Spec.ClusterNetwork.Services != nil {
		InstallCNIManifest(ctx, input, cluster.Spec.ClusterNetwork.Services.CIDRBlocks, true)
	}
}

// WaitForAKSControlPlaneReady waits for the Azure managed control plane to be ready.
// This will be invoked by the Cluster API e2e framework.
func WaitForAKSControlPlaneReady(ctx context.Context, input clusterctl.ApplyCustomClusterTemplateAndWaitInput, result *clusterctl.ApplyCustomClusterTemplateAndWaitResult) {
	client := input.ClusterProxy.GetClient()
	DiscoverAndWaitForAKSControlPlaneReady(ctx, DiscoverAndWaitForAKSControlPlaneInput{
		Lister:  client,
		Getter:  client,
		Cluster: result.Cluster,
	}, input.WaitForControlPlaneIntervals...)
}

// DiscoverAndWaitForAKSControlPlaneInitialized gets the Azure managed control plane associated with the cluster
// and waits for at least one machine in the "system" node pool to exist.
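//
// A typical call from a spec looks roughly like this (an illustrative sketch; the proxy,
// config, and interval names are placeholders rather than identifiers defined in this file):
//
//	cl := clusterProxy.GetClient()
//	DiscoverAndWaitForAKSControlPlaneInitialized(ctx, DiscoverAndWaitForAKSControlPlaneInput{
//		Lister:  cl,
//		Getter:  cl,
//		Cluster: cluster,
//	}, e2eConfig.GetIntervals(specName, "wait-control-plane")...)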
func DiscoverAndWaitForAKSControlPlaneInitialized(ctx context.Context, input DiscoverAndWaitForAKSControlPlaneInput, intervals ...interface{}) {
	Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverAndWaitForAKSControlPlaneInitialized")
	Expect(input.Lister).NotTo(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForAKSControlPlaneInitialized")
	Expect(input.Cluster).NotTo(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForAKSControlPlaneInitialized")

	controlPlaneNamespace := input.Cluster.Spec.ControlPlaneRef.Namespace
	controlPlaneName := input.Cluster.Spec.ControlPlaneRef.Name

	Logf("Waiting for the first AKS machine in the %s/%s 'system' node pool to exist", controlPlaneNamespace, controlPlaneName)
	WaitForAtLeastOneSystemNodePoolMachineToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
		Lister:      input.Lister,
		Getter:      input.Getter,
		ClusterName: input.Cluster.Name,
		Namespace:   input.Cluster.Namespace,
	}, intervals...)
}

// DiscoverAndWaitForAKSControlPlaneReady gets the Azure managed control plane associated with the cluster
// and waits for all the machines in the 'system' node pool to exist.
func DiscoverAndWaitForAKSControlPlaneReady(ctx context.Context, input DiscoverAndWaitForAKSControlPlaneInput, intervals ...interface{}) {
	Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoverAndWaitForAKSControlPlaneReady")
	Expect(input.Lister).NotTo(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoverAndWaitForAKSControlPlaneReady")
	Expect(input.Cluster).NotTo(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoverAndWaitForAKSControlPlaneReady")

	controlPlaneNamespace := input.Cluster.Spec.ControlPlaneRef.Namespace
	controlPlaneName := input.Cluster.Spec.ControlPlaneRef.Name

	Logf("Waiting for all AKS machines in the %s/%s 'system' node pool to exist", controlPlaneNamespace, controlPlaneName)
	WaitForAllControlPlaneAndMachinesToExist(ctx, WaitForControlPlaneAndMachinesReadyInput{
		Lister:      input.Lister,
		Getter:      input.Getter,
		ClusterName: input.Cluster.Name,
		Namespace:   input.Cluster.Namespace,
	}, intervals...)
}

// WaitForControlPlaneAndMachinesReadyInput contains the fields required for checking the status of Azure managed control plane machines.
type WaitForControlPlaneAndMachinesReadyInput struct {
	Lister      framework.Lister
	Getter      framework.Getter
	ClusterName string
	Namespace   string
}

// WaitForAtLeastOneSystemNodePoolMachineToExist waits for at least one machine in the "system" node pool to exist.
func WaitForAtLeastOneSystemNodePoolMachineToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
	By("Waiting for at least one node to exist in the 'system' node pool")
	WaitForAKSSystemNodePoolMachinesToExist(ctx, input, atLeastOne, intervals...)
}

// WaitForAllControlPlaneAndMachinesToExist waits for all machines in the "system" node pool to exist.
func WaitForAllControlPlaneAndMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, intervals ...interface{}) {
	By("Waiting for all nodes to exist in the 'system' node pool")
	WaitForAKSSystemNodePoolMachinesToExist(ctx, input, all, intervals...)
}

// controlPlaneReplicas represents the count of control plane machines.
type controlPlaneReplicas string

const (
	atLeastOne controlPlaneReplicas = "atLeastOne"
	all        controlPlaneReplicas = "all"
)

// value returns the integer equivalent of controlPlaneReplicas.
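// For all, the owning MachinePool's Spec.Replicas is assumed to be non-nil
// (Cluster API normally defaults it).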
func (r controlPlaneReplicas) value(mp *expv1.MachinePool) int {
	switch r {
	case atLeastOne:
		return 1
	case all:
		return int(*mp.Spec.Replicas)
	}
	return 0
}

// WaitForAKSSystemNodePoolMachinesToExist waits for a certain number of machines in the "system" node pool to exist.
func WaitForAKSSystemNodePoolMachinesToExist(ctx context.Context, input WaitForControlPlaneAndMachinesReadyInput, minReplicas controlPlaneReplicas, intervals ...interface{}) {
	Eventually(func() bool {
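		// Gather the cluster's "system"-mode CAPZ machine pools: AzureManagedMachinePools are
		// selected directly by the agent pool mode label, while ASO-based pools are filtered
		// further down because their mode lives in the embedded ASO resource.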
		opt1 := client.InNamespace(input.Namespace)
		opt2 := client.MatchingLabels(map[string]string{
			clusterv1.ClusterNameLabel: input.ClusterName,
		})
		opt3 := client.MatchingLabels(map[string]string{
			infrav1.LabelAgentPoolMode: string(infrav1.NodePoolModeSystem),
		})

		var capzMPs []client.Object

		ammpList := &infrav1.AzureManagedMachinePoolList{}
		asommpList := &infrav1alpha.AzureASOManagedMachinePoolList{}

		if err := input.Lister.List(ctx, ammpList, opt1, opt2, opt3); err != nil {
			LogWarningf("Failed to list AzureManagedMachinePools: %+v", err)
			return false
		}
		for _, ammp := range ammpList.Items {
			capzMPs = append(capzMPs, ptr.To(ammp))
		}

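		// AzureASOManagedMachinePools do not carry the agent pool mode label, so list them by
		// cluster name only and read spec.mode from each embedded ManagedClustersAgentPool resource.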
		if err := input.Lister.List(ctx, asommpList, opt1, opt2); err != nil {
			LogWarningf("Failed to list AzureASOManagedMachinePools: %+v", err)
			return false
		}
		for _, asommp := range asommpList.Items {
			var resources []*unstructured.Unstructured
			for _, resource := range asommp.Spec.Resources {
				u := &unstructured.Unstructured{}
				Expect(u.UnmarshalJSON(resource.Raw)).To(Succeed())
				resources = append(resources, u)
			}
			for _, resource := range resources {
				if resource.GroupVersionKind().Group != asocontainerservicev1.GroupVersion.Group ||
					resource.GroupVersionKind().Kind != "ManagedClustersAgentPool" {
					continue
				}
				mode, _, err := unstructured.NestedString(resource.UnstructuredContent(), "spec", "mode")
				if err != nil {
					LogWarningf("Failed to get spec.mode for AzureASOManagedMachinePool %s/%s: %v", asommp.Namespace, asommp.Name, err)
					continue
				}
				if mode == string(asocontainerservicev1.AgentPoolMode_System) {
					capzMPs = append(capzMPs, ptr.To(asommp))
				}
				break
			}
		}

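		// A system pool is considered present once its owning MachinePool reports at least the
		// requested number of NodeRefs, i.e. nodes that have actually joined the cluster.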
		for _, pool := range capzMPs {
			// Fetch the owning MachinePool.
			for _, ref := range pool.GetOwnerReferences() {
				if ref.Kind != "MachinePool" {
					continue
				}

				ownerMachinePool := &expv1.MachinePool{}
				if err := input.Getter.Get(ctx, types.NamespacedName{Namespace: input.Namespace, Name: ref.Name},
					ownerMachinePool); err != nil {
					LogWarningf("Failed to get machinePool: %+v", err)
					return false
				}
				if len(ownerMachinePool.Status.NodeRefs) >= minReplicas.value(ownerMachinePool) {
					return true
				}
			}
		}

		return false
	}, intervals...).Should(BeTrue(), "System machine pools not detected")
}