sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/topology/cluster/desired_state_test.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"strings"
    21  	"testing"
    22  	"time"
    23  
    24  	"github.com/google/go-cmp/cmp"
    25  	. "github.com/onsi/gomega"
    26  	corev1 "k8s.io/api/core/v1"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    29  	"k8s.io/apimachinery/pkg/util/intstr"
    30  	utilfeature "k8s.io/component-base/featuregate/testing"
    31  	"k8s.io/utils/pointer"
    32  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    33  
    34  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    35  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    36  	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
    37  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    38  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    39  	"sigs.k8s.io/cluster-api/feature"
    40  	"sigs.k8s.io/cluster-api/internal/contract"
    41  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
    42  	"sigs.k8s.io/cluster-api/internal/hooks"
    43  	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
    44  	"sigs.k8s.io/cluster-api/internal/test/builder"
    45  	"sigs.k8s.io/cluster-api/internal/topology/names"
    46  	"sigs.k8s.io/cluster-api/util"
    47  )
    48  
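         // fakeRef1 and fakeRef2 stand in for provider-specific object references in the
         // tests below; only Kind, Namespace, Name, and APIVersion matter to the code under test.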
    49  var (
    50  	fakeRef1 = &corev1.ObjectReference{
    51  		Kind:       "refKind1",
    52  		Namespace:  "refNamespace1",
    53  		Name:       "refName1",
    54  		APIVersion: "refAPIVersion1",
    55  	}
    56  
    57  	fakeRef2 = &corev1.ObjectReference{
    58  		Kind:       "refKind2",
    59  		Namespace:  "refNamespace2",
    60  		Name:       "refName2",
    61  		APIVersion: "refAPIVersion2",
    62  	}
    63  )
    64  
    65  func TestComputeInfrastructureCluster(t *testing.T) {
    66  	// templates and ClusterClass
    67  	infrastructureClusterTemplate := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "template1").
    68  		Build()
    69  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
    70  		WithInfrastructureClusterTemplate(infrastructureClusterTemplate).
    71  		Build()
    72  
    73  	// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
    74  	blueprint := &scope.ClusterBlueprint{
    75  		ClusterClass:                  clusterClass,
    76  		InfrastructureClusterTemplate: infrastructureClusterTemplate,
    77  	}
    78  
    79  	// current cluster objects
    80  	cluster := &clusterv1.Cluster{
    81  		ObjectMeta: metav1.ObjectMeta{
    82  			Name:      "cluster1",
    83  			Namespace: metav1.NamespaceDefault,
    84  		},
    85  	}
    86  
    87  	t.Run("Generates the infrastructureCluster from the template", func(t *testing.T) {
    88  		g := NewWithT(t)
    89  
    90  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
    91  		scope := scope.New(cluster)
    92  		scope.Blueprint = blueprint
    93  
    94  		obj, err := computeInfrastructureCluster(ctx, scope)
    95  		g.Expect(err).ToNot(HaveOccurred())
    96  		g.Expect(obj).ToNot(BeNil())
    97  
    98  		assertTemplateToObject(g, assertTemplateInput{
    99  			cluster:     scope.Current.Cluster,
   100  			templateRef: blueprint.ClusterClass.Spec.Infrastructure.Ref,
   101  			template:    blueprint.InfrastructureClusterTemplate,
   102  			labels:      nil,
   103  			annotations: nil,
   104  			currentRef:  nil,
   105  			obj:         obj,
   106  		})
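         		// assertTemplateToObject is a test helper (defined elsewhere in this file); from its
         		// usage here it is assumed to check that the generated object is derived from the
         		// given template and carries the expected labels, annotations, and current ref.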
   107  
   108  		// Ensure no ownership is added to generated InfrastructureCluster.
   109  		g.Expect(obj.GetOwnerReferences()).To(BeEmpty())
   110  	})
   111  	t.Run("If there is already a reference to the infrastructureCluster, it preserves the reference name", func(t *testing.T) {
   112  		g := NewWithT(t)
   113  
   114  		// current cluster objects for the test scenario
   115  		clusterWithInfrastructureRef := cluster.DeepCopy()
   116  		clusterWithInfrastructureRef.Spec.InfrastructureRef = fakeRef1
   117  
   118  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   119  		scope := scope.New(clusterWithInfrastructureRef)
   120  		scope.Blueprint = blueprint
   121  
   122  		obj, err := computeInfrastructureCluster(ctx, scope)
   123  		g.Expect(err).ToNot(HaveOccurred())
   124  		g.Expect(obj).ToNot(BeNil())
   125  
   126  		assertTemplateToObject(g, assertTemplateInput{
   127  			cluster:     scope.Current.Cluster,
   128  			templateRef: blueprint.ClusterClass.Spec.Infrastructure.Ref,
   129  			template:    blueprint.InfrastructureClusterTemplate,
   130  			labels:      nil,
   131  			annotations: nil,
   132  			currentRef:  scope.Current.Cluster.Spec.InfrastructureRef,
   133  			obj:         obj,
   134  		})
   135  	})
   136  	t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
   137  		g := NewWithT(t)
   138  		shim := clusterShim(cluster)
   139  
   140  		// current cluster objects for the test scenario
   141  		clusterWithInfrastructureRef := cluster.DeepCopy()
   142  		clusterWithInfrastructureRef.Spec.InfrastructureRef = fakeRef1
   143  
   144  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   145  		scope := scope.New(clusterWithInfrastructureRef)
   146  		scope.Current.InfrastructureCluster = infrastructureClusterTemplate.DeepCopy()
   147  		scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim)})
   148  		scope.Blueprint = blueprint
   149  
   150  		obj, err := computeInfrastructureCluster(ctx, scope)
   151  		g.Expect(err).ToNot(HaveOccurred())
   152  		g.Expect(obj).ToNot(BeNil())
   153  		g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue())
   154  	})
   155  }
   156  
   157  func TestComputeControlPlaneInfrastructureMachineTemplate(t *testing.T) {
   158  	// templates and ClusterClass
   159  	labels := map[string]string{"l1": ""}
   160  	annotations := map[string]string{"a1": ""}
   161  
   162  	// current cluster objects
   163  	cluster := &clusterv1.Cluster{
   164  		ObjectMeta: metav1.ObjectMeta{
   165  			Name:      "cluster1",
   166  			Namespace: metav1.NamespaceDefault,
   167  		},
   168  		Spec: clusterv1.ClusterSpec{
   169  			Topology: &clusterv1.Topology{
   170  				ControlPlane: clusterv1.ControlPlaneTopology{
   171  					Metadata: clusterv1.ObjectMeta{
   172  						Labels:      map[string]string{"l2": ""},
   173  						Annotations: map[string]string{"a2": ""},
   174  					},
   175  				},
   176  			},
   177  		},
   178  	}
   179  
   180  	infrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "template1").
   181  		Build()
   182  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   183  		WithControlPlaneMetadata(labels, annotations).
   184  		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate).Build()
   185  
   186  	// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
   187  	blueprint := &scope.ClusterBlueprint{
   188  		Topology:     cluster.Spec.Topology,
   189  		ClusterClass: clusterClass,
   190  		ControlPlane: &scope.ControlPlaneBlueprint{
   191  			InfrastructureMachineTemplate: infrastructureMachineTemplate,
   192  		},
   193  	}
   194  
   195  	t.Run("Generates the infrastructureMachineTemplate from the template", func(t *testing.T) {
   196  		g := NewWithT(t)
   197  
   198  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   199  		scope := scope.New(cluster)
   200  		scope.Blueprint = blueprint
   201  
   202  		obj, err := computeControlPlaneInfrastructureMachineTemplate(ctx, scope)
   203  		g.Expect(err).ToNot(HaveOccurred())
   204  		g.Expect(obj).ToNot(BeNil())
   205  
   206  		assertTemplateToTemplate(g, assertTemplateInput{
   207  			cluster:     scope.Current.Cluster,
   208  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref,
   209  			template:    blueprint.ControlPlane.InfrastructureMachineTemplate,
   210  			currentRef:  nil,
   211  			obj:         obj,
   212  		})
   213  
    214  		// Ensure Cluster ownership is added to the generated InfrastructureMachineTemplate.
   215  		g.Expect(obj.GetOwnerReferences()).To(HaveLen(1))
   216  		g.Expect(obj.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
   217  		g.Expect(obj.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
   218  	})
   219  	t.Run("If there is already a reference to the infrastructureMachineTemplate, it preserves the reference name", func(t *testing.T) {
   220  		g := NewWithT(t)
   221  
   222  		// current cluster objects for the test scenario
   223  		currentInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cluster1-template1").Build()
   224  
   225  		controlPlane := &unstructured.Unstructured{Object: map[string]interface{}{}}
   226  		err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, currentInfrastructureMachineTemplate)
   227  		g.Expect(err).ToNot(HaveOccurred())
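         		// The contract setter above writes the ref to the control plane contract's
         		// well-known path, i.e. spec.machineTemplate.infrastructureRef (assumed from
         		// the cluster-api control plane contract).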
   228  
   229  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   230  		s := scope.New(cluster)
   231  		s.Current.ControlPlane = &scope.ControlPlaneState{
   232  			Object:                        controlPlane,
   233  			InfrastructureMachineTemplate: currentInfrastructureMachineTemplate,
   234  		}
   235  		s.Blueprint = blueprint
   236  
   237  		obj, err := computeControlPlaneInfrastructureMachineTemplate(ctx, s)
   238  		g.Expect(err).ToNot(HaveOccurred())
   239  		g.Expect(obj).ToNot(BeNil())
   240  
   241  		assertTemplateToTemplate(g, assertTemplateInput{
   242  			cluster:     s.Current.Cluster,
   243  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref,
   244  			template:    blueprint.ControlPlane.InfrastructureMachineTemplate,
   245  			currentRef:  contract.ObjToRef(currentInfrastructureMachineTemplate),
   246  			obj:         obj,
   247  		})
   248  	})
   249  }
   250  
   251  func TestComputeControlPlane(t *testing.T) {
   252  	// templates and ClusterClass
   253  	labels := map[string]string{"l1": ""}
   254  	annotations := map[string]string{"a1": ""}
   255  
   256  	controlPlaneTemplate := builder.ControlPlaneTemplate(metav1.NamespaceDefault, "template1").
   257  		Build()
   258  	controlPlaneMachineTemplateLabels := map[string]string{
   259  		"machineTemplateLabel": "machineTemplateLabelValue",
   260  	}
   261  	controlPlaneMachineTemplateAnnotations := map[string]string{
   262  		"machineTemplateAnnotation": "machineTemplateAnnotationValue",
   263  	}
   264  	controlPlaneTemplateWithMachineTemplate := controlPlaneTemplate.DeepCopy()
   265  	_ = contract.ControlPlaneTemplate().Template().MachineTemplate().Metadata().Set(controlPlaneTemplateWithMachineTemplate, &clusterv1.ObjectMeta{
   266  		Labels:      controlPlaneMachineTemplateLabels,
   267  		Annotations: controlPlaneMachineTemplateAnnotations,
   268  	})
   269  	clusterClassDuration := 20 * time.Second
   270  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   271  		WithControlPlaneMetadata(labels, annotations).
   272  		WithControlPlaneTemplate(controlPlaneTemplate).
   273  		WithControlPlaneNodeDrainTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   274  		WithControlPlaneNodeVolumeDetachTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   275  		WithControlPlaneNodeDeletionTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   276  		Build()
   277  	// TODO: Replace with object builder.
   278  	// current cluster objects
   279  	version := "v1.21.2"
   280  	replicas := int32(3)
   281  	topologyDuration := 10 * time.Second
   282  	nodeDrainTimeout := metav1.Duration{Duration: topologyDuration}
   283  	nodeVolumeDetachTimeout := metav1.Duration{Duration: topologyDuration}
   284  	nodeDeletionTimeout := metav1.Duration{Duration: topologyDuration}
   285  	cluster := &clusterv1.Cluster{
   286  		ObjectMeta: metav1.ObjectMeta{
   287  			Name:      "cluster1",
   288  			Namespace: metav1.NamespaceDefault,
   289  		},
   290  		Spec: clusterv1.ClusterSpec{
   291  			Topology: &clusterv1.Topology{
   292  				Version: version,
   293  				ControlPlane: clusterv1.ControlPlaneTopology{
   294  					Metadata: clusterv1.ObjectMeta{
   295  						Labels:      map[string]string{"l2": ""},
   296  						Annotations: map[string]string{"a2": ""},
   297  					},
   298  					Replicas:                &replicas,
   299  					NodeDrainTimeout:        &nodeDrainTimeout,
   300  					NodeVolumeDetachTimeout: &nodeVolumeDetachTimeout,
   301  					NodeDeletionTimeout:     &nodeDeletionTimeout,
   302  				},
   303  			},
   304  		},
   305  	}
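         	// Note: the topology timeouts above (10s) intentionally differ from the ClusterClass
         	// defaults (20s); the tests below verify that topology values win and that the
         	// ClusterClass defaults are only used when the topology leaves them unset.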
   306  
   307  	t.Run("Generates the ControlPlane from the template", func(t *testing.T) {
   308  		g := NewWithT(t)
   309  
   310  		blueprint := &scope.ClusterBlueprint{
   311  			Topology:     cluster.Spec.Topology,
   312  			ClusterClass: clusterClass,
   313  			ControlPlane: &scope.ControlPlaneBlueprint{
   314  				Template: controlPlaneTemplate,
   315  			},
   316  		}
   317  
   318  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   319  		scope := scope.New(cluster)
   320  		scope.Blueprint = blueprint
   321  
   322  		r := &Reconciler{}
   323  
   324  		obj, err := r.computeControlPlane(ctx, scope, nil)
   325  		g.Expect(err).ToNot(HaveOccurred())
   326  		g.Expect(obj).ToNot(BeNil())
   327  
   328  		assertTemplateToObject(g, assertTemplateInput{
   329  			cluster:     scope.Current.Cluster,
   330  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   331  			template:    blueprint.ControlPlane.Template,
   332  			currentRef:  nil,
   333  			obj:         obj,
   334  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   335  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   336  		})
   337  
   338  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   339  		assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...)
   340  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...)
   341  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...)
   342  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...)
   343  		assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   344  
   345  		// Ensure no ownership is added to generated ControlPlane.
   346  		g.Expect(obj.GetOwnerReferences()).To(BeEmpty())
   347  	})
   348  	t.Run("Generates the ControlPlane from the template using ClusterClass defaults", func(t *testing.T) {
   349  		g := NewWithT(t)
   350  
   351  		cluster := &clusterv1.Cluster{
   352  			ObjectMeta: metav1.ObjectMeta{
   353  				Name:      "cluster1",
   354  				Namespace: metav1.NamespaceDefault,
   355  			},
   356  			Spec: clusterv1.ClusterSpec{
   357  				Topology: &clusterv1.Topology{
   358  					Version: version,
   359  					ControlPlane: clusterv1.ControlPlaneTopology{
   360  						Metadata: clusterv1.ObjectMeta{
   361  							Labels:      map[string]string{"l2": ""},
   362  							Annotations: map[string]string{"a2": ""},
   363  						},
   364  						Replicas: &replicas,
   365  						// no values for NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout
   366  					},
   367  				},
   368  			},
   369  		}
   370  
   371  		blueprint := &scope.ClusterBlueprint{
   372  			Topology:     cluster.Spec.Topology,
   373  			ClusterClass: clusterClass,
   374  			ControlPlane: &scope.ControlPlaneBlueprint{
   375  				Template: controlPlaneTemplate,
   376  			},
   377  		}
   378  
   379  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   380  		scope := scope.New(cluster)
   381  		scope.Blueprint = blueprint
   382  
   383  		r := &Reconciler{}
   384  
   385  		obj, err := r.computeControlPlane(ctx, scope, nil)
   386  		g.Expect(err).ToNot(HaveOccurred())
   387  		g.Expect(obj).ToNot(BeNil())
   388  
    389  		// checking only the values coming from the ClusterClass defaults
   390  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...)
   391  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...)
   392  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...)
   393  	})
   394  	t.Run("Skips setting replicas if required", func(t *testing.T) {
   395  		g := NewWithT(t)
   396  
   397  		// current cluster objects
   398  		clusterWithoutReplicas := cluster.DeepCopy()
   399  		clusterWithoutReplicas.Spec.Topology.ControlPlane.Replicas = nil
   400  
   401  		blueprint := &scope.ClusterBlueprint{
   402  			Topology:     clusterWithoutReplicas.Spec.Topology,
   403  			ClusterClass: clusterClass,
   404  			ControlPlane: &scope.ControlPlaneBlueprint{
   405  				Template: controlPlaneTemplate,
   406  			},
   407  		}
   408  
   409  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   410  		scope := scope.New(clusterWithoutReplicas)
   411  		scope.Blueprint = blueprint
   412  
   413  		r := &Reconciler{}
   414  
   415  		obj, err := r.computeControlPlane(ctx, scope, nil)
   416  		g.Expect(err).ToNot(HaveOccurred())
   417  		g.Expect(obj).ToNot(BeNil())
   418  
   419  		assertTemplateToObject(g, assertTemplateInput{
   420  			cluster:     scope.Current.Cluster,
   421  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   422  			template:    blueprint.ControlPlane.Template,
   423  			currentRef:  nil,
   424  			obj:         obj,
   425  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   426  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   427  		})
   428  
   429  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   430  		assertNestedFieldUnset(g, obj, contract.ControlPlane().Replicas().Path()...)
   431  		assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   432  	})
   433  	t.Run("Generates the ControlPlane from the template and adds the infrastructure machine template if required", func(t *testing.T) {
   434  		g := NewWithT(t)
   435  
   436  		// templates and ClusterClass
   437  		infrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "template1").Build()
   438  		clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   439  			WithControlPlaneMetadata(labels, annotations).
   440  			WithControlPlaneTemplate(controlPlaneTemplateWithMachineTemplate).
   441  			WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate).Build()
   442  
   443  		// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
   444  		blueprint := &scope.ClusterBlueprint{
   445  			Topology:     cluster.Spec.Topology,
   446  			ClusterClass: clusterClass,
   447  			ControlPlane: &scope.ControlPlaneBlueprint{
   448  				Template:                      controlPlaneTemplateWithMachineTemplate,
   449  				InfrastructureMachineTemplate: infrastructureMachineTemplate,
   450  			},
   451  		}
   452  
   453  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   454  		s := scope.New(cluster)
   455  		s.Blueprint = blueprint
   456  		s.Current.ControlPlane = &scope.ControlPlaneState{}
   457  
   458  		r := &Reconciler{}
   459  
   460  		obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
   461  		g.Expect(err).ToNot(HaveOccurred())
   462  		g.Expect(obj).ToNot(BeNil())
   463  
    464  		// machineTemplate is removed from the template before the assertion because we can't
    465  		// simply compare the machineTemplate in the template with the one in the object:
    466  		// computeControlPlane() adds additional fields (e.g. the timeouts) to machineTemplate.
    467  		// Note: machineTemplate is asserted further down instead.
   468  		controlPlaneTemplateWithoutMachineTemplate := blueprint.ControlPlane.Template.DeepCopy()
   469  		unstructured.RemoveNestedField(controlPlaneTemplateWithoutMachineTemplate.Object, "spec", "template", "spec", "machineTemplate")
   470  
   471  		assertTemplateToObject(g, assertTemplateInput{
   472  			cluster:     s.Current.Cluster,
   473  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   474  			template:    controlPlaneTemplateWithoutMachineTemplate,
   475  			currentRef:  nil,
   476  			obj:         obj,
   477  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   478  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   479  		})
   480  		gotMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(obj)
   481  		g.Expect(err).ToNot(HaveOccurred())
   482  
   483  		expectedLabels := util.MergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels, controlPlaneMachineTemplateLabels)
   484  		expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name
   485  		expectedLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   486  		g.Expect(gotMetadata).To(BeComparableTo(&clusterv1.ObjectMeta{
   487  			Labels:      expectedLabels,
   488  			Annotations: util.MergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations, controlPlaneMachineTemplateAnnotations),
   489  		}))
   490  
   491  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   492  		assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...)
   493  		assertNestedField(g, obj, map[string]interface{}{
   494  			"kind":       infrastructureMachineTemplate.GetKind(),
   495  			"namespace":  infrastructureMachineTemplate.GetNamespace(),
   496  			"name":       infrastructureMachineTemplate.GetName(),
   497  			"apiVersion": infrastructureMachineTemplate.GetAPIVersion(),
   498  		}, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   499  	})
   500  	t.Run("If there is already a reference to the ControlPlane, it preserves the reference name", func(t *testing.T) {
   501  		g := NewWithT(t)
   502  
   503  		// current cluster objects for the test scenario
   504  		clusterWithControlPlaneRef := cluster.DeepCopy()
   505  		clusterWithControlPlaneRef.Spec.ControlPlaneRef = fakeRef1
   506  
   507  		blueprint := &scope.ClusterBlueprint{
   508  			Topology:     clusterWithControlPlaneRef.Spec.Topology,
   509  			ClusterClass: clusterClass,
   510  			ControlPlane: &scope.ControlPlaneBlueprint{
   511  				Template: controlPlaneTemplate,
   512  			},
   513  		}
   514  
   515  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   516  		scope := scope.New(clusterWithControlPlaneRef)
   517  		scope.Blueprint = blueprint
   518  
   519  		r := &Reconciler{}
   520  
   521  		obj, err := r.computeControlPlane(ctx, scope, nil)
   522  		g.Expect(err).ToNot(HaveOccurred())
   523  		g.Expect(obj).ToNot(BeNil())
   524  
   525  		assertTemplateToObject(g, assertTemplateInput{
   526  			cluster:     scope.Current.Cluster,
   527  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   528  			template:    blueprint.ControlPlane.Template,
   529  			currentRef:  scope.Current.Cluster.Spec.ControlPlaneRef,
   530  			obj:         obj,
   531  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   532  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   533  		})
   534  	})
   535  	t.Run("Should choose the correct version for control plane", func(t *testing.T) {
    536  		// Note: in all of the following tests we are setting it up so that there are no MachineDeployments.
   537  		// A more extensive list of scenarios is tested in TestComputeControlPlaneVersion.
   538  		tests := []struct {
   539  			name                string
   540  			currentControlPlane *unstructured.Unstructured
   541  			topologyVersion     string
   542  			expectedVersion     string
   543  		}{
   544  			{
   545  				name:                "use cluster.spec.topology.version if creating a new control plane",
   546  				currentControlPlane: nil,
   547  				topologyVersion:     "v1.2.3",
   548  				expectedVersion:     "v1.2.3",
   549  			},
   550  			{
   551  				name: "use controlplane.spec.version if the control plane's spec.version is not equal to status.version",
   552  				currentControlPlane: builder.ControlPlane("test1", "cp1").
   553  					WithSpecFields(map[string]interface{}{
   554  						"spec.version": "v1.2.2",
   555  					}).
   556  					WithStatusFields(map[string]interface{}{
   557  						"status.version": "v1.2.1",
   558  					}).
   559  					Build(),
   560  				topologyVersion: "v1.2.3",
   561  				expectedVersion: "v1.2.2",
   562  			},
   563  		}
   564  
   565  		for _, tt := range tests {
   566  			t.Run(tt.name, func(t *testing.T) {
   567  				g := NewWithT(t)
   568  
   569  				// Current cluster objects for the test scenario.
   570  				clusterWithControlPlaneRef := cluster.DeepCopy()
   571  				clusterWithControlPlaneRef.Spec.ControlPlaneRef = fakeRef1
   572  				clusterWithControlPlaneRef.Spec.Topology.Version = tt.topologyVersion
   573  
   574  				blueprint := &scope.ClusterBlueprint{
   575  					Topology:     clusterWithControlPlaneRef.Spec.Topology,
   576  					ClusterClass: clusterClass,
   577  					ControlPlane: &scope.ControlPlaneBlueprint{
   578  						Template: controlPlaneTemplate,
   579  					},
   580  				}
   581  
   582  				// Aggregating current cluster objects into ClusterState (simulating getCurrentState).
   583  				s := scope.New(clusterWithControlPlaneRef)
   584  				s.Blueprint = blueprint
   585  				s.Current.ControlPlane = &scope.ControlPlaneState{
   586  					Object: tt.currentControlPlane,
   587  				}
   588  
   589  				r := &Reconciler{}
   590  
   591  				obj, err := r.computeControlPlane(ctx, s, nil)
   592  				g.Expect(err).ToNot(HaveOccurred())
   593  				g.Expect(obj).NotTo(BeNil())
   594  				assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...)
   595  			})
   596  		}
   597  	})
   598  	t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
   599  		g := NewWithT(t)
   600  		shim := clusterShim(cluster)
   601  
   602  		// current cluster objects
   603  		clusterWithoutReplicas := cluster.DeepCopy()
   604  		clusterWithoutReplicas.Spec.Topology.ControlPlane.Replicas = nil
   605  
   606  		blueprint := &scope.ClusterBlueprint{
   607  			Topology:     clusterWithoutReplicas.Spec.Topology,
   608  			ClusterClass: clusterClass,
   609  			ControlPlane: &scope.ControlPlaneBlueprint{
   610  				Template: controlPlaneTemplate,
   611  			},
   612  		}
   613  
   614  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   615  		s := scope.New(clusterWithoutReplicas)
   616  		s.Current.ControlPlane = &scope.ControlPlaneState{
   617  			Object: builder.ControlPlane("test1", "cp1").
   618  				WithSpecFields(map[string]interface{}{
   619  					"spec.version": "v1.2.2",
   620  				}).
   621  				WithStatusFields(map[string]interface{}{
   622  					"status.version": "v1.2.1",
   623  				}).
   624  				Build(),
   625  		}
   626  		s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim)})
   627  		s.Blueprint = blueprint
   628  
   629  		r := &Reconciler{}
   630  
   631  		obj, err := r.computeControlPlane(ctx, s, nil)
   632  		g.Expect(err).ToNot(HaveOccurred())
   633  		g.Expect(obj).ToNot(BeNil())
   634  		g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue())
   635  	})
   636  }
   637  
   638  func TestComputeControlPlaneVersion(t *testing.T) {
   639  	t.Run("Compute control plane version under various circumstances", func(t *testing.T) {
   640  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
   641  
   642  		nonBlockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   643  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   644  				CommonResponse: runtimehooksv1.CommonResponse{
   645  					Status: runtimehooksv1.ResponseStatusSuccess,
   646  				},
   647  			},
   648  		}
   649  
   650  		blockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   651  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   652  				CommonResponse: runtimehooksv1.CommonResponse{
   653  					Status: runtimehooksv1.ResponseStatusSuccess,
   654  				},
   655  				RetryAfterSeconds: int32(10),
   656  			},
   657  		}
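         		// Note: RetryAfterSeconds > 0 is what makes the response above blocking; on a
         		// blocking response the reconciler keeps the current version and retries later.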
   658  
   659  		failureBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   660  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   661  				CommonResponse: runtimehooksv1.CommonResponse{
   662  					Status: runtimehooksv1.ResponseStatusFailure,
   663  				},
   664  			},
   665  		}
   666  
   667  		catalog := runtimecatalog.New()
   668  		_ = runtimehooksv1.AddToCatalog(catalog)
   669  
   670  		beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
   671  		if err != nil {
   672  			panic("unable to compute GVH")
   673  		}
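         		// beforeClusterUpgradeGVH identifies the hook by group, version, and hook name,
         		// roughly hooks.runtime.cluster.x-k8s.io/v1alpha1, Hook=BeforeClusterUpgrade (assumed).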
   674  
   675  		tests := []struct {
   676  			name                        string
   677  			hookResponse                *runtimehooksv1.BeforeClusterUpgradeResponse
   678  			topologyVersion             string
   679  			controlPlaneObj             *unstructured.Unstructured
   680  			upgradingMachineDeployments []string
   681  			upgradingMachinePools       []string
   682  			expectedVersion             string
   683  			wantErr                     bool
   684  		}{
   685  			{
   686  				name:            "should return cluster.spec.topology.version if creating a new control plane",
   687  				topologyVersion: "v1.2.3",
   688  				controlPlaneObj: nil,
   689  				expectedVersion: "v1.2.3",
   690  			},
   691  			{
   692  				// Control plane is not upgrading implies that controlplane.spec.version is equal to controlplane.status.version.
   693  				// Control plane is not scaling implies that controlplane.spec.replicas is equal to controlplane.status.replicas,
    694  				// controlplane.status.updatedReplicas, and controlplane.status.readyReplicas.
   695  				name:            "should return cluster.spec.topology.version if the control plane is not upgrading and not scaling",
   696  				hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
   697  				topologyVersion: "v1.2.3",
   698  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   699  					WithSpecFields(map[string]interface{}{
   700  						"spec.version":  "v1.2.2",
   701  						"spec.replicas": int64(2),
   702  					}).
   703  					WithStatusFields(map[string]interface{}{
   704  						"status.version":             "v1.2.2",
   705  						"status.replicas":            int64(2),
   706  						"status.updatedReplicas":     int64(2),
   707  						"status.readyReplicas":       int64(2),
   708  						"status.unavailableReplicas": int64(0),
   709  					}).
   710  					Build(),
   711  				expectedVersion: "v1.2.3",
   712  			},
   713  			{
   714  				// Control plane is considered upgrading if controlplane.spec.version is not equal to controlplane.status.version.
   715  				name:            "should return controlplane.spec.version if the control plane is upgrading",
   716  				topologyVersion: "v1.2.3",
   717  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   718  					WithSpecFields(map[string]interface{}{
   719  						"spec.version": "v1.2.2",
   720  					}).
   721  					WithStatusFields(map[string]interface{}{
   722  						"status.version": "v1.2.1",
   723  					}).
   724  					Build(),
   725  				expectedVersion: "v1.2.2",
   726  			},
   727  			{
   728  				// Control plane is considered scaling if controlplane.spec.replicas is not equal to any of
   729  				// controlplane.status.replicas, controlplane.status.readyReplicas, controlplane.status.updatedReplicas.
   730  				name:            "should return controlplane.spec.version if the control plane is scaling",
   731  				topologyVersion: "v1.2.3",
   732  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   733  					WithSpecFields(map[string]interface{}{
   734  						"spec.version":  "v1.2.2",
   735  						"spec.replicas": int64(2),
   736  					}).
   737  					WithStatusFields(map[string]interface{}{
   738  						"status.version":             "v1.2.2",
   739  						"status.replicas":            int64(1),
   740  						"status.updatedReplicas":     int64(1),
   741  						"status.readyReplicas":       int64(1),
   742  						"status.unavailableReplicas": int64(0),
   743  					}).
   744  					Build(),
   745  				expectedVersion: "v1.2.2",
   746  			},
   747  			{
    748  				name:            "should return controlplane.spec.version if the control plane is not upgrading and not scaling and one of the MachineDeployments and one of the MachinePools are upgrading",
   749  				topologyVersion: "v1.2.3",
   750  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   751  					WithSpecFields(map[string]interface{}{
   752  						"spec.version":  "v1.2.2",
   753  						"spec.replicas": int64(2),
   754  					}).
   755  					WithStatusFields(map[string]interface{}{
   756  						"status.version":             "v1.2.2",
   757  						"status.replicas":            int64(2),
   758  						"status.updatedReplicas":     int64(2),
   759  						"status.readyReplicas":       int64(2),
   760  						"status.unavailableReplicas": int64(0),
   761  					}).
   762  					Build(),
   763  				upgradingMachineDeployments: []string{"md1"},
   764  				upgradingMachinePools:       []string{"mp1"},
   765  				expectedVersion:             "v1.2.2",
   766  			},
   767  			{
    768  				name:            "should return cluster.spec.topology.version if the control plane is not upgrading and not scaling and none of the MachineDeployments and MachinePools are upgrading - hook returns non-blocking response",
   769  				hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
   770  				topologyVersion: "v1.2.3",
   771  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   772  					WithSpecFields(map[string]interface{}{
   773  						"spec.version":  "v1.2.2",
   774  						"spec.replicas": int64(2),
   775  					}).
   776  					WithStatusFields(map[string]interface{}{
   777  						"status.version":             "v1.2.2",
   778  						"status.replicas":            int64(2),
   779  						"status.updatedReplicas":     int64(2),
   780  						"status.readyReplicas":       int64(2),
   781  						"status.unavailableReplicas": int64(0),
   782  					}).
   783  					Build(),
   784  				upgradingMachineDeployments: []string{},
   785  				upgradingMachinePools:       []string{},
   786  				expectedVersion:             "v1.2.3",
   787  			},
   788  			{
    789  				name:            "should return controlplane.spec.version if the BeforeClusterUpgrade hook returns a blocking response",
   790  				hookResponse:    blockingBeforeClusterUpgradeResponse,
   791  				topologyVersion: "v1.2.3",
   792  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   793  					WithSpecFields(map[string]interface{}{
   794  						"spec.version":  "v1.2.2",
   795  						"spec.replicas": int64(2),
   796  					}).
   797  					WithStatusFields(map[string]interface{}{
   798  						"status.version":             "v1.2.2",
   799  						"status.replicas":            int64(2),
   800  						"status.updatedReplicas":     int64(2),
   801  						"status.readyReplicas":       int64(2),
   802  						"status.unavailableReplicas": int64(0),
   803  					}).
   804  					Build(),
   805  				expectedVersion: "v1.2.2",
   806  			},
   807  			{
    808  				name:            "should fail if the BeforeClusterUpgrade hook returns a failure response",
   809  				hookResponse:    failureBeforeClusterUpgradeResponse,
   810  				topologyVersion: "v1.2.3",
   811  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   812  					WithSpecFields(map[string]interface{}{
   813  						"spec.version":  "v1.2.2",
   814  						"spec.replicas": int64(2),
   815  					}).
   816  					WithStatusFields(map[string]interface{}{
   817  						"status.version":             "v1.2.2",
   818  						"status.replicas":            int64(2),
   819  						"status.updatedReplicas":     int64(2),
   820  						"status.readyReplicas":       int64(2),
   821  						"status.unavailableReplicas": int64(0),
   822  					}).
   823  					Build(),
   824  				expectedVersion: "v1.2.2",
   825  				wantErr:         true,
   826  			},
   827  		}
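         		// In short: a new control plane gets the topology version; an upgrading or scaling
         		// control plane (or one with upgrading MachineDeployments/MachinePools) keeps
         		// spec.version; otherwise the BeforeClusterUpgrade hook gates the version bump.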
   828  		for _, tt := range tests {
   829  			t.Run(tt.name, func(t *testing.T) {
   830  				g := NewWithT(t)
   831  
   832  				s := &scope.Scope{
   833  					Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
   834  						Version: tt.topologyVersion,
   835  						ControlPlane: clusterv1.ControlPlaneTopology{
   836  							Replicas: pointer.Int32(2),
   837  						},
   838  					}},
   839  					Current: &scope.ClusterState{
   840  						Cluster: &clusterv1.Cluster{
   841  							ObjectMeta: metav1.ObjectMeta{
   842  								Name:      "test-cluster",
   843  								Namespace: "test-ns",
   844  							},
   845  						},
   846  						ControlPlane: &scope.ControlPlaneState{Object: tt.controlPlaneObj},
   847  					},
   848  					UpgradeTracker:      scope.NewUpgradeTracker(),
   849  					HookResponseTracker: scope.NewHookResponseTracker(),
   850  				}
   851  				if len(tt.upgradingMachineDeployments) > 0 {
   852  					s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
   853  				}
   854  				if len(tt.upgradingMachinePools) > 0 {
   855  					s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
   856  				}
   857  
   858  				runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
   859  					WithCatalog(catalog).
   860  					WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
   861  						beforeClusterUpgradeGVH: tt.hookResponse,
   862  					}).
   863  					Build()
   864  
   865  				fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build()
   866  
   867  				r := &Reconciler{
   868  					Client:        fakeClient,
   869  					APIReader:     fakeClient,
   870  					RuntimeClient: runtimeClient,
   871  				}
   872  				version, err := r.computeControlPlaneVersion(ctx, s)
   873  				if tt.wantErr {
   874  					g.Expect(err).To(HaveOccurred())
   875  				} else {
   876  					g.Expect(err).ToNot(HaveOccurred())
   877  					g.Expect(version).To(Equal(tt.expectedVersion))
   878  					// Verify that if the upgrade is pending it is captured in the upgrade tracker.
   879  					upgradePending := tt.expectedVersion != tt.topologyVersion
   880  					g.Expect(s.UpgradeTracker.ControlPlane.IsPendingUpgrade).To(Equal(upgradePending))
   881  				}
   882  			})
   883  		}
   884  	})
   885  
   886  	t.Run("Calling AfterControlPlaneUpgrade hook", func(t *testing.T) {
   887  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
   888  
   889  		catalog := runtimecatalog.New()
   890  		_ = runtimehooksv1.AddToCatalog(catalog)
   891  
   892  		afterControlPlaneUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneUpgrade)
   893  		if err != nil {
   894  			panic(err)
   895  		}
   896  
   897  		blockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   898  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   899  				RetryAfterSeconds: int32(10),
   900  				CommonResponse: runtimehooksv1.CommonResponse{
   901  					Status: runtimehooksv1.ResponseStatusSuccess,
   902  				},
   903  			},
   904  		}
   905  		nonBlockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   906  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   907  				RetryAfterSeconds: int32(0),
   908  				CommonResponse: runtimehooksv1.CommonResponse{
   909  					Status: runtimehooksv1.ResponseStatusSuccess,
   910  				},
   911  			},
   912  		}
   913  		failureResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   914  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   915  				CommonResponse: runtimehooksv1.CommonResponse{
   916  					Status: runtimehooksv1.ResponseStatusFailure,
   917  				},
   918  			},
   919  		}
   920  
   921  		topologyVersion := "v1.2.3"
   922  		lowerVersion := "v1.2.2"
   923  		controlPlaneStable := builder.ControlPlane("test-ns", "cp1").
   924  			WithSpecFields(map[string]interface{}{
   925  				"spec.version":  topologyVersion,
   926  				"spec.replicas": int64(2),
   927  			}).
   928  			WithStatusFields(map[string]interface{}{
   929  				"status.version":         topologyVersion,
   930  				"status.replicas":        int64(2),
   931  				"status.updatedReplicas": int64(2),
   932  				"status.readyReplicas":   int64(2),
   933  			}).
   934  			Build()
   935  
   936  		controlPlaneUpgrading := builder.ControlPlane("test-ns", "cp1").
   937  			WithSpecFields(map[string]interface{}{
   938  				"spec.version":  topologyVersion,
   939  				"spec.replicas": int64(2),
   940  			}).
   941  			WithStatusFields(map[string]interface{}{
   942  				"status.version":         lowerVersion,
   943  				"status.replicas":        int64(2),
   944  				"status.updatedReplicas": int64(2),
   945  				"status.readyReplicas":   int64(2),
   946  			}).
   947  			Build()
   948  
   949  		controlPlaneProvisioning := builder.ControlPlane("test-ns", "cp1").
   950  			WithSpecFields(map[string]interface{}{
   951  				"spec.version":  "v1.2.2",
   952  				"spec.replicas": int64(2),
   953  			}).
   954  			WithStatusFields(map[string]interface{}{
   955  				"status.version": "",
   956  			}).
   957  			Build()
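         		// The three fixtures above model the states the hook logic distinguishes:
         		// stable (spec.version == status.version), upgrading (spec.version != status.version),
         		// and provisioning (status.version still empty).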
   958  
   959  		tests := []struct {
   960  			name               string
   961  			s                  *scope.Scope
   962  			hookResponse       *runtimehooksv1.AfterControlPlaneUpgradeResponse
   963  			wantIntentToCall   bool
   964  			wantHookToBeCalled bool
   965  			wantHookToBlock    bool
   966  			wantErr            bool
   967  		}{
   968  			{
   969  				name: "should not call hook if it is not marked",
   970  				s: &scope.Scope{
   971  					Blueprint: &scope.ClusterBlueprint{
   972  						Topology: &clusterv1.Topology{
   973  							Version:      topologyVersion,
   974  							ControlPlane: clusterv1.ControlPlaneTopology{},
   975  						},
   976  					},
   977  					Current: &scope.ClusterState{
   978  						Cluster: &clusterv1.Cluster{
   979  							ObjectMeta: metav1.ObjectMeta{
   980  								Name:      "test-cluster",
   981  								Namespace: "test-ns",
   982  							},
   983  							Spec: clusterv1.ClusterSpec{},
   984  						},
   985  						ControlPlane: &scope.ControlPlaneState{
   986  							Object: controlPlaneStable,
   987  						},
   988  					},
   989  					UpgradeTracker:      scope.NewUpgradeTracker(),
   990  					HookResponseTracker: scope.NewHookResponseTracker(),
   991  				},
   992  				wantIntentToCall:   false,
   993  				wantHookToBeCalled: false,
   994  				wantErr:            false,
   995  			},
   996  			{
   997  				name: "should not call hook if the control plane is provisioning - there is intent to call hook",
   998  				s: &scope.Scope{
   999  					Blueprint: &scope.ClusterBlueprint{
  1000  						Topology: &clusterv1.Topology{
  1001  							Version:      topologyVersion,
  1002  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1003  						},
  1004  					},
  1005  					Current: &scope.ClusterState{
  1006  						Cluster: &clusterv1.Cluster{
  1007  							ObjectMeta: metav1.ObjectMeta{
  1008  								Name:      "test-cluster",
  1009  								Namespace: "test-ns",
  1010  								Annotations: map[string]string{
  1011  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1012  								},
  1013  							},
  1014  							Spec: clusterv1.ClusterSpec{},
  1015  						},
  1016  						ControlPlane: &scope.ControlPlaneState{
  1017  							Object: controlPlaneProvisioning,
  1018  						},
  1019  					},
  1020  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1021  					HookResponseTracker: scope.NewHookResponseTracker(),
  1022  				},
  1023  				wantIntentToCall:   true,
  1024  				wantHookToBeCalled: false,
  1025  				wantErr:            false,
  1026  			},
  1027  			{
  1028  				name: "should not call hook if the control plane is upgrading - there is intent to call hook",
  1029  				s: &scope.Scope{
  1030  					Blueprint: &scope.ClusterBlueprint{
  1031  						Topology: &clusterv1.Topology{
  1032  							Version:      topologyVersion,
  1033  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1034  						},
  1035  					},
  1036  					Current: &scope.ClusterState{
  1037  						Cluster: &clusterv1.Cluster{
  1038  							ObjectMeta: metav1.ObjectMeta{
  1039  								Name:      "test-cluster",
  1040  								Namespace: "test-ns",
  1041  								Annotations: map[string]string{
  1042  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1043  								},
  1044  							},
  1045  							Spec: clusterv1.ClusterSpec{},
  1046  						},
  1047  						ControlPlane: &scope.ControlPlaneState{
  1048  							Object: controlPlaneUpgrading,
  1049  						},
  1050  					},
  1051  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1052  					HookResponseTracker: scope.NewHookResponseTracker(),
  1053  				},
  1054  				wantIntentToCall:   true,
  1055  				wantHookToBeCalled: false,
  1056  				wantErr:            false,
  1057  			},
  1058  			{
  1059  				name: "should call hook if the control plane is at desired version - non blocking response should remove hook from pending hooks list and allow MD upgrades",
  1060  				s: &scope.Scope{
  1061  					Blueprint: &scope.ClusterBlueprint{
  1062  						Topology: &clusterv1.Topology{
  1063  							Version:      topologyVersion,
  1064  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1065  						},
  1066  					},
  1067  					Current: &scope.ClusterState{
  1068  						Cluster: &clusterv1.Cluster{
  1069  							ObjectMeta: metav1.ObjectMeta{
  1070  								Name:      "test-cluster",
  1071  								Namespace: "test-ns",
  1072  								Annotations: map[string]string{
  1073  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1074  								},
  1075  							},
  1076  							Spec: clusterv1.ClusterSpec{},
  1077  						},
  1078  						ControlPlane: &scope.ControlPlaneState{
  1079  							Object: controlPlaneStable,
  1080  						},
  1081  					},
  1082  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1083  					HookResponseTracker: scope.NewHookResponseTracker(),
  1084  				},
  1085  				hookResponse:       nonBlockingResponse,
  1086  				wantIntentToCall:   false,
  1087  				wantHookToBeCalled: true,
  1088  				wantHookToBlock:    false,
  1089  				wantErr:            false,
  1090  			},
  1091  			{
  1092  				name: "should call hook if the control plane is at desired version - blocking response should leave the hook in pending hooks list and block MD upgrades",
  1093  				s: &scope.Scope{
  1094  					Blueprint: &scope.ClusterBlueprint{
  1095  						Topology: &clusterv1.Topology{
  1096  							Version:      topologyVersion,
  1097  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1098  						},
  1099  					},
  1100  					Current: &scope.ClusterState{
  1101  						Cluster: &clusterv1.Cluster{
  1102  							ObjectMeta: metav1.ObjectMeta{
  1103  								Name:      "test-cluster",
  1104  								Namespace: "test-ns",
  1105  								Annotations: map[string]string{
  1106  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1107  								},
  1108  							},
  1109  							Spec: clusterv1.ClusterSpec{},
  1110  						},
  1111  						ControlPlane: &scope.ControlPlaneState{
  1112  							Object: controlPlaneStable,
  1113  						},
  1114  					},
  1115  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1116  					HookResponseTracker: scope.NewHookResponseTracker(),
  1117  				},
  1118  				hookResponse:       blockingResponse,
  1119  				wantIntentToCall:   true,
  1120  				wantHookToBeCalled: true,
  1121  				wantHookToBlock:    true,
  1122  				wantErr:            false,
  1123  			},
  1124  			{
  1125  				name: "should call hook if the control plane is at desired version - failure response should leave the hook in pending hooks list",
  1126  				s: &scope.Scope{
  1127  					Blueprint: &scope.ClusterBlueprint{
  1128  						Topology: &clusterv1.Topology{
  1129  							Version:      topologyVersion,
  1130  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1131  						},
  1132  					},
  1133  					Current: &scope.ClusterState{
  1134  						Cluster: &clusterv1.Cluster{
  1135  							ObjectMeta: metav1.ObjectMeta{
  1136  								Name:      "test-cluster",
  1137  								Namespace: "test-ns",
  1138  								Annotations: map[string]string{
  1139  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1140  								},
  1141  							},
  1142  							Spec: clusterv1.ClusterSpec{},
  1143  						},
  1144  						ControlPlane: &scope.ControlPlaneState{
  1145  							Object: controlPlaneStable,
  1146  						},
  1147  					},
  1148  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1149  					HookResponseTracker: scope.NewHookResponseTracker(),
  1150  				},
  1151  				hookResponse:       failureResponse,
  1152  				wantIntentToCall:   true,
  1153  				wantHookToBeCalled: true,
  1154  				wantErr:            true,
  1155  			},
  1156  		}
  1157  
  1158  		for _, tt := range tests {
  1159  			t.Run(tt.name, func(t *testing.T) {
  1160  				g := NewWithT(t)
  1161  
  1162  				fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1163  					WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1164  						afterControlPlaneUpgradeGVH: tt.hookResponse,
  1165  					}).
  1166  					WithCatalog(catalog).
  1167  					Build()
  1168  
  1169  				fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build()
  1170  
  1171  				r := &Reconciler{
  1172  					Client:        fakeClient,
  1173  					APIReader:     fakeClient,
  1174  					RuntimeClient: fakeRuntimeClient,
  1175  				}
  1176  
  1177  				_, err := r.computeControlPlaneVersion(ctx, tt.s)
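         				// CallAllCount reports how many times the fake runtime client invoked the hook;
         				// exactly one call is expected whenever the hook fires (assumed fake client semantics).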
  1178  				g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneUpgrade) == 1).To(Equal(tt.wantHookToBeCalled))
  1179  				g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantIntentToCall))
  1180  				g.Expect(err != nil).To(Equal(tt.wantErr))
  1181  				if tt.wantHookToBeCalled && !tt.wantErr {
  1182  					g.Expect(tt.s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade)).To(Equal(tt.wantHookToBlock))
  1183  				}
  1184  			})
  1185  		}
  1186  	})
  1187  
  1188  	t.Run("register intent to call AfterClusterUpgrade and AfterControlPlaneUpgrade hooks", func(t *testing.T) {
  1189  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
  1190  
  1191  		catalog := runtimecatalog.New()
  1192  		_ = runtimehooksv1.AddToCatalog(catalog)
  1193  		beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
  1194  		if err != nil {
  1195  			panic("unable to compute GVH")
  1196  		}
  1197  		beforeClusterUpgradeNonBlockingResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
  1198  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  1199  				CommonResponse: runtimehooksv1.CommonResponse{
  1200  					Status: runtimehooksv1.ResponseStatusSuccess,
  1201  				},
  1202  			},
  1203  		}
  1204  
  1205  		controlPlaneStable := builder.ControlPlane("test-ns", "cp1").
  1206  			WithSpecFields(map[string]interface{}{
  1207  				"spec.version":  "v1.2.2",
  1208  				"spec.replicas": int64(2),
  1209  			}).
  1210  			WithStatusFields(map[string]interface{}{
  1211  				"status.version":             "v1.2.2",
  1212  				"status.replicas":            int64(2),
  1213  				"status.updatedReplicas":     int64(2),
  1214  				"status.readyReplicas":       int64(2),
  1215  				"status.unavailableReplicas": int64(0),
  1216  			}).
  1217  			Build()
  1218  
  1219  		s := &scope.Scope{
  1220  			Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  1221  				Version: "v1.2.3",
  1222  				ControlPlane: clusterv1.ControlPlaneTopology{
  1223  					Replicas: pointer.Int32(2),
  1224  				},
  1225  			}},
  1226  			Current: &scope.ClusterState{
  1227  				Cluster: &clusterv1.Cluster{
  1228  					ObjectMeta: metav1.ObjectMeta{
  1229  						Name:      "test-cluster",
  1230  						Namespace: "test-ns",
  1231  					},
  1232  				},
  1233  				ControlPlane: &scope.ControlPlaneState{Object: controlPlaneStable},
  1234  			},
  1235  			UpgradeTracker:      scope.NewUpgradeTracker(),
  1236  			HookResponseTracker: scope.NewHookResponseTracker(),
  1237  		}
  1238  
  1239  		runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1240  			WithCatalog(catalog).
  1241  			WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1242  				beforeClusterUpgradeGVH: beforeClusterUpgradeNonBlockingResponse,
  1243  			}).
  1244  			Build()
  1245  
  1246  		fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build()
  1247  
  1248  		r := &Reconciler{
  1249  			Client:        fakeClient,
  1250  			APIReader:     fakeClient,
  1251  			RuntimeClient: runtimeClient,
  1252  		}
  1253  
  1254  		desiredVersion, err := r.computeControlPlaneVersion(ctx, s)
  1255  		g := NewWithT(t)
  1256  		g.Expect(err).ToNot(HaveOccurred())
   1257  		// When the new version is successfully picked up, the intent to call the AfterControlPlaneUpgrade and AfterClusterUpgrade hooks should be registered.
  1258  		g.Expect(desiredVersion).To(Equal("v1.2.3"))
  1259  		g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster)).To(BeTrue())
  1260  		g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster)).To(BeTrue())
  1261  	})
  1262  }
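
        // A minimal sketch of the bookkeeping behind the hooks.IsPending assertions
        // above, assuming (as the internal hooks package is understood to do) that
        // the intent to call a lifecycle hook is tracked as a comma-separated list
        // of hook names in the runtimev1.PendingHooksAnnotation on the Cluster.
        // isHookNamePendingSketch is a hypothetical helper, not part of the suite.
        func isHookNamePendingSketch(cluster *clusterv1.Cluster, hookName string) bool {
        	pending, ok := cluster.Annotations[runtimev1.PendingHooksAnnotation]
        	if !ok {
        		return false
        	}
        	// The annotation value is assumed to look like
        	// "AfterControlPlaneUpgrade,AfterClusterUpgrade".
        	for _, name := range strings.Split(pending, ",") {
        		if name == hookName {
        			return true
        		}
        	}
        	return false
        }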
  1263  
  1264  func TestComputeCluster(t *testing.T) {
  1265  	g := NewWithT(t)
  1266  
  1267  	// generated objects
  1268  	infrastructureCluster := builder.InfrastructureCluster(metav1.NamespaceDefault, "infrastructureCluster1").
  1269  		Build()
  1270  	controlPlane := builder.ControlPlane(metav1.NamespaceDefault, "controlplane1").
  1271  		Build()
  1272  
  1273  	// current cluster objects
  1274  	cluster := &clusterv1.Cluster{
  1275  		ObjectMeta: metav1.ObjectMeta{
  1276  			Name:      "cluster1",
  1277  			Namespace: metav1.NamespaceDefault,
  1278  		},
  1279  	}
  1280  
  1281  	// aggregating current cluster objects into ClusterState (simulating getCurrentState)
  1282  	scope := scope.New(cluster)
  1283  
  1284  	obj, err := computeCluster(ctx, scope, infrastructureCluster, controlPlane)
  1285  	g.Expect(err).ToNot(HaveOccurred())
  1286  	g.Expect(obj).ToNot(BeNil())
  1287  
  1288  	// TypeMeta
  1289  	g.Expect(obj.APIVersion).To(Equal(cluster.APIVersion))
  1290  	g.Expect(obj.Kind).To(Equal(cluster.Kind))
  1291  
  1292  	// ObjectMeta
  1293  	g.Expect(obj.Name).To(Equal(cluster.Name))
  1294  	g.Expect(obj.Namespace).To(Equal(cluster.Namespace))
  1295  	g.Expect(obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, cluster.Name))
  1296  	g.Expect(obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  1297  
  1298  	// Spec
  1299  	g.Expect(obj.Spec.InfrastructureRef).To(BeComparableTo(contract.ObjToRef(infrastructureCluster)))
  1300  	g.Expect(obj.Spec.ControlPlaneRef).To(BeComparableTo(contract.ObjToRef(controlPlane)))
  1301  }
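
        // A rough sketch of the reference shape asserted above: contract.ObjToRef is
        // assumed to lift apiVersion, kind, namespace and name from the referenced
        // object into a corev1.ObjectReference. objToRefSketch is a hypothetical
        // stand-in for illustration only.
        func objToRefSketch(obj *unstructured.Unstructured) *corev1.ObjectReference {
        	return &corev1.ObjectReference{
        		APIVersion: obj.GetAPIVersion(),
        		Kind:       obj.GetKind(),
        		Namespace:  obj.GetNamespace(),
        		Name:       obj.GetName(),
        	}
        }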
  1302  
  1303  func TestComputeMachineDeployment(t *testing.T) {
  1304  	workerInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "linux-worker-inframachinetemplate").
  1305  		Build()
  1306  	workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate").
  1307  		Build()
  1308  	labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"}
  1309  	annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"}
  1310  
  1311  	unhealthyConditions := []clusterv1.UnhealthyCondition{
  1312  		{
  1313  			Type:    corev1.NodeReady,
  1314  			Status:  corev1.ConditionUnknown,
  1315  			Timeout: metav1.Duration{Duration: 5 * time.Minute},
  1316  		},
  1317  		{
  1318  			Type:    corev1.NodeReady,
  1319  			Status:  corev1.ConditionFalse,
  1320  			Timeout: metav1.Duration{Duration: 5 * time.Minute},
  1321  		},
  1322  	}
  1323  	nodeTimeoutDuration := &metav1.Duration{Duration: time.Duration(1)}
  1324  
  1325  	clusterClassFailureDomain := "A"
  1326  	clusterClassDuration := metav1.Duration{Duration: 20 * time.Second}
  1327  	var clusterClassMinReadySeconds int32 = 20
  1328  	clusterClassStrategy := clusterv1.MachineDeploymentStrategy{
  1329  		Type: clusterv1.OnDeleteMachineDeploymentStrategyType,
  1330  	}
  1331  	md1 := builder.MachineDeploymentClass("linux-worker").
  1332  		WithLabels(labels).
  1333  		WithAnnotations(annotations).
  1334  		WithInfrastructureTemplate(workerInfrastructureMachineTemplate).
  1335  		WithBootstrapTemplate(workerBootstrapTemplate).
  1336  		WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{
  1337  			UnhealthyConditions: unhealthyConditions,
  1338  			NodeStartupTimeout:  nodeTimeoutDuration,
  1339  		}).
  1340  		WithFailureDomain(&clusterClassFailureDomain).
  1341  		WithNodeDrainTimeout(&clusterClassDuration).
  1342  		WithNodeVolumeDetachTimeout(&clusterClassDuration).
  1343  		WithNodeDeletionTimeout(&clusterClassDuration).
  1344  		WithMinReadySeconds(&clusterClassMinReadySeconds).
  1345  		WithStrategy(&clusterClassStrategy).
  1346  		Build()
  1347  	mcds := []clusterv1.MachineDeploymentClass{*md1}
  1348  	fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
  1349  		WithWorkerMachineDeploymentClasses(mcds...).
  1350  		Build()
  1351  
  1352  	version := "v1.21.2"
  1353  	cluster := &clusterv1.Cluster{
  1354  		ObjectMeta: metav1.ObjectMeta{
  1355  			Name:      "cluster1",
  1356  			Namespace: metav1.NamespaceDefault,
  1357  		},
  1358  		Spec: clusterv1.ClusterSpec{
  1359  			Topology: &clusterv1.Topology{
  1360  				Version: version,
  1361  			},
  1362  		},
  1363  	}
  1364  
  1365  	blueprint := &scope.ClusterBlueprint{
  1366  		Topology:     cluster.Spec.Topology,
  1367  		ClusterClass: fakeClass,
  1368  		MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
  1369  			"linux-worker": {
  1370  				Metadata: clusterv1.ObjectMeta{
  1371  					Labels:      labels,
  1372  					Annotations: annotations,
  1373  				},
  1374  				BootstrapTemplate:             workerBootstrapTemplate,
  1375  				InfrastructureMachineTemplate: workerInfrastructureMachineTemplate,
  1376  				MachineHealthCheck: &clusterv1.MachineHealthCheckClass{
  1377  					UnhealthyConditions: unhealthyConditions,
  1378  					NodeStartupTimeout: &metav1.Duration{
  1379  						Duration: time.Duration(1)},
  1380  				},
  1381  			},
  1382  		},
  1383  	}
  1384  
  1385  	replicas := int32(5)
  1386  	topologyFailureDomain := "B"
  1387  	topologyDuration := metav1.Duration{Duration: 10 * time.Second}
  1388  	var topologyMinReadySeconds int32 = 10
  1389  	topologyStrategy := clusterv1.MachineDeploymentStrategy{
  1390  		Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
  1391  	}
  1392  	mdTopology := clusterv1.MachineDeploymentTopology{
  1393  		Metadata: clusterv1.ObjectMeta{
  1394  			Labels: map[string]string{
  1395  				// Should overwrite the label from the MachineDeployment class.
  1396  				"fooLabel": "baz",
  1397  			},
  1398  			Annotations: map[string]string{
  1399  				// Should overwrite the annotation from the MachineDeployment class.
  1400  				"fooAnnotation": "baz",
  1401  				// These annotations should not be propagated to the MachineDeployment.
  1402  				clusterv1.ClusterTopologyDeferUpgradeAnnotation:        "",
  1403  				clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  1404  			},
  1405  		},
  1406  		Class:                   "linux-worker",
  1407  		Name:                    "big-pool-of-machines",
  1408  		Replicas:                &replicas,
  1409  		FailureDomain:           &topologyFailureDomain,
  1410  		NodeDrainTimeout:        &topologyDuration,
  1411  		NodeVolumeDetachTimeout: &topologyDuration,
  1412  		NodeDeletionTimeout:     &topologyDuration,
  1413  		MinReadySeconds:         &topologyMinReadySeconds,
  1414  		Strategy:                &topologyStrategy,
  1415  	}
  1416  
  1417  	t.Run("Generates the machine deployment and the referenced templates", func(t *testing.T) {
  1418  		g := NewWithT(t)
  1419  		scope := scope.New(cluster)
  1420  		scope.Blueprint = blueprint
  1421  
  1422  		actual, err := computeMachineDeployment(ctx, scope, mdTopology)
  1423  		g.Expect(err).ToNot(HaveOccurred())
  1424  
  1425  		g.Expect(actual.BootstrapTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines"))
  1426  
  1427  		// Ensure Cluster ownership is added to generated BootstrapTemplate.
  1428  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()).To(HaveLen(1))
  1429  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1430  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1431  
  1432  		g.Expect(actual.InfrastructureMachineTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines"))
  1433  
  1434  		// Ensure Cluster ownership is added to generated InfrastructureMachineTemplate.
  1435  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()).To(HaveLen(1))
  1436  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1437  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1438  
  1439  		actualMd := actual.Object
  1440  		g.Expect(*actualMd.Spec.Replicas).To(Equal(replicas))
  1441  		g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds))
  1442  		g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(topologyStrategy))
  1443  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
  1444  		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration))
  1445  		g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration))
  1446  		g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration))
  1447  		g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1"))
  1448  		g.Expect(actualMd.Name).To(ContainSubstring("cluster1"))
  1449  		g.Expect(actualMd.Name).To(ContainSubstring("big-pool-of-machines"))
  1450  
  1451  		expectedAnnotations := util.MergeMap(mdTopology.Metadata.Annotations, md1.Template.Metadata.Annotations)
  1452  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1453  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1454  		g.Expect(actualMd.Annotations).To(Equal(expectedAnnotations))
  1455  		g.Expect(actualMd.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1456  
  1457  		g.Expect(actualMd.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1458  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1459  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1460  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1461  		})))
  1462  		g.Expect(actualMd.Spec.Selector.MatchLabels).To(Equal(map[string]string{
  1463  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1464  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1465  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1466  		}))
  1467  		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1468  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1469  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1470  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1471  		})))
  1472  
  1473  		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinetemplate"))
  1474  		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate"))
  1475  	})
  1476  	t.Run("Generates the machine deployment and the referenced templates using ClusterClass defaults", func(t *testing.T) {
  1477  		g := NewWithT(t)
  1478  		scope := scope.New(cluster)
  1479  		scope.Blueprint = blueprint
  1480  
  1481  		mdTopology := clusterv1.MachineDeploymentTopology{
  1482  			Metadata: clusterv1.ObjectMeta{
  1483  				Labels: map[string]string{"foo": "baz"},
  1484  			},
  1485  			Class:    "linux-worker",
  1486  			Name:     "big-pool-of-machines",
  1487  			Replicas: &replicas,
  1488  			// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
  1489  		}
  1490  
  1491  		actual, err := computeMachineDeployment(ctx, scope, mdTopology)
  1492  		g.Expect(err).ToNot(HaveOccurred())
  1493  
  1494  		// Check only the values defaulted from the ClusterClass.
  1495  		actualMd := actual.Object
  1496  		g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds))
  1497  		g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(clusterClassStrategy))
  1498  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain))
  1499  		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration))
  1500  		g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration))
  1501  		g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration))
  1502  	})
  1503  
  1504  	t.Run("If there is already a machine deployment, it preserves the object name and the reference names", func(t *testing.T) {
  1505  		g := NewWithT(t)
  1506  		s := scope.New(cluster)
  1507  		s.Blueprint = blueprint
  1508  
  1509  		currentReplicas := int32(3)
  1510  		currentMd := &clusterv1.MachineDeployment{
  1511  			ObjectMeta: metav1.ObjectMeta{
  1512  				Name: "existing-deployment-1",
  1513  			},
  1514  			Spec: clusterv1.MachineDeploymentSpec{
  1515  				Replicas: &currentReplicas,
  1516  				Template: clusterv1.MachineTemplateSpec{
  1517  					Spec: clusterv1.MachineSpec{
  1518  						Version: pointer.String(version),
  1519  						Bootstrap: clusterv1.Bootstrap{
  1520  							ConfigRef: contract.ObjToRef(workerBootstrapTemplate),
  1521  						},
  1522  						InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachineTemplate),
  1523  					},
  1524  				},
  1525  			},
  1526  		}
  1527  		s.Current.MachineDeployments = map[string]*scope.MachineDeploymentState{
  1528  			"big-pool-of-machines": {
  1529  				Object:                        currentMd,
  1530  				BootstrapTemplate:             workerBootstrapTemplate,
  1531  				InfrastructureMachineTemplate: workerInfrastructureMachineTemplate,
  1532  			},
  1533  		}
  1534  
  1535  		actual, err := computeMachineDeployment(ctx, s, mdTopology)
  1536  		g.Expect(err).ToNot(HaveOccurred())
  1537  
  1538  		actualMd := actual.Object
  1539  
  1540  		g.Expect(*actualMd.Spec.Replicas).NotTo(Equal(currentReplicas))
  1541  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
  1542  		g.Expect(actualMd.Name).To(Equal("existing-deployment-1"))
  1543  
  1544  		expectedAnnotations := util.MergeMap(mdTopology.Metadata.Annotations, md1.Template.Metadata.Annotations)
  1545  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1546  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1547  		g.Expect(actualMd.Annotations).To(Equal(expectedAnnotations))
  1548  		g.Expect(actualMd.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1549  
  1550  		g.Expect(actualMd.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1551  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1552  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1553  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1554  		})))
  1555  		g.Expect(actualMd.Spec.Selector.MatchLabels).To(BeComparableTo(map[string]string{
  1556  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1557  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1558  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1559  		}))
  1560  		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1561  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1562  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1563  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1564  		})))
  1565  
  1566  		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinetemplate"))
  1567  		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstraptemplate"))
  1568  	})
  1569  
  1570  	t.Run("If a machine deployment references a topology class that does not exist, machine deployment generation fails", func(t *testing.T) {
  1571  		g := NewWithT(t)
  1572  		scope := scope.New(cluster)
  1573  		scope.Blueprint = blueprint
  1574  
  1575  		mdTopology = clusterv1.MachineDeploymentTopology{
  1576  			Metadata: clusterv1.ObjectMeta{
  1577  				Labels: map[string]string{"foo": "baz"},
  1578  			},
  1579  			Class: "windows-worker",
  1580  			Name:  "big-pool-of-machines",
  1581  		}
  1582  
  1583  		_, err := computeMachineDeployment(ctx, scope, mdTopology)
  1584  		g.Expect(err).To(HaveOccurred())
  1585  	})
  1586  
  1587  	t.Run("Should choose the correct version for machine deployment", func(t *testing.T) {
  1588  		controlPlaneStable123 := builder.ControlPlane("test1", "cp1").
  1589  			WithSpecFields(map[string]interface{}{
  1590  				"spec.version":  "v1.2.3",
  1591  				"spec.replicas": int64(2),
  1592  			}).
  1593  			WithStatusFields(map[string]interface{}{
  1594  				"status.version":         "v1.2.3",
  1595  				"status.replicas":        int64(2),
  1596  				"status.updatedReplicas": int64(2),
  1597  				"status.readyReplicas":   int64(2),
  1598  			}).
  1599  			Build()
  1600  
  1601  		// Note: in all the following tests we are setting it up so that the control plane is already
  1602  		// stable at the topology version.
  1603  		// A more extensive list of scenarios is tested in TestComputeMachineDeploymentVersion.
  1604  		tests := []struct {
  1605  			name                        string
  1606  			upgradingMachineDeployments []string
  1607  			currentMDVersion            *string
  1608  			upgradeConcurrency          string
  1609  			topologyVersion             string
  1610  			expectedVersion             string
  1611  		}{
  1612  			{
  1613  				name:                        "use cluster.spec.topology.version if creating a new machine deployment",
  1614  				upgradingMachineDeployments: []string{},
  1615  				upgradeConcurrency:          "1",
  1616  				currentMDVersion:            nil,
  1617  				topologyVersion:             "v1.2.3",
  1618  				expectedVersion:             "v1.2.3",
  1619  			},
  1620  			{
  1621  				name:                        "use cluster.spec.topology.version if creating a new machine deployment while another machine deployment is upgrading",
  1622  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1623  				upgradeConcurrency:          "1",
  1624  				currentMDVersion:            nil,
  1625  				topologyVersion:             "v1.2.3",
  1626  				expectedVersion:             "v1.2.3",
  1627  			},
  1628  			{
  1629  				name:                        "use machine deployment's spec.template.spec.version if one of the machine deployments is upgrading, concurrency limit reached",
  1630  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1631  				upgradeConcurrency:          "1",
  1632  				currentMDVersion:            pointer.String("v1.2.2"),
  1633  				topologyVersion:             "v1.2.3",
  1634  				expectedVersion:             "v1.2.2",
  1635  			},
  1636  			{
  1637  				name:                        "use cluster.spec.topology.version if one of the machine deployments is upgrading, concurrency limit not reached",
  1638  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1639  				upgradeConcurrency:          "2",
  1640  				currentMDVersion:            pointer.String("v1.2.2"),
  1641  				topologyVersion:             "v1.2.3",
  1642  				expectedVersion:             "v1.2.3",
  1643  			},
  1644  		}
  1645  		for _, tt := range tests {
  1646  			t.Run(tt.name, func(t *testing.T) {
  1647  				g := NewWithT(t)
  1648  
  1649  				testCluster := cluster.DeepCopy()
  1650  				if testCluster.Annotations == nil {
  1651  					testCluster.Annotations = map[string]string{}
  1652  				}
  1653  				testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency
  1654  
  1655  				s := scope.New(testCluster)
  1656  				s.Blueprint = blueprint
  1657  				s.Blueprint.Topology.Version = tt.topologyVersion
  1658  				s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{
  1659  					Replicas: pointer.Int32(2),
  1660  				}
  1661  				s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{}
  1662  
  1663  				mdsState := scope.MachineDeploymentsStateMap{}
  1664  				if tt.currentMDVersion != nil {
  1665  					// Testing a case with an existing MachineDeployment:
  1666  					// add the stable MachineDeployment to the current MachineDeployments state.
  1667  					md := builder.MachineDeployment("test-namespace", "big-pool-of-machines").
  1668  						WithGeneration(1).
  1669  						WithReplicas(2).
  1670  						WithVersion(*tt.currentMDVersion).
  1671  						WithStatus(clusterv1.MachineDeploymentStatus{
  1672  							ObservedGeneration: 2,
  1673  							Replicas:           2,
  1674  							ReadyReplicas:      2,
  1675  							UpdatedReplicas:    2,
  1676  							AvailableReplicas:  2,
  1677  						}).
  1678  						Build()
  1679  					mdsState = duplicateMachineDeploymentsState(mdsState)
  1680  					mdsState["big-pool-of-machines"] = &scope.MachineDeploymentState{
  1681  						Object: md,
  1682  					}
  1683  				}
  1684  				s.Current.MachineDeployments = mdsState
  1685  				s.Current.ControlPlane = &scope.ControlPlaneState{
  1686  					Object: controlPlaneStable123,
  1687  				}
  1688  
  1689  				mdTopology := clusterv1.MachineDeploymentTopology{
  1690  					Class:    "linux-worker",
  1691  					Name:     "big-pool-of-machines",
  1692  					Replicas: pointer.Int32(2),
  1693  				}
  1694  				s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
  1695  				obj, err := computeMachineDeployment(ctx, s, mdTopology)
  1696  				g.Expect(err).ToNot(HaveOccurred())
  1697  				g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
  1698  			})
  1699  		}
  1700  	})
  1701  
  1702  	t.Run("Should correctly generate a MachineHealthCheck for the MachineDeployment", func(t *testing.T) {
  1703  		g := NewWithT(t)
  1704  		scope := scope.New(cluster)
  1705  		scope.Blueprint = blueprint
  1706  		mdTopology := clusterv1.MachineDeploymentTopology{
  1707  			Class: "linux-worker",
  1708  			Name:  "big-pool-of-machines",
  1709  		}
  1710  
  1711  		actual, err := computeMachineDeployment(ctx, scope, mdTopology)
  1712  		g.Expect(err).ToNot(HaveOccurred())
  1713  		// Check that the ClusterName and selector are set properly for the MachineHealthCheck.
  1714  		g.Expect(actual.MachineHealthCheck.Spec.ClusterName).To(Equal(cluster.Name))
  1715  		g.Expect(actual.MachineHealthCheck.Spec.Selector).To(BeComparableTo(metav1.LabelSelector{MatchLabels: map[string]string{
  1716  			clusterv1.ClusterTopologyOwnedLabel:                 actual.Object.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyOwnedLabel],
  1717  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: actual.Object.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel],
  1718  		}}))
  1719  
  1720  		// Check that the NodeStartupTimeout is set as expected.
  1721  		g.Expect(actual.MachineHealthCheck.Spec.NodeStartupTimeout).To(Equal(nodeTimeoutDuration))
  1722  
  1723  		// Check that UnhealthyConditions are set as expected.
  1724  		g.Expect(actual.MachineHealthCheck.Spec.UnhealthyConditions).To(BeComparableTo(unhealthyConditions))
  1725  	})
  1726  }
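
        // A minimal usage sketch of the merge precedence the label and annotation
        // assertions above rely on: util.MergeMap is assumed to let the first map
        // win on conflicting keys, which is why the topology's "fooLabel: baz" is
        // expected to overwrite the class default "fooLabel: bar".
        // mergePrecedenceSketch is a hypothetical helper for illustration only.
        func mergePrecedenceSketch() map[string]string {
        	return util.MergeMap(
        		map[string]string{"fooLabel": "baz"}, // MachineDeployment topology metadata (highest precedence)
        		map[string]string{"fooLabel": "bar"}, // MachineDeployment class metadata
        	) // returns map[fooLabel:baz]
        }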
  1727  
  1728  func TestComputeMachinePool(t *testing.T) {
  1729  	workerInfrastructureMachinePool := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepool").
  1730  		Build()
  1731  	workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepooltemplate").
  1732  		Build()
  1733  	workerBootstrapConfig := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstrap").
  1734  		Build()
  1735  	workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate").
  1736  		Build()
  1737  	labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"}
  1738  	annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"}
  1739  
  1740  	clusterClassDuration := metav1.Duration{Duration: 20 * time.Second}
  1741  	clusterClassFailureDomains := []string{"A", "B"}
  1742  	var clusterClassMinReadySeconds int32 = 20
  1743  	mp1 := builder.MachinePoolClass("linux-worker").
  1744  		WithLabels(labels).
  1745  		WithAnnotations(annotations).
  1746  		WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate).
  1747  		WithBootstrapTemplate(workerBootstrapTemplate).
  1748  		WithFailureDomains("A", "B").
  1749  		WithNodeDrainTimeout(&clusterClassDuration).
  1750  		WithNodeVolumeDetachTimeout(&clusterClassDuration).
  1751  		WithNodeDeletionTimeout(&clusterClassDuration).
  1752  		WithMinReadySeconds(&clusterClassMinReadySeconds).
  1753  		Build()
  1754  	mcps := []clusterv1.MachinePoolClass{*mp1}
  1755  	fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
  1756  		WithWorkerMachinePoolClasses(mcps...).
  1757  		Build()
  1758  
  1759  	version := "v1.21.3"
  1760  	cluster := &clusterv1.Cluster{
  1761  		ObjectMeta: metav1.ObjectMeta{
  1762  			Name:      "cluster1",
  1763  			Namespace: metav1.NamespaceDefault,
  1764  		},
  1765  		Spec: clusterv1.ClusterSpec{
  1766  			Topology: &clusterv1.Topology{
  1767  				Version: version,
  1768  			},
  1769  		},
  1770  	}
  1771  
  1772  	blueprint := &scope.ClusterBlueprint{
  1773  		Topology:     cluster.Spec.Topology,
  1774  		ClusterClass: fakeClass,
  1775  		MachinePools: map[string]*scope.MachinePoolBlueprint{
  1776  			"linux-worker": {
  1777  				Metadata: clusterv1.ObjectMeta{
  1778  					Labels:      labels,
  1779  					Annotations: annotations,
  1780  				},
  1781  				BootstrapTemplate:                 workerBootstrapTemplate,
  1782  				InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate,
  1783  			},
  1784  		},
  1785  	}
  1786  
  1787  	replicas := int32(5)
  1788  	topologyFailureDomains := []string{"A", "B"}
  1789  	topologyDuration := metav1.Duration{Duration: 10 * time.Second}
  1790  	var topologyMinReadySeconds int32 = 10
  1791  	mpTopology := clusterv1.MachinePoolTopology{
  1792  		Metadata: clusterv1.ObjectMeta{
  1793  			Labels: map[string]string{
  1794  				// Should overwrite the label from the MachinePool class.
  1795  				"fooLabel": "baz",
  1796  			},
  1797  			Annotations: map[string]string{
  1798  				// Should overwrite the annotation from the MachinePool class.
  1799  				"fooAnnotation": "baz",
  1800  				// These annotations should not be propagated to the MachinePool.
  1801  				clusterv1.ClusterTopologyDeferUpgradeAnnotation:        "",
  1802  				clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  1803  			},
  1804  		},
  1805  		Class:                   "linux-worker",
  1806  		Name:                    "big-pool-of-machines",
  1807  		Replicas:                &replicas,
  1808  		FailureDomains:          topologyFailureDomains,
  1809  		NodeDrainTimeout:        &topologyDuration,
  1810  		NodeVolumeDetachTimeout: &topologyDuration,
  1811  		NodeDeletionTimeout:     &topologyDuration,
  1812  		MinReadySeconds:         &topologyMinReadySeconds,
  1813  	}
  1814  
  1815  	t.Run("Generates the machine pool and the referenced templates", func(t *testing.T) {
  1816  		g := NewWithT(t)
  1817  		scope := scope.New(cluster)
  1818  		scope.Blueprint = blueprint
  1819  
  1820  		actual, err := computeMachinePool(ctx, scope, mpTopology)
  1821  		g.Expect(err).ToNot(HaveOccurred())
  1822  
  1823  		g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines"))
  1824  
  1825  		// Ensure Cluster ownership is added to generated BootstrapObject.
  1826  		g.Expect(actual.BootstrapObject.GetOwnerReferences()).To(HaveLen(1))
  1827  		g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1828  		g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1829  
  1830  		g.Expect(actual.InfrastructureMachinePoolObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines"))
  1831  
  1832  		// Ensure Cluster ownership is added to generated InfrastructureMachinePool.
  1833  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()).To(HaveLen(1))
  1834  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1835  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1836  
  1837  		actualMp := actual.Object
  1838  		g.Expect(*actualMp.Spec.Replicas).To(Equal(replicas))
  1839  		g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds))
  1840  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains))
  1841  		g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration))
  1842  		g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration))
  1843  		g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration))
  1844  		g.Expect(actualMp.Spec.ClusterName).To(Equal("cluster1"))
  1845  		g.Expect(actualMp.Name).To(ContainSubstring("cluster1"))
  1846  		g.Expect(actualMp.Name).To(ContainSubstring("big-pool-of-machines"))
  1847  
  1848  		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
  1849  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1850  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1851  		g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations))
  1852  		g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1853  
  1854  		g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1855  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1856  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1857  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1858  		})))
  1859  		g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1860  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1861  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1862  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1863  		})))
  1864  
  1865  		g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinetemplate"))
  1866  		g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate"))
  1867  	})
  1868  	t.Run("Generates the machine pool and the referenced templates using ClusterClass defaults", func(t *testing.T) {
  1869  		g := NewWithT(t)
  1870  		scope := scope.New(cluster)
  1871  		scope.Blueprint = blueprint
  1872  
  1873  		mpTopology := clusterv1.MachinePoolTopology{
  1874  			Metadata: clusterv1.ObjectMeta{
  1875  				Labels: map[string]string{"foo": "baz"},
  1876  			},
  1877  			Class:    "linux-worker",
  1878  			Name:     "big-pool-of-machines",
  1879  			Replicas: &replicas,
  1880  			// missing FailureDomains, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds
  1881  		}
  1882  
  1883  		actual, err := computeMachinePool(ctx, scope, mpTopology)
  1884  		g.Expect(err).ToNot(HaveOccurred())
  1885  
  1886  		// Check only the values defaulted from the ClusterClass.
  1887  		actualMp := actual.Object
  1888  		g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds))
  1889  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(clusterClassFailureDomains))
  1890  		g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration))
  1891  		g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration))
  1892  		g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration))
  1893  	})
  1894  
  1895  	t.Run("If there is already a machine pool, it preserves the object name and the reference names", func(t *testing.T) {
  1896  		g := NewWithT(t)
  1897  		s := scope.New(cluster)
  1898  		s.Blueprint = blueprint
  1899  
  1900  		currentReplicas := int32(3)
  1901  		currentMp := &expv1.MachinePool{
  1902  			ObjectMeta: metav1.ObjectMeta{
  1903  				Name: "existing-pool-1",
  1904  			},
  1905  			Spec: expv1.MachinePoolSpec{
  1906  				Replicas: &currentReplicas,
  1907  				Template: clusterv1.MachineTemplateSpec{
  1908  					Spec: clusterv1.MachineSpec{
  1909  						Version: pointer.String(version),
  1910  						Bootstrap: clusterv1.Bootstrap{
  1911  							ConfigRef: contract.ObjToRef(workerBootstrapConfig),
  1912  						},
  1913  						InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachinePool),
  1914  					},
  1915  				},
  1916  			},
  1917  		}
  1918  		s.Current.MachinePools = map[string]*scope.MachinePoolState{
  1919  			"big-pool-of-machines": {
  1920  				Object:                          currentMp,
  1921  				BootstrapObject:                 workerBootstrapConfig,
  1922  				InfrastructureMachinePoolObject: workerInfrastructureMachinePool,
  1923  			},
  1924  		}
  1925  
  1926  		actual, err := computeMachinePool(ctx, s, mpTopology)
  1927  		g.Expect(err).ToNot(HaveOccurred())
  1928  
  1929  		actualMp := actual.Object
  1930  
  1931  		g.Expect(*actualMp.Spec.Replicas).NotTo(Equal(currentReplicas))
  1932  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains))
  1933  		g.Expect(actualMp.Name).To(Equal("existing-pool-1"))
  1934  
  1935  		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
  1936  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1937  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1938  		g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations))
  1939  		g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1940  
  1941  		g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1942  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1943  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1944  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1945  		})))
  1946  		g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1947  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1948  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1949  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1950  		})))
  1951  
  1952  		g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinepool"))
  1953  		g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstrap"))
  1954  	})
  1955  
  1956  	t.Run("If a machine pool references a topology class that does not exist, machine pool generation fails", func(t *testing.T) {
  1957  		g := NewWithT(t)
  1958  		scope := scope.New(cluster)
  1959  		scope.Blueprint = blueprint
  1960  
  1961  		mpTopology = clusterv1.MachinePoolTopology{
  1962  			Metadata: clusterv1.ObjectMeta{
  1963  				Labels: map[string]string{"foo": "baz"},
  1964  			},
  1965  			Class: "windows-worker",
  1966  			Name:  "big-pool-of-machines",
  1967  		}
  1968  
  1969  		_, err := computeMachinePool(ctx, scope, mpTopology)
  1970  		g.Expect(err).To(HaveOccurred())
  1971  	})
  1972  
  1973  	t.Run("Should choose the correct version for machine pool", func(t *testing.T) {
  1974  		controlPlaneStable123 := builder.ControlPlane("test1", "cp1").
  1975  			WithSpecFields(map[string]interface{}{
  1976  				"spec.version":  "v1.2.3",
  1977  				"spec.replicas": int64(2),
  1978  			}).
  1979  			WithStatusFields(map[string]interface{}{
  1980  				"status.version":         "v1.2.3",
  1981  				"status.replicas":        int64(2),
  1982  				"status.updatedReplicas": int64(2),
  1983  				"status.readyReplicas":   int64(2),
  1984  			}).
  1985  			Build()
  1986  
  1987  		// Note: in all the following tests we are setting it up so that the control plane is already
  1988  		// stable at the topology version.
  1989  		// A more extensive list of scenarios is tested in TestComputeMachinePoolVersion.
  1990  		tests := []struct {
  1991  			name                  string
  1992  			upgradingMachinePools []string
  1993  			currentMPVersion      *string
  1994  			upgradeConcurrency    string
  1995  			topologyVersion       string
  1996  			expectedVersion       string
  1997  		}{
  1998  			{
  1999  				name:                  "use cluster.spec.topology.version if creating a new machine pool",
  2000  				upgradingMachinePools: []string{},
  2001  				upgradeConcurrency:    "1",
  2002  				currentMPVersion:      nil,
  2003  				topologyVersion:       "v1.2.3",
  2004  				expectedVersion:       "v1.2.3",
  2005  			},
  2006  			{
  2007  				name:                  "use cluster.spec.topology.version if creating a new machine pool while another machine pool is upgrading",
  2008  				upgradingMachinePools: []string{"upgrading-mp1"},
  2009  				upgradeConcurrency:    "1",
  2010  				currentMPVersion:      nil,
  2011  				topologyVersion:       "v1.2.3",
  2012  				expectedVersion:       "v1.2.3",
  2013  			},
  2014  			{
  2015  				name:                  "use machine pool's spec.template.spec.version if one of the machine pools is upgrading, concurrency limit reached",
  2016  				upgradingMachinePools: []string{"upgrading-mp1"},
  2017  				upgradeConcurrency:    "1",
  2018  				currentMPVersion:      pointer.String("v1.2.2"),
  2019  				topologyVersion:       "v1.2.3",
  2020  				expectedVersion:       "v1.2.2",
  2021  			},
  2022  			{
  2023  				name:                  "use cluster.spec.topology.version if one of the machine pools is upgrading, concurrency limit not reached",
  2024  				upgradingMachinePools: []string{"upgrading-mp1"},
  2025  				upgradeConcurrency:    "2",
  2026  				currentMPVersion:      pointer.String("v1.2.2"),
  2027  				topologyVersion:       "v1.2.3",
  2028  				expectedVersion:       "v1.2.3",
  2029  			},
  2030  		}
  2031  		for _, tt := range tests {
  2032  			t.Run(tt.name, func(t *testing.T) {
  2033  				g := NewWithT(t)
  2034  
  2035  				testCluster := cluster.DeepCopy()
  2036  				if testCluster.Annotations == nil {
  2037  					testCluster.Annotations = map[string]string{}
  2038  				}
  2039  				testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency
  2040  
  2041  				s := scope.New(testCluster)
  2042  				s.Blueprint = blueprint
  2043  				s.Blueprint.Topology.Version = tt.topologyVersion
  2044  				s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{
  2045  					Replicas: pointer.Int32(2),
  2046  				}
  2047  				s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{}
  2048  
  2049  				mpsState := scope.MachinePoolsStateMap{}
  2050  				if tt.currentMPVersion != nil {
  2051  					// Testing a case with an existing MachinePool:
  2052  					// add the stable MachinePool to the current MachinePools state.
  2053  					mp := builder.MachinePool("test-namespace", "big-pool-of-machines").
  2054  						WithReplicas(2).
  2055  						WithVersion(*tt.currentMPVersion).
  2056  						WithStatus(expv1.MachinePoolStatus{
  2057  							ObservedGeneration: 2,
  2058  							Replicas:           2,
  2059  							ReadyReplicas:      2,
  2060  							AvailableReplicas:  2,
  2061  						}).
  2062  						Build()
  2063  					mpsState = duplicateMachinePoolsState(mpsState)
  2064  					mpsState["big-pool-of-machines"] = &scope.MachinePoolState{
  2065  						Object: mp,
  2066  					}
  2067  				}
  2068  				s.Current.MachinePools = mpsState
  2069  				s.Current.ControlPlane = &scope.ControlPlaneState{
  2070  					Object: controlPlaneStable123,
  2071  				}
  2072  
  2073  				mpTopology := clusterv1.MachinePoolTopology{
  2074  					Class:    "linux-worker",
  2075  					Name:     "big-pool-of-machines",
  2076  					Replicas: pointer.Int32(2),
  2077  				}
  2078  				s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
  2079  				obj, err := computeMachinePool(ctx, s, mpTopology)
  2080  				g.Expect(err).ToNot(HaveOccurred())
  2081  				g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
  2082  			})
  2083  		}
  2084  	})
  2085  }
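
        // The "Should choose the correct version" subtests above build the same
        // stable control plane twice (once for MachineDeployments, once for
        // MachinePools); the shared shape is sketched here as a hypothetical
        // helper: a control plane is stable when spec.version matches
        // status.version and all replica counters agree.
        func stableControlPlaneSketch(ns, name, version string, replicas int64) *unstructured.Unstructured {
        	return builder.ControlPlane(ns, name).
        		WithSpecFields(map[string]interface{}{
        			"spec.version":  version,
        			"spec.replicas": replicas,
        		}).
        		WithStatusFields(map[string]interface{}{
        			"status.version":         version,
        			"status.replicas":        replicas,
        			"status.updatedReplicas": replicas,
        			"status.readyReplicas":   replicas,
        		}).
        		Build()
        }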
  2086  
  2087  func TestComputeMachineDeploymentVersion(t *testing.T) {
  2088  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
  2089  		Build()
  2090  
  2091  	mdName := "md-1"
  2092  	currentMachineDeploymentState := &scope.MachineDeploymentState{Object: builder.MachineDeployment("test1", mdName).WithVersion("v1.2.2").Build()}
  2093  
  2094  	tests := []struct {
  2095  		name                                 string
  2096  		machineDeploymentTopology            clusterv1.MachineDeploymentTopology
  2097  		currentMachineDeploymentState        *scope.MachineDeploymentState
  2098  		upgradingMachineDeployments          []string
  2099  		upgradeConcurrency                   int
  2100  		controlPlaneStartingUpgrade          bool
  2101  		controlPlaneUpgrading                bool
  2102  		controlPlaneScaling                  bool
  2103  		controlPlaneProvisioning             bool
  2104  		afterControlPlaneUpgradeHookBlocking bool
  2105  		topologyVersion                      string
  2106  		expectedVersion                      string
  2107  		expectPendingCreate                  bool
  2108  		expectPendingUpgrade                 bool
  2109  	}{
  2110  		{
  2111  			name:                          "should return cluster.spec.topology.version if creating a new machine deployment and if control plane is stable - not marked as pending create",
  2112  			currentMachineDeploymentState: nil,
  2113  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2114  				Name: "md-topology-1",
  2115  			},
  2116  			topologyVersion:     "v1.2.3",
  2117  			expectedVersion:     "v1.2.3",
  2118  			expectPendingCreate: false,
  2119  		},
  2120  		{
  2121  			name:                "should return cluster.spec.topology.version if creating a new machine deployment and if control plane is not stable - marked as pending create",
  2122  			controlPlaneScaling: true,
  2123  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2124  				Name: "md-topology-1",
  2125  			},
  2126  			topologyVersion:     "v1.2.3",
  2127  			expectedVersion:     "v1.2.3",
  2128  			expectPendingCreate: true,
  2129  		},
  2130  		{
  2131  			name: "should return machine deployment's spec.template.spec.version if upgrade is deferred",
  2132  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2133  				Metadata: clusterv1.ObjectMeta{
  2134  					Annotations: map[string]string{
  2135  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2136  					},
  2137  				},
  2138  			},
  2139  			currentMachineDeploymentState: currentMachineDeploymentState,
  2140  			upgradingMachineDeployments:   []string{},
  2141  			topologyVersion:               "v1.2.3",
  2142  			expectedVersion:               "v1.2.2",
  2143  			expectPendingUpgrade:          true,
  2144  		},
  2145  		{
  2146  			// Control plane is considered upgrading if the control plane's spec.version and status.version are not equal.
  2147  			name:                          "should return machine deployment's spec.template.spec.version if control plane is upgrading",
  2148  			currentMachineDeploymentState: currentMachineDeploymentState,
  2149  			upgradingMachineDeployments:   []string{},
  2150  			controlPlaneUpgrading:         true,
  2151  			topologyVersion:               "v1.2.3",
  2152  			expectedVersion:               "v1.2.2",
  2153  			expectPendingUpgrade:          true,
  2154  		},
  2155  		{
  2156  			// Control plane is considered ready to upgrade if the spec.version values of the current and desired control planes are not equal.
  2157  			name:                          "should return machine deployment's spec.template.spec.version if control plane is starting upgrade",
  2158  			currentMachineDeploymentState: currentMachineDeploymentState,
  2159  			upgradingMachineDeployments:   []string{},
  2160  			controlPlaneStartingUpgrade:   true,
  2161  			topologyVersion:               "v1.2.3",
  2162  			expectedVersion:               "v1.2.2",
  2163  			expectPendingUpgrade:          true,
  2164  		},
  2165  		{
  2166  			// Control plane is considered scaling if its spec.replicas differs from any of status.replicas, status.readyReplicas or status.updatedReplicas.
  2167  			name:                          "should return machine deployment's spec.template.spec.version if control plane is scaling",
  2168  			currentMachineDeploymentState: currentMachineDeploymentState,
  2169  			upgradingMachineDeployments:   []string{},
  2170  			controlPlaneScaling:           true,
  2171  			topologyVersion:               "v1.2.3",
  2172  			expectedVersion:               "v1.2.2",
  2173  			expectPendingUpgrade:          true,
  2174  		},
  2175  		{
  2176  			name:                          "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the machine deployments are upgrading",
  2177  			currentMachineDeploymentState: currentMachineDeploymentState,
  2178  			upgradingMachineDeployments:   []string{},
  2179  			topologyVersion:               "v1.2.3",
  2180  			expectedVersion:               "v1.2.3",
  2181  			expectPendingUpgrade:          false,
  2182  		},
  2183  		{
  2184  			name:                                 "should return machine deployment's spec.template.spec.version if control plane is stable, other machine deployments are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking",
  2185  			currentMachineDeploymentState:        currentMachineDeploymentState,
  2186  			upgradingMachineDeployments:          []string{"upgrading-md1"},
  2187  			upgradeConcurrency:                   2,
  2188  			afterControlPlaneUpgradeHookBlocking: true,
  2189  			topologyVersion:                      "v1.2.3",
  2190  			expectedVersion:                      "v1.2.2",
  2191  			expectPendingUpgrade:                 true,
  2192  		},
  2193  		{
  2194  			name:                          "should return cluster.spec.topology.version if control plane is stable, other machine deployments are upgrading, concurrency limit not reached",
  2195  			currentMachineDeploymentState: currentMachineDeploymentState,
  2196  			upgradingMachineDeployments:   []string{"upgrading-md1"},
  2197  			upgradeConcurrency:            2,
  2198  			topologyVersion:               "v1.2.3",
  2199  			expectedVersion:               "v1.2.3",
  2200  			expectPendingUpgrade:          false,
  2201  		},
  2202  		{
  2203  			name:                          "should return machine deployment's spec.template.spec.version if control plane is stable, other machine deployments are upgrading, concurrency limit reached",
  2204  			currentMachineDeploymentState: currentMachineDeploymentState,
  2205  			upgradingMachineDeployments:   []string{"upgrading-md1", "upgrading-md2"},
  2206  			upgradeConcurrency:            2,
  2207  			topologyVersion:               "v1.2.3",
  2208  			expectedVersion:               "v1.2.2",
  2209  			expectPendingUpgrade:          true,
  2210  		},
  2211  	}
  2212  
  2213  	for _, tt := range tests {
  2214  		t.Run(tt.name, func(t *testing.T) {
  2215  			g := NewWithT(t)
  2216  
  2217  			s := &scope.Scope{
  2218  				Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  2219  					Version: tt.topologyVersion,
  2220  					ControlPlane: clusterv1.ControlPlaneTopology{
  2221  						Replicas: pointer.Int32(2),
  2222  					},
  2223  					Workers: &clusterv1.WorkersTopology{},
  2224  				}},
  2225  				Current: &scope.ClusterState{
  2226  					ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj},
  2227  				},
  2228  				UpgradeTracker:      scope.NewUpgradeTracker(scope.MaxMDUpgradeConcurrency(tt.upgradeConcurrency)),
  2229  				HookResponseTracker: scope.NewHookResponseTracker(),
  2230  			}
  2231  			if tt.afterControlPlaneUpgradeHookBlocking {
  2232  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{
  2233  					CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  2234  						RetryAfterSeconds: 10,
  2235  					},
  2236  				})
  2237  			}
  2238  			s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade
  2239  			s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading
  2240  			s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
  2241  			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
  2242  			s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
  2243  			version := computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
  2244  			g.Expect(version).To(Equal(tt.expectedVersion))
  2245  
  2246  			if tt.currentMachineDeploymentState != nil {
  2247  				// Verify that if the upgrade is pending it is captured in the upgrade tracker.
  2248  				if tt.expectPendingUpgrade {
  2249  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingUpgrade(mdName)).To(BeTrue(), "MachineDeployment should be marked as pending upgrade")
  2250  				} else {
  2251  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingUpgrade(mdName)).To(BeFalse(), "MachineDeployment should not be marked as pending upgrade")
  2252  				}
  2253  			} else {
  2254  				// Verify that if the create is pending it is captured in the upgrade tracker.
  2255  				if tt.expectPendingCreate {
  2256  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingCreate(tt.machineDeploymentTopology.Name)).To(BeTrue(), "MachineDeployment topology should be marked as pending create")
  2257  				} else {
  2258  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingCreate(tt.machineDeploymentTopology.Name)).To(BeFalse(), "MachineDeployment topology should not be marked as pending create")
  2259  				}
  2260  			}
  2261  		})
  2262  	}
  2263  }
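
        // TestComputeMachineDeploymentVersion above and TestComputeMachinePoolVersion
        // below simulate a blocking AfterControlPlaneUpgrade extension with the same
        // literal; the shared shape is sketched here as a hypothetical helper. A
        // non-zero RetryAfterSeconds is what the HookResponseTracker is assumed to
        // treat as blocking.
        func blockingAfterControlPlaneUpgradeResponseSketch(retryAfterSeconds int32) *runtimehooksv1.AfterControlPlaneUpgradeResponse {
        	return &runtimehooksv1.AfterControlPlaneUpgradeResponse{
        		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
        			RetryAfterSeconds: retryAfterSeconds,
        		},
        	}
        }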
  2264  
  2265  func TestComputeMachinePoolVersion(t *testing.T) {
  2266  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
  2267  		Build()
  2268  
  2269  	mpName := "mp-1"
  2270  	currentMachinePoolState := &scope.MachinePoolState{Object: builder.MachinePool("test1", mpName).WithVersion("v1.2.2").Build()}
  2271  
  2272  	tests := []struct {
  2273  		name                                 string
  2274  		machinePoolTopology                  clusterv1.MachinePoolTopology
  2275  		currentMachinePoolState              *scope.MachinePoolState
  2276  		upgradingMachinePools                []string
  2277  		upgradeConcurrency                   int
  2278  		controlPlaneStartingUpgrade          bool
  2279  		controlPlaneUpgrading                bool
  2280  		controlPlaneScaling                  bool
  2281  		controlPlaneProvisioning             bool
  2282  		afterControlPlaneUpgradeHookBlocking bool
  2283  		topologyVersion                      string
  2284  		expectedVersion                      string
  2285  		expectPendingCreate                  bool
  2286  		expectPendingUpgrade                 bool
  2287  	}{
  2288  		{
  2289  			name:                    "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is stable - not marked as pending create",
  2290  			currentMachinePoolState: nil,
  2291  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2292  				Name: "mp-topology-1",
  2293  			},
  2294  			topologyVersion:     "v1.2.3",
  2295  			expectedVersion:     "v1.2.3",
  2296  			expectPendingCreate: false,
  2297  		},
  2298  		{
  2299  			name:                "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is not stable - marked as pending create",
  2300  			controlPlaneScaling: true,
  2301  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2302  				Name: "mp-topology-1",
  2303  			},
  2304  			topologyVersion:     "v1.2.3",
  2305  			expectedVersion:     "v1.2.3",
  2306  			expectPendingCreate: true,
  2307  		},
  2308  		{
  2309  			name: "should return MachinePool's spec.template.spec.version if upgrade is deferred",
  2310  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2311  				Metadata: clusterv1.ObjectMeta{
  2312  					Annotations: map[string]string{
  2313  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2314  					},
  2315  				},
  2316  			},
  2317  			currentMachinePoolState: currentMachinePoolState,
  2318  			upgradingMachinePools:   []string{},
  2319  			topologyVersion:         "v1.2.3",
  2320  			expectedVersion:         "v1.2.2",
  2321  			expectPendingUpgrade:    true,
  2322  		},
  2323  		{
  2324  			// Control plane is considered upgrading if the control plane's spec.version and status.version are not equal.
  2325  			name:                    "should return MachinePool's spec.template.spec.version if control plane is upgrading",
  2326  			currentMachinePoolState: currentMachinePoolState,
  2327  			upgradingMachinePools:   []string{},
  2328  			controlPlaneUpgrading:   true,
  2329  			topologyVersion:         "v1.2.3",
  2330  			expectedVersion:         "v1.2.2",
  2331  			expectPendingUpgrade:    true,
  2332  		},
  2333  		{
  2334  			// Control plane is considered ready to upgrade if the spec.version values of the current and desired control planes are not equal.
  2335  			name:                        "should return MachinePool's spec.template.spec.version if control plane is starting upgrade",
  2336  			currentMachinePoolState:     currentMachinePoolState,
  2337  			upgradingMachinePools:       []string{},
  2338  			controlPlaneStartingUpgrade: true,
  2339  			topologyVersion:             "v1.2.3",
  2340  			expectedVersion:             "v1.2.2",
  2341  			expectPendingUpgrade:        true,
  2342  		},
  2343  		{
  2344  			// Control plane is considered scaling if its spec.replicas differs from any of status.replicas, status.readyReplicas or status.updatedReplicas.
  2345  			name:                    "should return MachinePool's spec.template.spec.version if control plane is scaling",
  2346  			currentMachinePoolState: currentMachinePoolState,
  2347  			upgradingMachinePools:   []string{},
  2348  			controlPlaneScaling:     true,
  2349  			topologyVersion:         "v1.2.3",
  2350  			expectedVersion:         "v1.2.2",
  2351  			expectPendingUpgrade:    true,
  2352  		},
  2353  		{
  2354  			name:                    "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the MachinePools are upgrading",
  2355  			currentMachinePoolState: currentMachinePoolState,
  2356  			upgradingMachinePools:   []string{},
  2357  			topologyVersion:         "v1.2.3",
  2358  			expectedVersion:         "v1.2.3",
  2359  			expectPendingUpgrade:    false,
  2360  		},
  2361  		{
  2362  			name:                                 "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking",
  2363  			currentMachinePoolState:              currentMachinePoolState,
  2364  			upgradingMachinePools:                []string{"upgrading-mp1"},
  2365  			upgradeConcurrency:                   2,
  2366  			afterControlPlaneUpgradeHookBlocking: true,
  2367  			topologyVersion:                      "v1.2.3",
  2368  			expectedVersion:                      "v1.2.2",
  2369  			expectPendingUpgrade:                 true,
  2370  		},
  2371  		{
  2372  			name:                    "should return cluster.spec.topology.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached",
  2373  			currentMachinePoolState: currentMachinePoolState,
  2374  			upgradingMachinePools:   []string{"upgrading-mp1"},
  2375  			upgradeConcurrency:      2,
  2376  			topologyVersion:         "v1.2.3",
  2377  			expectedVersion:         "v1.2.3",
  2378  			expectPendingUpgrade:    false,
  2379  		},
  2380  		{
  2381  			name:                    "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit reached",
  2382  			currentMachinePoolState: currentMachinePoolState,
  2383  			upgradingMachinePools:   []string{"upgrading-mp1", "upgrading-mp2"},
  2384  			upgradeConcurrency:      2,
  2385  			topologyVersion:         "v1.2.3",
  2386  			expectedVersion:         "v1.2.2",
  2387  			expectPendingUpgrade:    true,
  2388  		},
  2389  	}
  2390  
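        	// Each case builds a Scope carrying the desired topology version and the
        	// control-plane flags, runs computeMachinePoolVersion, and then checks
        	// both the returned version and the pending create/upgrade bookkeeping
        	// in the UpgradeTracker.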
  2391  	for _, tt := range tests {
  2392  		t.Run(tt.name, func(t *testing.T) {
  2393  			g := NewWithT(t)
  2394  
  2395  			s := &scope.Scope{
  2396  				Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  2397  					Version: tt.topologyVersion,
  2398  					ControlPlane: clusterv1.ControlPlaneTopology{
  2399  						Replicas: pointer.Int32(2),
  2400  					},
  2401  					Workers: &clusterv1.WorkersTopology{},
  2402  				}},
  2403  				Current: &scope.ClusterState{
  2404  					ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj},
  2405  				},
  2406  				UpgradeTracker:      scope.NewUpgradeTracker(scope.MaxMPUpgradeConcurrency(tt.upgradeConcurrency)),
  2407  				HookResponseTracker: scope.NewHookResponseTracker(),
  2408  			}
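        			// A response with RetryAfterSeconds > 0 registers the
        			// AfterControlPlaneUpgrade hook as blocking, which must hold
        			// MachinePool upgrades at their current version.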
  2409  			if tt.afterControlPlaneUpgradeHookBlocking {
  2410  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{
  2411  					CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  2412  						RetryAfterSeconds: 10,
  2413  					},
  2414  				})
  2415  			}
  2416  			s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade
  2417  			s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading
  2418  			s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
  2419  			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
  2420  			s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
  2421  			version := computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
  2422  			g.Expect(version).To(Equal(tt.expectedVersion))
  2423  
  2424  			if tt.currentMachinePoolState != nil {
  2425  				// Verify that if the upgrade is pending it is captured in the upgrade tracker.
  2426  				if tt.expectPendingUpgrade {
  2427  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeTrue(), "MachinePool should be marked as pending upgrade")
  2428  				} else {
  2429  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeFalse(), "MachinePool should not be marked as pending upgrade")
  2430  				}
  2431  			} else {
  2432  				// Verify that if the create is pending it is captured in the upgrade tracker.
  2433  				if tt.expectPendingCreate {
  2434  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeTrue(), "MachinePool topology should be marked as pending create")
  2435  				} else {
  2436  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeFalse(), "MachinePool topology should not be marked as pending create")
  2437  				}
  2438  			}
  2439  		})
  2440  	}
  2441  }
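
        // The table above exercises roughly the following decision flow. This is
        // a minimal sketch for orientation only, not the actual implementation;
        // sketchPickMPVersion and the helpers it calls are illustrative names.
        //
        //	func sketchPickMPVersion(s *scope.Scope, mp clusterv1.MachinePoolTopology, current *scope.MachinePoolState) string {
        //		desired := s.Blueprint.Topology.Version
        //		if current == nil {
        //			// New MachinePools always get the topology version; they are
        //			// additionally marked pending create while the control plane
        //			// is not stable.
        //			return desired
        //		}
        //		cp := s.UpgradeTracker.ControlPlane
        //		hold := cp.IsStartingUpgrade || cp.IsUpgrading || cp.IsScaling ||
        //			upgradeIsDeferred(mp) || // e.g. the defer-upgrade annotation
        //			afterControlPlaneUpgradeHookIsBlocking(s) ||
        //			upgradeConcurrencyLimitReached(s)
        //		if hold {
        //			// Marked pending upgrade; stays at spec.template.spec.version.
        //			return currentVersion(current)
        //		}
        //		return desired
        //	}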
  2442  
  2443  func TestIsMachineDeploymentDeferred(t *testing.T) {
  2444  	clusterTopology := &clusterv1.Topology{
  2445  		Workers: &clusterv1.WorkersTopology{
  2446  			MachineDeployments: []clusterv1.MachineDeploymentTopology{
  2447  				{
  2448  					Name: "md-with-defer-upgrade",
  2449  					Metadata: clusterv1.ObjectMeta{
  2450  						Annotations: map[string]string{
  2451  							clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2452  						},
  2453  					},
  2454  				},
  2455  				{
  2456  					Name: "md-without-annotations",
  2457  				},
  2458  				{
  2459  					Name: "md-with-hold-upgrade-sequence",
  2460  					Metadata: clusterv1.ObjectMeta{
  2461  						Annotations: map[string]string{
  2462  							clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2463  						},
  2464  					},
  2465  				},
  2466  				{
  2467  					Name: "md-after-md-with-hold-upgrade-sequence",
  2468  				},
  2469  			},
  2470  		},
  2471  	}
  2472  
  2473  	tests := []struct {
  2474  		name       string
  2475  		mdTopology clusterv1.MachineDeploymentTopology
  2476  		deferred   bool
  2477  	}{
  2478  		{
  2479  			name: "MD with defer-upgrade annotation is deferred",
  2480  			mdTopology: clusterv1.MachineDeploymentTopology{
  2481  				Name: "md-with-defer-upgrade",
  2482  				Metadata: clusterv1.ObjectMeta{
  2483  					Annotations: map[string]string{
  2484  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2485  					},
  2486  				},
  2487  			},
  2488  			deferred: true,
  2489  		},
  2490  		{
  2491  			name: "MD without annotations is not deferred",
  2492  			mdTopology: clusterv1.MachineDeploymentTopology{
  2493  				Name: "md-without-annotations",
  2494  			},
  2495  			deferred: false,
  2496  		},
  2497  		{
  2498  			name: "MD with hold-upgrade-sequence annotation is deferred",
  2499  			mdTopology: clusterv1.MachineDeploymentTopology{
  2500  				Name: "md-with-hold-upgrade-sequence",
  2501  				Metadata: clusterv1.ObjectMeta{
  2502  					Annotations: map[string]string{
  2503  						clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2504  					},
  2505  				},
  2506  			},
  2507  			deferred: true,
  2508  		},
  2509  		{
  2510  			name: "MD after MD with hold-upgrade-sequence is deferred",
  2511  			mdTopology: clusterv1.MachineDeploymentTopology{
  2512  				Name: "md-after-md-with-hold-upgrade-sequence",
  2513  			},
  2514  			deferred: true,
  2515  		},
  2516  	}
  2517  
  2518  	for _, tt := range tests {
  2519  		t.Run(tt.name, func(t *testing.T) {
  2520  			g := NewWithT(t)
  2521  			g.Expect(isMachineDeploymentDeferred(clusterTopology, tt.mdTopology)).To(Equal(tt.deferred))
  2522  		})
  2523  	}
  2524  }
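
        // The expectations above encode the following rule, sketched here with an
        // illustrative helper (not the actual implementation): defer-upgrade
        // defers only the annotated topology, while hold-upgrade-sequence defers
        // the annotated topology and every topology after it in the workers list.
        //
        //	func sketchIsDeferred(topology *clusterv1.Topology, name string) bool {
        //		for _, md := range topology.Workers.MachineDeployments {
        //			if _, ok := md.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
        //				return true // holds this MD and every MD after it
        //			}
        //			if md.Name == name {
        //				_, ok := md.Metadata.Annotations[clusterv1.ClusterTopologyDeferUpgradeAnnotation]
        //				return ok
        //			}
        //		}
        //		return false
        //	}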
  2525  
  2526  func TestIsMachinePoolDeferred(t *testing.T) {
  2527  	clusterTopology := &clusterv1.Topology{
  2528  		Workers: &clusterv1.WorkersTopology{
  2529  			MachinePools: []clusterv1.MachinePoolTopology{
  2530  				{
  2531  					Name: "mp-with-defer-upgrade",
  2532  					Metadata: clusterv1.ObjectMeta{
  2533  						Annotations: map[string]string{
  2534  							clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2535  						},
  2536  					},
  2537  				},
  2538  				{
  2539  					Name: "mp-without-annotations",
  2540  				},
  2541  				{
  2542  					Name: "mp-with-hold-upgrade-sequence",
  2543  					Metadata: clusterv1.ObjectMeta{
  2544  						Annotations: map[string]string{
  2545  							clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2546  						},
  2547  					},
  2548  				},
  2549  				{
  2550  					Name: "mp-after-mp-with-hold-upgrade-sequence",
  2551  				},
  2552  			},
  2553  		},
  2554  	}
  2555  
  2556  	tests := []struct {
  2557  		name       string
  2558  		mpTopology clusterv1.MachinePoolTopology
  2559  		deferred   bool
  2560  	}{
  2561  		{
  2562  			name: "MP with defer-upgrade annotation is deferred",
  2563  			mpTopology: clusterv1.MachinePoolTopology{
  2564  				Name: "mp-with-defer-upgrade",
  2565  				Metadata: clusterv1.ObjectMeta{
  2566  					Annotations: map[string]string{
  2567  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2568  					},
  2569  				},
  2570  			},
  2571  			deferred: true,
  2572  		},
  2573  		{
  2574  			name: "MP without annotations is not deferred",
  2575  			mpTopology: clusterv1.MachinePoolTopology{
  2576  				Name: "mp-without-annotations",
  2577  			},
  2578  			deferred: false,
  2579  		},
  2580  		{
  2581  			name: "MP with hold-upgrade-sequence annotation is deferred",
  2582  			mpTopology: clusterv1.MachinePoolTopology{
  2583  				Name: "mp-with-hold-upgrade-sequence",
  2584  				Metadata: clusterv1.ObjectMeta{
  2585  					Annotations: map[string]string{
  2586  						clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2587  					},
  2588  				},
  2589  			},
  2590  			deferred: true,
  2591  		},
  2592  		{
  2593  			name: "MP after MP with hold-upgrade-sequence is deferred",
  2594  			mpTopology: clusterv1.MachinePoolTopology{
  2595  				Name: "mp-after-mp-with-hold-upgrade-sequence",
  2596  			},
  2597  			deferred: true,
  2598  		},
  2599  	}
  2600  
  2601  	for _, tt := range tests {
  2602  		t.Run(tt.name, func(t *testing.T) {
  2603  			g := NewWithT(t)
  2604  			g.Expect(isMachinePoolDeferred(clusterTopology, tt.mpTopology)).To(Equal(tt.deferred))
  2605  		})
  2606  	}
  2607  }
  2608  
  2609  func TestTemplateToObject(t *testing.T) {
  2610  	template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate").
  2611  		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
  2612  		Build()
  2613  	cluster := &clusterv1.Cluster{
  2614  		ObjectMeta: metav1.ObjectMeta{
  2615  			Name:      "cluster1",
  2616  			Namespace: metav1.NamespaceDefault,
  2617  		},
  2618  	}
  2619  
  2620  	t.Run("Generates an object from a template", func(t *testing.T) {
  2621  		g := NewWithT(t)
  2622  		obj, err := templateToObject(templateToInput{
  2623  			template:              template,
  2624  			templateClonedFromRef: fakeRef1,
  2625  			cluster:               cluster,
  2626  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2627  			currentObjectRef:      nil,
  2628  		})
  2629  		g.Expect(err).ToNot(HaveOccurred())
  2630  		g.Expect(obj).ToNot(BeNil())
  2631  
  2632  		assertTemplateToObject(g, assertTemplateInput{
  2633  			cluster:     cluster,
  2634  			templateRef: fakeRef1,
  2635  			template:    template,
  2636  			currentRef:  nil,
  2637  			obj:         obj,
  2638  		})
  2639  	})
  2640  	t.Run("Overrides the generated name if there is already a reference", func(t *testing.T) {
  2641  		g := NewWithT(t)
  2642  		obj, err := templateToObject(templateToInput{
  2643  			template:              template,
  2644  			templateClonedFromRef: fakeRef1,
  2645  			cluster:               cluster,
  2646  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2647  			currentObjectRef:      fakeRef2,
  2648  		})
  2649  		g.Expect(err).ToNot(HaveOccurred())
  2650  		g.Expect(obj).ToNot(BeNil())
  2651  
  2652  		// ObjectMeta
  2653  		assertTemplateToObject(g, assertTemplateInput{
  2654  			cluster:     cluster,
  2655  			templateRef: fakeRef1,
  2656  			template:    template,
  2657  			currentRef:  fakeRef2,
  2658  			obj:         obj,
  2659  		})
  2660  	})
  2661  }
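
        // For reference, templateToObject derives a standalone object from a
        // template: the "Template" suffix is trimmed from the kind and the
        // template's spec.template.spec is hoisted to the object's spec. With the
        // template above, the generated object looks roughly like (shape only):
        //
        //	apiVersion: <same as the template>
        //	kind:       <template kind with the "Template" suffix trimmed>
        //	metadata:
        //	  name:      cluster1-<generated suffix>  # or the currentObjectRef name, if set
        //	  namespace: default
        //	spec:
        //	  fakeSetting: true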
  2662  
  2663  func TestTemplateToTemplate(t *testing.T) {
  2664  	template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate").
  2665  		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
  2666  		Build()
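        	// Set the last-applied-config annotation on the source template;
        	// assertTemplateToTemplate verifies below that it is not copied to the
        	// generated template.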
  2667  	annotations := template.GetAnnotations()
  2668  	if annotations == nil {
  2669  		annotations = map[string]string{}
  2670  	}
  2671  	annotations[corev1.LastAppliedConfigAnnotation] = "foo"
  2672  	template.SetAnnotations(annotations)
  2673  
  2674  	cluster := &clusterv1.Cluster{
  2675  		ObjectMeta: metav1.ObjectMeta{
  2676  			Name:      "cluster1",
  2677  			Namespace: metav1.NamespaceDefault,
  2678  		},
  2679  	}
  2680  
  2681  	t.Run("Generates a template from a template", func(t *testing.T) {
  2682  		g := NewWithT(t)
  2683  		obj, err := templateToTemplate(templateToInput{
  2684  			template:              template,
  2685  			templateClonedFromRef: fakeRef1,
  2686  			cluster:               cluster,
  2687  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2688  			currentObjectRef:      nil,
  2689  		})
  2690  		g.Expect(err).ToNot(HaveOccurred())
  2691  		g.Expect(obj).ToNot(BeNil())
  2692  		assertTemplateToTemplate(g, assertTemplateInput{
  2693  			cluster:     cluster,
  2694  			templateRef: fakeRef1,
  2695  			template:    template,
  2696  			currentRef:  nil,
  2697  			obj:         obj,
  2698  		})
  2699  	})
  2700  	t.Run("Overrides the generated name if there is already a reference", func(t *testing.T) {
  2701  		g := NewWithT(t)
  2702  		obj, err := templateToTemplate(templateToInput{
  2703  			template:              template,
  2704  			templateClonedFromRef: fakeRef1,
  2705  			cluster:               cluster,
  2706  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2707  			currentObjectRef:      fakeRef2,
  2708  		})
  2709  		g.Expect(err).ToNot(HaveOccurred())
  2710  		g.Expect(obj).ToNot(BeNil())
  2711  		assertTemplateToTemplate(g, assertTemplateInput{
  2712  			cluster:     cluster,
  2713  			templateRef: fakeRef1,
  2714  			template:    template,
  2715  			currentRef:  fakeRef2,
  2716  			obj:         obj,
  2717  		})
  2718  	})
  2719  }
  2720  
  2721  type assertTemplateInput struct {
  2722  	cluster             *clusterv1.Cluster
  2723  	templateRef         *corev1.ObjectReference
  2724  	template            *unstructured.Unstructured
  2725  	labels, annotations map[string]string
  2726  	currentRef          *corev1.ObjectReference
  2727  	obj                 *unstructured.Unstructured
  2728  }
  2729  
  2730  func assertTemplateToObject(g *WithT, in assertTemplateInput) {
  2731  	// TypeMeta
  2732  	g.Expect(in.obj.GetAPIVersion()).To(Equal(in.template.GetAPIVersion()))
  2733  	g.Expect(in.obj.GetKind()).To(Equal(strings.TrimSuffix(in.template.GetKind(), "Template")))
  2734  
  2735  	// ObjectMeta
  2736  	if in.currentRef != nil {
  2737  		g.Expect(in.obj.GetName()).To(Equal(in.currentRef.Name))
  2738  	} else {
  2739  		g.Expect(in.obj.GetName()).To(HavePrefix(in.cluster.Name))
  2740  	}
  2741  	g.Expect(in.obj.GetNamespace()).To(Equal(in.cluster.Namespace))
  2742  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, in.cluster.Name))
  2743  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  2744  	for k, v := range in.labels {
  2745  		g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(k, v))
  2746  	}
  2747  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, in.templateRef.GroupVersionKind().GroupKind().String()))
  2748  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, in.templateRef.Name))
  2749  	for k, v := range in.annotations {
  2750  		g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(k, v))
  2751  	}
  2752  	// Spec
  2753  	expectedSpec, ok, err := unstructured.NestedMap(in.template.UnstructuredContent(), "spec", "template", "spec")
  2754  	g.Expect(err).ToNot(HaveOccurred())
  2755  	g.Expect(ok).To(BeTrue())
  2756  
  2757  	cloneSpec, ok, err := unstructured.NestedMap(in.obj.UnstructuredContent(), "spec")
  2758  	g.Expect(err).ToNot(HaveOccurred())
  2759  	g.Expect(ok).To(BeTrue())
  2760  	for k, v := range expectedSpec {
  2761  		g.Expect(cloneSpec).To(HaveKeyWithValue(k, v))
  2762  	}
  2763  }
  2764  
  2765  func assertTemplateToTemplate(g *WithT, in assertTemplateInput) {
  2766  	// TypeMeta
  2767  	g.Expect(in.obj.GetAPIVersion()).To(Equal(in.template.GetAPIVersion()))
  2768  	g.Expect(in.obj.GetKind()).To(Equal(in.template.GetKind()))
  2769  
  2770  	// ObjectMeta
  2771  	if in.currentRef != nil {
  2772  		g.Expect(in.obj.GetName()).To(Equal(in.currentRef.Name))
  2773  	} else {
  2774  		g.Expect(in.obj.GetName()).To(HavePrefix(in.cluster.Name))
  2775  	}
  2776  	g.Expect(in.obj.GetNamespace()).To(Equal(in.cluster.Namespace))
  2777  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, in.cluster.Name))
  2778  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  2779  	for k, v := range in.labels {
  2780  		g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(k, v))
  2781  	}
  2782  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, in.templateRef.GroupVersionKind().GroupKind().String()))
  2783  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, in.templateRef.Name))
  2784  	g.Expect(in.obj.GetAnnotations()).ToNot(HaveKey(corev1.LastAppliedConfigAnnotation))
  2785  	for k, v := range in.annotations {
  2786  		g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(k, v))
  2787  	}
  2788  	// Spec
  2789  	expectedSpec, ok, err := unstructured.NestedMap(in.template.UnstructuredContent(), "spec")
  2790  	g.Expect(err).ToNot(HaveOccurred())
  2791  	g.Expect(ok).To(BeTrue())
  2792  
  2793  	cloneSpec, ok, err := unstructured.NestedMap(in.obj.UnstructuredContent(), "spec")
  2794  	g.Expect(err).ToNot(HaveOccurred())
  2795  	g.Expect(ok).To(BeTrue())
  2796  	g.Expect(cloneSpec).To(BeComparableTo(expectedSpec))
  2797  }
  2798  
  2799  func assertNestedField(g *WithT, obj *unstructured.Unstructured, value interface{}, fields ...string) {
  2800  	v, ok, err := unstructured.NestedFieldCopy(obj.UnstructuredContent(), fields...)
  2801  
  2802  	g.Expect(err).ToNot(HaveOccurred())
  2803  	g.Expect(ok).To(BeTrue())
  2804  	g.Expect(v).To(BeComparableTo(value))
  2805  }
  2806  
  2807  func assertNestedFieldUnset(g *WithT, obj *unstructured.Unstructured, fields ...string) {
  2808  	_, ok, err := unstructured.NestedFieldCopy(obj.UnstructuredContent(), fields...)
  2809  
  2810  	g.Expect(err).ToNot(HaveOccurred())
  2811  	g.Expect(ok).To(BeFalse())
  2812  }
  2813  
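        // duplicateMachineDeploymentsState returns a shallow copy of the map:
        // entries can be added to or removed from the copy without affecting the
        // original, but both maps share the same state values.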
  2814  func duplicateMachineDeploymentsState(s scope.MachineDeploymentsStateMap) scope.MachineDeploymentsStateMap {
  2815  	n := make(scope.MachineDeploymentsStateMap)
  2816  	for k, v := range s {
  2817  		n[k] = v
  2818  	}
  2819  	return n
  2820  }
  2821  
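        // duplicateMachinePoolsState returns a shallow copy of the map, with the
        // same sharing semantics as duplicateMachineDeploymentsState above.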
  2822  func duplicateMachinePoolsState(s scope.MachinePoolsStateMap) scope.MachinePoolsStateMap {
  2823  	n := make(scope.MachinePoolsStateMap)
  2824  	for k, v := range s {
  2825  		n[k] = v
  2826  	}
  2827  	return n
  2828  }
  2829  
  2830  func TestMergeMap(t *testing.T) {
  2831  	t.Run("Merge maps", func(t *testing.T) {
  2832  		g := NewWithT(t)
  2833  
  2834  		m := util.MergeMap(
  2835  			map[string]string{
  2836  				"a": "a",
  2837  				"b": "b",
  2838  			}, map[string]string{
  2839  				"a": "ax",
  2840  				"c": "c",
  2841  			},
  2842  		)
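        		// Keys present in both maps keep the value from the first map.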
  2843  		g.Expect(m).To(HaveKeyWithValue("a", "a"))
  2844  		g.Expect(m).To(HaveKeyWithValue("b", "b"))
  2845  		g.Expect(m).To(HaveKeyWithValue("c", "c"))
  2846  	})
  2847  	t.Run("Nils empty maps", func(t *testing.T) {
  2848  		g := NewWithT(t)
  2849  
  2850  		m := util.MergeMap(map[string]string{}, map[string]string{})
  2851  		g.Expect(m).To(BeNil())
  2852  	})
  2853  }
  2854  
  2855  func Test_computeMachineHealthCheck(t *testing.T) {
  2856  	maxUnhealthyValue := intstr.FromString("100%")
  2857  	mhcSpec := &clusterv1.MachineHealthCheckClass{
  2858  		UnhealthyConditions: []clusterv1.UnhealthyCondition{
  2859  			{
  2860  				Type:    corev1.NodeReady,
  2861  				Status:  corev1.ConditionUnknown,
  2862  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2863  			},
  2864  			{
  2865  				Type:    corev1.NodeReady,
  2866  				Status:  corev1.ConditionFalse,
  2867  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2868  			},
  2869  		},
  2870  		NodeStartupTimeout: &metav1.Duration{
  2871  			Duration: time.Duration(1)},
  2872  	}
  2873  	selector := &metav1.LabelSelector{MatchLabels: map[string]string{
  2874  		"foo": "bar",
  2875  	}}
  2876  	healthCheckTarget := builder.MachineDeployment("ns1", "md1").Build()
  2877  	clusterName := "cluster1"
  2878  	want := &clusterv1.MachineHealthCheck{
  2879  		TypeMeta: metav1.TypeMeta{
  2880  			Kind:       clusterv1.GroupVersion.WithKind("MachineHealthCheck").Kind,
  2881  			APIVersion: clusterv1.GroupVersion.String(),
  2882  		},
  2883  		ObjectMeta: metav1.ObjectMeta{
  2884  			Name:      "md1",
  2885  			Namespace: "ns1",
  2886  			// The label is added when values are defaulted via MachineHealthCheck.Default()
  2887  			Labels: map[string]string{
  2888  				"cluster.x-k8s.io/cluster-name":     "cluster1",
  2889  				clusterv1.ClusterTopologyOwnedLabel: "",
  2890  			},
  2891  		},
  2892  		Spec: clusterv1.MachineHealthCheckSpec{
  2893  			ClusterName: "cluster1",
  2894  			Selector: metav1.LabelSelector{MatchLabels: map[string]string{
  2895  				"foo": "bar",
  2896  			}},
  2897  			// MaxUnhealthy is set when values are defaulted via MachineHealthCheck.Default()
  2898  			MaxUnhealthy: &maxUnhealthyValue,
  2899  			UnhealthyConditions: []clusterv1.UnhealthyCondition{
  2900  				{
  2901  					Type:    corev1.NodeReady,
  2902  					Status:  corev1.ConditionUnknown,
  2903  					Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2904  				},
  2905  				{
  2906  					Type:    corev1.NodeReady,
  2907  					Status:  corev1.ConditionFalse,
  2908  					Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2909  				},
  2910  			},
  2911  			NodeStartupTimeout: &metav1.Duration{
  2912  				Duration: time.Duration(1)},
  2913  		},
  2914  	}
  2915  
  2916  	t.Run("set all fields correctly", func(t *testing.T) {
  2917  		g := NewWithT(t)
  2918  
  2919  		got := computeMachineHealthCheck(ctx, healthCheckTarget, selector, clusterName, mhcSpec)
  2920  
  2921  		g.Expect(got).To(BeComparableTo(want), cmp.Diff(got, want))
  2922  	})
  2923  }
  2924  
  2925  func TestCalculateRefDesiredAPIVersion(t *testing.T) {
  2926  	tests := []struct {
  2927  		name                    string
  2928  		currentRef              *corev1.ObjectReference
  2929  		desiredReferencedObject *unstructured.Unstructured
  2930  		want                    *corev1.ObjectReference
  2931  		wantErr                 bool
  2932  	}{
  2933  		{
  2934  			name: "Return desired ref if current ref is nil",
  2935  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  2936  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2937  				"kind":       "DockerCluster",
  2938  				"metadata": map[string]interface{}{
  2939  					"name":      "my-cluster-abc",
  2940  					"namespace": metav1.NamespaceDefault,
  2941  				},
  2942  			}},
  2943  			want: &corev1.ObjectReference{
  2944  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2945  				Kind:       "DockerCluster",
  2946  				Name:       "my-cluster-abc",
  2947  				Namespace:  metav1.NamespaceDefault,
  2948  			},
  2949  		},
  2950  		{
  2951  			name: "Error for invalid apiVersion",
  2952  			currentRef: &corev1.ObjectReference{
  2953  				APIVersion: "invalid/api/version",
  2954  				Kind:       "DockerCluster",
  2955  				Name:       "my-cluster-abc",
  2956  				Namespace:  metav1.NamespaceDefault,
  2957  			},
  2958  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  2959  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2960  				"kind":       "DockerCluster",
  2961  				"metadata": map[string]interface{}{
  2962  					"name":      "my-cluster-abc",
  2963  					"namespace": metav1.NamespaceDefault,
  2964  				},
  2965  			}},
  2966  			wantErr: true,
  2967  		},
  2968  		{
  2969  			name: "Return desired ref if group changed",
  2970  			currentRef: &corev1.ObjectReference{
  2971  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2972  				Kind:       "DockerCluster",
  2973  				Name:       "my-cluster-abc",
  2974  				Namespace:  metav1.NamespaceDefault,
  2975  			},
  2976  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  2977  				"apiVersion": "infrastructure2.cluster.x-k8s.io/v1beta1",
  2978  				"kind":       "DockerCluster",
  2979  				"metadata": map[string]interface{}{
  2980  					"name":      "my-cluster-abc",
  2981  					"namespace": metav1.NamespaceDefault,
  2982  				},
  2983  			}},
  2984  			want: &corev1.ObjectReference{
  2985  				// Group changed => apiVersion is taken from desired.
  2986  				APIVersion: "infrastructure2.cluster.x-k8s.io/v1beta1",
  2987  				Kind:       "DockerCluster",
  2988  				Name:       "my-cluster-abc",
  2989  				Namespace:  metav1.NamespaceDefault,
  2990  			},
  2991  		},
  2992  		{
  2993  			name: "Return desired ref if kind changed",
  2994  			currentRef: &corev1.ObjectReference{
  2995  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2996  				Kind:       "DockerCluster",
  2997  				Name:       "my-cluster-abc",
  2998  				Namespace:  metav1.NamespaceDefault,
  2999  			},
  3000  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  3001  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  3002  				"kind":       "DockerCluster2",
  3003  				"metadata": map[string]interface{}{
  3004  					"name":      "my-cluster-abc",
  3005  					"namespace": metav1.NamespaceDefault,
  3006  				},
  3007  			}},
  3008  			want: &corev1.ObjectReference{
  3009  				// Kind changed => apiVersion is taken from desired.
  3010  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  3011  				Kind:       "DockerCluster2",
  3012  				Name:       "my-cluster-abc",
  3013  				Namespace:  metav1.NamespaceDefault,
  3014  			},
  3015  		},
  3016  		{
  3017  			name: "Return current apiVersion if group and kind are the same",
  3018  			currentRef: &corev1.ObjectReference{
  3019  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
  3020  				Kind:       "DockerCluster",
  3021  				Name:       "my-cluster-abc",
  3022  				Namespace:  metav1.NamespaceDefault,
  3023  			},
  3024  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  3025  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  3026  				"kind":       "DockerCluster",
  3027  				"metadata": map[string]interface{}{
  3028  					"name":      "my-cluster-abc",
  3029  					"namespace": metav1.NamespaceDefault,
  3030  				},
  3031  			}},
  3032  			want: &corev1.ObjectReference{
  3033  				// Group and kind are the same => apiVersion is taken from currentRef.
  3034  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
  3035  				Kind:       "DockerCluster",
  3036  				Name:       "my-cluster-abc",
  3037  				Namespace:  metav1.NamespaceDefault,
  3038  			},
  3039  		},
  3040  	}
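        	// The rule under test: the current ref's apiVersion is preserved while
        	// the referenced group and kind stay the same; any group or kind change
        	// makes the apiVersion fall back to the desired object's apiVersion.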
  3041  	for _, tt := range tests {
  3042  		t.Run(tt.name, func(t *testing.T) {
  3043  			g := NewWithT(t)
  3044  
  3045  			got, err := calculateRefDesiredAPIVersion(tt.currentRef, tt.desiredReferencedObject)
  3046  			if tt.wantErr {
  3047  				g.Expect(err).To(HaveOccurred())
  3048  				return
  3049  			}
  3050  			g.Expect(err).ToNot(HaveOccurred())
  3051  
  3052  			g.Expect(got).To(BeComparableTo(tt.want))
  3053  		})
  3054  	}
  3055  }