sigs.k8s.io/cluster-api@v1.7.1/exp/topology/desiredstate/desired_state_test.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package desiredstate
    18  
    19  import (
    20  	"strings"
    21  	"testing"
    22  	"time"
    23  
    24  	"github.com/google/go-cmp/cmp"
    25  	. "github.com/onsi/gomega"
    26  	corev1 "k8s.io/api/core/v1"
    27  	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
    28  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    29  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    30  	"k8s.io/apimachinery/pkg/runtime"
    31  	"k8s.io/apimachinery/pkg/util/intstr"
    32  	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
    33  	utilfeature "k8s.io/component-base/featuregate/testing"
    34  	"k8s.io/utils/ptr"
    35  	ctrl "sigs.k8s.io/controller-runtime"
    36  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    37  
    38  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    39  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    40  	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
    41  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    42  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    43  	"sigs.k8s.io/cluster-api/exp/topology/scope"
    44  	"sigs.k8s.io/cluster-api/feature"
    45  	"sigs.k8s.io/cluster-api/internal/contract"
    46  	"sigs.k8s.io/cluster-api/internal/hooks"
    47  	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
    48  	"sigs.k8s.io/cluster-api/internal/test/builder"
    49  	"sigs.k8s.io/cluster-api/internal/topology/clustershim"
    50  	"sigs.k8s.io/cluster-api/internal/topology/names"
    51  	"sigs.k8s.io/cluster-api/internal/topology/ownerrefs"
    52  	"sigs.k8s.io/cluster-api/util"
    53  )
    54  
    55  var (
    56  	ctx        = ctrl.SetupSignalHandler()
    57  	fakeScheme = runtime.NewScheme()
    58  )
    59  
    60  func init() {
    61  	_ = clientgoscheme.AddToScheme(fakeScheme)
    62  	_ = clusterv1.AddToScheme(fakeScheme)
    63  	_ = apiextensionsv1.AddToScheme(fakeScheme)
    64  	_ = expv1.AddToScheme(fakeScheme)
    65  	_ = corev1.AddToScheme(fakeScheme)
    66  }
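        // Note: every API type these tests put through the fake client must be
        // registered in fakeScheme above; fake.NewClientBuilder().WithScheme(fakeScheme)
        // can only serve and patch registered types.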
    67  
    68  var (
    69  	fakeRef1 = &corev1.ObjectReference{
    70  		Kind:       "refKind1",
    71  		Namespace:  "refNamespace1",
    72  		Name:       "refName1",
    73  		APIVersion: "refAPIVersion1",
    74  	}
    75  
    76  	fakeRef2 = &corev1.ObjectReference{
    77  		Kind:       "refKind2",
    78  		Namespace:  "refNamespace2",
    79  		Name:       "refName2",
    80  		APIVersion: "refAPIVersion2",
    81  	}
    82  )
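
        // fakeRef1 and fakeRef2 stand in for object references created on earlier
        // reconciles; tests use them to verify that the computed desired state
        // preserves an existing reference name instead of generating a new one.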
    83  
    84  func TestComputeInfrastructureCluster(t *testing.T) {
    85  	// templates and ClusterClass
    86  	infrastructureClusterTemplate := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "template1").
    87  		Build()
    88  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
    89  		WithInfrastructureClusterTemplate(infrastructureClusterTemplate).
    90  		Build()
    91  
    92  	// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
    93  	blueprint := &scope.ClusterBlueprint{
    94  		ClusterClass:                  clusterClass,
    95  		InfrastructureClusterTemplate: infrastructureClusterTemplate,
    96  	}
    97  
    98  	// current cluster objects
    99  	cluster := &clusterv1.Cluster{
   100  		ObjectMeta: metav1.ObjectMeta{
   101  			Name:      "cluster1",
   102  			Namespace: metav1.NamespaceDefault,
   103  		},
   104  	}
   105  
   106  	t.Run("Generates the infrastructureCluster from the template", func(t *testing.T) {
   107  		g := NewWithT(t)
   108  
   109  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   110  		scope := scope.New(cluster)
   111  		scope.Blueprint = blueprint
   112  
   113  		obj, err := computeInfrastructureCluster(ctx, scope)
   114  		g.Expect(err).ToNot(HaveOccurred())
   115  		g.Expect(obj).ToNot(BeNil())
   116  
   117  		assertTemplateToObject(g, assertTemplateInput{
   118  			cluster:     scope.Current.Cluster,
   119  			templateRef: blueprint.ClusterClass.Spec.Infrastructure.Ref,
   120  			template:    blueprint.InfrastructureClusterTemplate,
   121  			labels:      nil,
   122  			annotations: nil,
   123  			currentRef:  nil,
   124  			obj:         obj,
   125  		})
   126  
    127  		// Ensure no ownership is added to the generated InfrastructureCluster.
   128  		g.Expect(obj.GetOwnerReferences()).To(BeEmpty())
   129  	})
   130  	t.Run("If there is already a reference to the infrastructureCluster, it preserves the reference name", func(t *testing.T) {
   131  		g := NewWithT(t)
   132  
   133  		// current cluster objects for the test scenario
   134  		clusterWithInfrastructureRef := cluster.DeepCopy()
   135  		clusterWithInfrastructureRef.Spec.InfrastructureRef = fakeRef1
   136  
   137  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   138  		scope := scope.New(clusterWithInfrastructureRef)
   139  		scope.Blueprint = blueprint
   140  
   141  		obj, err := computeInfrastructureCluster(ctx, scope)
   142  		g.Expect(err).ToNot(HaveOccurred())
   143  		g.Expect(obj).ToNot(BeNil())
   144  
   145  		assertTemplateToObject(g, assertTemplateInput{
   146  			cluster:     scope.Current.Cluster,
   147  			templateRef: blueprint.ClusterClass.Spec.Infrastructure.Ref,
   148  			template:    blueprint.InfrastructureClusterTemplate,
   149  			labels:      nil,
   150  			annotations: nil,
   151  			currentRef:  scope.Current.Cluster.Spec.InfrastructureRef,
   152  			obj:         obj,
   153  		})
   154  	})
   155  	t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
   156  		g := NewWithT(t)
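        		// Note: the cluster shim (a Secret, see clustershim.New) is a temporary
        		// owner for the InfrastructureCluster and ControlPlane while the Cluster
        		// is being provisioned, so half-created objects are garbage collected if
        		// provisioning fails; computed objects must carry its owner reference over.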
   157  		shim := clustershim.New(cluster)
   158  
   159  		// current cluster objects for the test scenario
   160  		clusterWithInfrastructureRef := cluster.DeepCopy()
   161  		clusterWithInfrastructureRef.Spec.InfrastructureRef = fakeRef1
   162  
   163  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   164  		scope := scope.New(clusterWithInfrastructureRef)
   165  		scope.Current.InfrastructureCluster = infrastructureClusterTemplate.DeepCopy()
   166  		scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
   167  		scope.Blueprint = blueprint
   168  
   169  		obj, err := computeInfrastructureCluster(ctx, scope)
   170  		g.Expect(err).ToNot(HaveOccurred())
   171  		g.Expect(obj).ToNot(BeNil())
   172  		g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
   173  	})
   174  }
   175  
   176  func TestComputeControlPlaneInfrastructureMachineTemplate(t *testing.T) {
   177  	// templates and ClusterClass
   178  	labels := map[string]string{"l1": ""}
   179  	annotations := map[string]string{"a1": ""}
   180  
   181  	// current cluster objects
   182  	cluster := &clusterv1.Cluster{
   183  		ObjectMeta: metav1.ObjectMeta{
   184  			Name:      "cluster1",
   185  			Namespace: metav1.NamespaceDefault,
   186  		},
   187  		Spec: clusterv1.ClusterSpec{
   188  			Topology: &clusterv1.Topology{
   189  				ControlPlane: clusterv1.ControlPlaneTopology{
   190  					Metadata: clusterv1.ObjectMeta{
   191  						Labels:      map[string]string{"l2": ""},
   192  						Annotations: map[string]string{"a2": ""},
   193  					},
   194  				},
   195  			},
   196  		},
   197  	}
   198  
   199  	infrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "template1").
   200  		Build()
   201  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   202  		WithControlPlaneMetadata(labels, annotations).
   203  		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate).Build()
   204  
   205  	// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
   206  	blueprint := &scope.ClusterBlueprint{
   207  		Topology:     cluster.Spec.Topology,
   208  		ClusterClass: clusterClass,
   209  		ControlPlane: &scope.ControlPlaneBlueprint{
   210  			InfrastructureMachineTemplate: infrastructureMachineTemplate,
   211  		},
   212  	}
   213  
   214  	t.Run("Generates the infrastructureMachineTemplate from the template", func(t *testing.T) {
   215  		g := NewWithT(t)
   216  
   217  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   218  		scope := scope.New(cluster)
   219  		scope.Blueprint = blueprint
   220  
   221  		obj, err := computeControlPlaneInfrastructureMachineTemplate(ctx, scope)
   222  		g.Expect(err).ToNot(HaveOccurred())
   223  		g.Expect(obj).ToNot(BeNil())
   224  
   225  		assertTemplateToTemplate(g, assertTemplateInput{
   226  			cluster:     scope.Current.Cluster,
   227  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref,
   228  			template:    blueprint.ControlPlane.InfrastructureMachineTemplate,
   229  			currentRef:  nil,
   230  			obj:         obj,
   231  		})
   232  
    233  		// Ensure Cluster ownership is added to the generated InfrastructureMachineTemplate.
   234  		g.Expect(obj.GetOwnerReferences()).To(HaveLen(1))
   235  		g.Expect(obj.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
   236  		g.Expect(obj.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
   237  	})
   238  	t.Run("If there is already a reference to the infrastructureMachineTemplate, it preserves the reference name", func(t *testing.T) {
   239  		g := NewWithT(t)
   240  
   241  		// current cluster objects for the test scenario
   242  		currentInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "cluster1-template1").Build()
   243  
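        		// Seed an unstructured control plane whose spec.machineTemplate.infrastructureRef
        		// (the contract path written by the helper below) already points at the
        		// current template, simulating an object from a previous reconcile.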
   244  		controlPlane := &unstructured.Unstructured{Object: map[string]interface{}{}}
   245  		err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, currentInfrastructureMachineTemplate)
   246  		g.Expect(err).ToNot(HaveOccurred())
   247  
   248  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   249  		s := scope.New(cluster)
   250  		s.Current.ControlPlane = &scope.ControlPlaneState{
   251  			Object:                        controlPlane,
   252  			InfrastructureMachineTemplate: currentInfrastructureMachineTemplate,
   253  		}
   254  		s.Blueprint = blueprint
   255  
   256  		obj, err := computeControlPlaneInfrastructureMachineTemplate(ctx, s)
   257  		g.Expect(err).ToNot(HaveOccurred())
   258  		g.Expect(obj).ToNot(BeNil())
   259  
   260  		assertTemplateToTemplate(g, assertTemplateInput{
   261  			cluster:     s.Current.Cluster,
   262  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref,
   263  			template:    blueprint.ControlPlane.InfrastructureMachineTemplate,
   264  			currentRef:  contract.ObjToRef(currentInfrastructureMachineTemplate),
   265  			obj:         obj,
   266  		})
   267  	})
   268  }
   269  
   270  func TestComputeControlPlane(t *testing.T) {
   271  	// templates and ClusterClass
   272  	labels := map[string]string{"l1": ""}
   273  	annotations := map[string]string{"a1": ""}
   274  
   275  	controlPlaneTemplate := builder.ControlPlaneTemplate(metav1.NamespaceDefault, "template1").
   276  		Build()
   277  	controlPlaneMachineTemplateLabels := map[string]string{
   278  		"machineTemplateLabel": "machineTemplateLabelValue",
   279  	}
   280  	controlPlaneMachineTemplateAnnotations := map[string]string{
   281  		"machineTemplateAnnotation": "machineTemplateAnnotationValue",
   282  	}
   283  	controlPlaneTemplateWithMachineTemplate := controlPlaneTemplate.DeepCopy()
   284  	_ = contract.ControlPlaneTemplate().Template().MachineTemplate().Metadata().Set(controlPlaneTemplateWithMachineTemplate, &clusterv1.ObjectMeta{
   285  		Labels:      controlPlaneMachineTemplateLabels,
   286  		Annotations: controlPlaneMachineTemplateAnnotations,
   287  	})
   288  	clusterClassDuration := 20 * time.Second
   289  	clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   290  		WithControlPlaneMetadata(labels, annotations).
   291  		WithControlPlaneTemplate(controlPlaneTemplate).
   292  		WithControlPlaneNodeDrainTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   293  		WithControlPlaneNodeVolumeDetachTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   294  		WithControlPlaneNodeDeletionTimeout(&metav1.Duration{Duration: clusterClassDuration}).
   295  		Build()
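        	// The topology below sets all three node timeouts to topologyDuration (10s),
        	// while the ClusterClass above defaults them to clusterClassDuration (20s);
        	// the subtests verify that topology values take precedence and that the
        	// ClusterClass defaults apply only when the topology leaves them unset.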
   296  	// TODO: Replace with object builder.
   297  	// current cluster objects
   298  	version := "v1.21.2"
   299  	replicas := int32(3)
   300  	topologyDuration := 10 * time.Second
   301  	nodeDrainTimeout := metav1.Duration{Duration: topologyDuration}
   302  	nodeVolumeDetachTimeout := metav1.Duration{Duration: topologyDuration}
   303  	nodeDeletionTimeout := metav1.Duration{Duration: topologyDuration}
   304  	cluster := &clusterv1.Cluster{
   305  		ObjectMeta: metav1.ObjectMeta{
   306  			Name:      "cluster1",
   307  			Namespace: metav1.NamespaceDefault,
   308  		},
   309  		Spec: clusterv1.ClusterSpec{
   310  			Topology: &clusterv1.Topology{
   311  				Version: version,
   312  				ControlPlane: clusterv1.ControlPlaneTopology{
   313  					Metadata: clusterv1.ObjectMeta{
   314  						Labels:      map[string]string{"l2": ""},
   315  						Annotations: map[string]string{"a2": ""},
   316  					},
   317  					Replicas:                &replicas,
   318  					NodeDrainTimeout:        &nodeDrainTimeout,
   319  					NodeVolumeDetachTimeout: &nodeVolumeDetachTimeout,
   320  					NodeDeletionTimeout:     &nodeDeletionTimeout,
   321  				},
   322  			},
   323  		},
   324  	}
   325  
   326  	t.Run("Generates the ControlPlane from the template", func(t *testing.T) {
   327  		g := NewWithT(t)
   328  
   329  		blueprint := &scope.ClusterBlueprint{
   330  			Topology:     cluster.Spec.Topology,
   331  			ClusterClass: clusterClass,
   332  			ControlPlane: &scope.ControlPlaneBlueprint{
   333  				Template: controlPlaneTemplate,
   334  			},
   335  		}
   336  
   337  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   338  		scope := scope.New(cluster)
   339  		scope.Blueprint = blueprint
   340  
   341  		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
   342  		g.Expect(err).ToNot(HaveOccurred())
   343  		g.Expect(obj).ToNot(BeNil())
   344  
   345  		assertTemplateToObject(g, assertTemplateInput{
   346  			cluster:     scope.Current.Cluster,
   347  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   348  			template:    blueprint.ControlPlane.Template,
   349  			currentRef:  nil,
   350  			obj:         obj,
   351  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   352  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   353  		})
   354  
   355  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   356  		assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...)
   357  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...)
   358  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...)
   359  		assertNestedField(g, obj, topologyDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...)
   360  		assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   361  
    362  		// Ensure no ownership is added to the generated ControlPlane.
   363  		g.Expect(obj.GetOwnerReferences()).To(BeEmpty())
   364  	})
   365  	t.Run("Generates the ControlPlane from the template using ClusterClass defaults", func(t *testing.T) {
   366  		g := NewWithT(t)
   367  
   368  		cluster := &clusterv1.Cluster{
   369  			ObjectMeta: metav1.ObjectMeta{
   370  				Name:      "cluster1",
   371  				Namespace: metav1.NamespaceDefault,
   372  			},
   373  			Spec: clusterv1.ClusterSpec{
   374  				Topology: &clusterv1.Topology{
   375  					Version: version,
   376  					ControlPlane: clusterv1.ControlPlaneTopology{
   377  						Metadata: clusterv1.ObjectMeta{
   378  							Labels:      map[string]string{"l2": ""},
   379  							Annotations: map[string]string{"a2": ""},
   380  						},
   381  						Replicas: &replicas,
   382  						// no values for NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout
   383  					},
   384  				},
   385  			},
   386  		}
   387  
   388  		blueprint := &scope.ClusterBlueprint{
   389  			Topology:     cluster.Spec.Topology,
   390  			ClusterClass: clusterClass,
   391  			ControlPlane: &scope.ControlPlaneBlueprint{
   392  				Template: controlPlaneTemplate,
   393  			},
   394  		}
   395  
   396  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   397  		scope := scope.New(cluster)
   398  		scope.Blueprint = blueprint
   399  
   400  		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
   401  		g.Expect(err).ToNot(HaveOccurred())
   402  		g.Expect(obj).ToNot(BeNil())
   403  
   404  		// checking only values from CC defaults
   405  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Path()...)
   406  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Path()...)
   407  		assertNestedField(g, obj, clusterClassDuration.String(), contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Path()...)
   408  	})
   409  	t.Run("Skips setting replicas if required", func(t *testing.T) {
   410  		g := NewWithT(t)
   411  
   412  		// current cluster objects
   413  		clusterWithoutReplicas := cluster.DeepCopy()
   414  		clusterWithoutReplicas.Spec.Topology.ControlPlane.Replicas = nil
   415  
   416  		blueprint := &scope.ClusterBlueprint{
   417  			Topology:     clusterWithoutReplicas.Spec.Topology,
   418  			ClusterClass: clusterClass,
   419  			ControlPlane: &scope.ControlPlaneBlueprint{
   420  				Template: controlPlaneTemplate,
   421  			},
   422  		}
   423  
   424  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   425  		scope := scope.New(clusterWithoutReplicas)
   426  		scope.Blueprint = blueprint
   427  
   428  		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
   429  		g.Expect(err).ToNot(HaveOccurred())
   430  		g.Expect(obj).ToNot(BeNil())
   431  
   432  		assertTemplateToObject(g, assertTemplateInput{
   433  			cluster:     scope.Current.Cluster,
   434  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   435  			template:    blueprint.ControlPlane.Template,
   436  			currentRef:  nil,
   437  			obj:         obj,
   438  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   439  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   440  		})
   441  
   442  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   443  		assertNestedFieldUnset(g, obj, contract.ControlPlane().Replicas().Path()...)
   444  		assertNestedFieldUnset(g, obj, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   445  	})
   446  	t.Run("Generates the ControlPlane from the template and adds the infrastructure machine template if required", func(t *testing.T) {
   447  		g := NewWithT(t)
   448  
   449  		// templates and ClusterClass
   450  		infrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "template1").Build()
   451  		clusterClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
   452  			WithControlPlaneMetadata(labels, annotations).
   453  			WithControlPlaneTemplate(controlPlaneTemplateWithMachineTemplate).
   454  			WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate).Build()
   455  
   456  		// aggregating templates and cluster class into a blueprint (simulating getBlueprint)
   457  		blueprint := &scope.ClusterBlueprint{
   458  			Topology:     cluster.Spec.Topology,
   459  			ClusterClass: clusterClass,
   460  			ControlPlane: &scope.ControlPlaneBlueprint{
   461  				Template:                      controlPlaneTemplateWithMachineTemplate,
   462  				InfrastructureMachineTemplate: infrastructureMachineTemplate,
   463  			},
   464  		}
   465  
   466  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   467  		s := scope.New(cluster)
   468  		s.Blueprint = blueprint
   469  		s.Current.ControlPlane = &scope.ControlPlaneState{}
   470  
   471  		obj, err := (&generator{}).computeControlPlane(ctx, s, infrastructureMachineTemplate)
   472  		g.Expect(err).ToNot(HaveOccurred())
   473  		g.Expect(obj).ToNot(BeNil())
   474  
    475  		// machineTemplate is removed from the template before the assertion because we
    476  		// can't simply compare the machineTemplate in the template with the one in the
    477  		// object: computeControlPlane() adds additional fields, like the timeouts, to it.
    478  		// Note: machineTemplate is asserted separately further below.
   479  		controlPlaneTemplateWithoutMachineTemplate := blueprint.ControlPlane.Template.DeepCopy()
   480  		unstructured.RemoveNestedField(controlPlaneTemplateWithoutMachineTemplate.Object, "spec", "template", "spec", "machineTemplate")
   481  
   482  		assertTemplateToObject(g, assertTemplateInput{
   483  			cluster:     s.Current.Cluster,
   484  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   485  			template:    controlPlaneTemplateWithoutMachineTemplate,
   486  			currentRef:  nil,
   487  			obj:         obj,
   488  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   489  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   490  		})
   491  		gotMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(obj)
   492  		g.Expect(err).ToNot(HaveOccurred())
   493  
   494  		expectedLabels := util.MergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels, controlPlaneMachineTemplateLabels)
   495  		expectedLabels[clusterv1.ClusterNameLabel] = cluster.Name
   496  		expectedLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   497  		g.Expect(gotMetadata).To(BeComparableTo(&clusterv1.ObjectMeta{
   498  			Labels:      expectedLabels,
   499  			Annotations: util.MergeMap(s.Current.Cluster.Spec.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations, controlPlaneMachineTemplateAnnotations),
   500  		}))
   501  
   502  		assertNestedField(g, obj, version, contract.ControlPlane().Version().Path()...)
   503  		assertNestedField(g, obj, int64(replicas), contract.ControlPlane().Replicas().Path()...)
   504  		assertNestedField(g, obj, map[string]interface{}{
   505  			"kind":       infrastructureMachineTemplate.GetKind(),
   506  			"namespace":  infrastructureMachineTemplate.GetNamespace(),
   507  			"name":       infrastructureMachineTemplate.GetName(),
   508  			"apiVersion": infrastructureMachineTemplate.GetAPIVersion(),
   509  		}, contract.ControlPlane().MachineTemplate().InfrastructureRef().Path()...)
   510  	})
   511  	t.Run("If there is already a reference to the ControlPlane, it preserves the reference name", func(t *testing.T) {
   512  		g := NewWithT(t)
   513  
   514  		// current cluster objects for the test scenario
   515  		clusterWithControlPlaneRef := cluster.DeepCopy()
   516  		clusterWithControlPlaneRef.Spec.ControlPlaneRef = fakeRef1
   517  
   518  		blueprint := &scope.ClusterBlueprint{
   519  			Topology:     clusterWithControlPlaneRef.Spec.Topology,
   520  			ClusterClass: clusterClass,
   521  			ControlPlane: &scope.ControlPlaneBlueprint{
   522  				Template: controlPlaneTemplate,
   523  			},
   524  		}
   525  
   526  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   527  		scope := scope.New(clusterWithControlPlaneRef)
   528  		scope.Blueprint = blueprint
   529  
   530  		obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
   531  		g.Expect(err).ToNot(HaveOccurred())
   532  		g.Expect(obj).ToNot(BeNil())
   533  
   534  		assertTemplateToObject(g, assertTemplateInput{
   535  			cluster:     scope.Current.Cluster,
   536  			templateRef: blueprint.ClusterClass.Spec.ControlPlane.Ref,
   537  			template:    blueprint.ControlPlane.Template,
   538  			currentRef:  scope.Current.Cluster.Spec.ControlPlaneRef,
   539  			obj:         obj,
   540  			labels:      util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Labels, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Labels),
   541  			annotations: util.MergeMap(blueprint.Topology.ControlPlane.Metadata.Annotations, blueprint.ClusterClass.Spec.ControlPlane.Metadata.Annotations),
   542  		})
   543  	})
   544  	t.Run("Should choose the correct version for control plane", func(t *testing.T) {
    545  		// Note: in all of the following tests we set things up so that there are no MachineDeployments.
   546  		// A more extensive list of scenarios is tested in TestComputeControlPlaneVersion.
   547  		tests := []struct {
   548  			name                string
   549  			currentControlPlane *unstructured.Unstructured
   550  			topologyVersion     string
   551  			expectedVersion     string
   552  		}{
   553  			{
   554  				name:                "use cluster.spec.topology.version if creating a new control plane",
   555  				currentControlPlane: nil,
   556  				topologyVersion:     "v1.2.3",
   557  				expectedVersion:     "v1.2.3",
   558  			},
   559  			{
   560  				name: "use controlplane.spec.version if the control plane's spec.version is not equal to status.version",
   561  				currentControlPlane: builder.ControlPlane("test1", "cp1").
   562  					WithSpecFields(map[string]interface{}{
   563  						"spec.version": "v1.2.2",
   564  					}).
   565  					WithStatusFields(map[string]interface{}{
   566  						"status.version": "v1.2.1",
   567  					}).
   568  					Build(),
   569  				topologyVersion: "v1.2.3",
   570  				expectedVersion: "v1.2.2",
   571  			},
   572  		}
   573  
   574  		for _, tt := range tests {
   575  			t.Run(tt.name, func(t *testing.T) {
   576  				g := NewWithT(t)
   577  
   578  				// Current cluster objects for the test scenario.
   579  				clusterWithControlPlaneRef := cluster.DeepCopy()
   580  				clusterWithControlPlaneRef.Spec.ControlPlaneRef = fakeRef1
   581  				clusterWithControlPlaneRef.Spec.Topology.Version = tt.topologyVersion
   582  
   583  				blueprint := &scope.ClusterBlueprint{
   584  					Topology:     clusterWithControlPlaneRef.Spec.Topology,
   585  					ClusterClass: clusterClass,
   586  					ControlPlane: &scope.ControlPlaneBlueprint{
   587  						Template: controlPlaneTemplate,
   588  					},
   589  				}
   590  
   591  				// Aggregating current cluster objects into ClusterState (simulating getCurrentState).
   592  				s := scope.New(clusterWithControlPlaneRef)
   593  				s.Blueprint = blueprint
   594  				s.Current.ControlPlane = &scope.ControlPlaneState{
   595  					Object: tt.currentControlPlane,
   596  				}
   597  
   598  				obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
   599  				g.Expect(err).ToNot(HaveOccurred())
   600  				g.Expect(obj).NotTo(BeNil())
   601  				assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...)
   602  			})
   603  		}
   604  	})
   605  	t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) {
   606  		g := NewWithT(t)
   607  		shim := clustershim.New(cluster)
   608  
   609  		// current cluster objects
   610  		clusterWithoutReplicas := cluster.DeepCopy()
   611  		clusterWithoutReplicas.Spec.Topology.ControlPlane.Replicas = nil
   612  
   613  		blueprint := &scope.ClusterBlueprint{
   614  			Topology:     clusterWithoutReplicas.Spec.Topology,
   615  			ClusterClass: clusterClass,
   616  			ControlPlane: &scope.ControlPlaneBlueprint{
   617  				Template: controlPlaneTemplate,
   618  			},
   619  		}
   620  
   621  		// aggregating current cluster objects into ClusterState (simulating getCurrentState)
   622  		s := scope.New(clusterWithoutReplicas)
   623  		s.Current.ControlPlane = &scope.ControlPlaneState{
   624  			Object: builder.ControlPlane("test1", "cp1").
   625  				WithSpecFields(map[string]interface{}{
   626  					"spec.version": "v1.2.2",
   627  				}).
   628  				WithStatusFields(map[string]interface{}{
   629  					"status.version": "v1.2.1",
   630  				}).
   631  				Build(),
   632  		}
   633  		s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
   634  		s.Blueprint = blueprint
   635  
   636  		obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
   637  		g.Expect(err).ToNot(HaveOccurred())
   638  		g.Expect(obj).ToNot(BeNil())
   639  		g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
   640  	})
   641  }
   642  
   643  func TestComputeControlPlaneVersion(t *testing.T) {
   644  	t.Run("Compute control plane version under various circumstances", func(t *testing.T) {
   645  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
   646  
   647  		nonBlockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   648  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   649  				CommonResponse: runtimehooksv1.CommonResponse{
   650  					Status: runtimehooksv1.ResponseStatusSuccess,
   651  				},
   652  			},
   653  		}
   654  
   655  		blockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   656  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   657  				CommonResponse: runtimehooksv1.CommonResponse{
   658  					Status: runtimehooksv1.ResponseStatusSuccess,
   659  				},
   660  				RetryAfterSeconds: int32(10),
   661  			},
   662  		}
   663  
   664  		failureBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
   665  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   666  				CommonResponse: runtimehooksv1.CommonResponse{
   667  					Status: runtimehooksv1.ResponseStatusFailure,
   668  				},
   669  			},
   670  		}
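
        		// Response semantics exercised below: a success response with
        		// RetryAfterSeconds > 0 blocks (the current version is kept), a success
        		// response with RetryAfterSeconds == 0 lets the upgrade proceed, and a
        		// failure response makes computeControlPlaneVersion return an error.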
   671  
   672  		catalog := runtimecatalog.New()
   673  		_ = runtimehooksv1.AddToCatalog(catalog)
   674  
   675  		beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
   676  		if err != nil {
   677  			panic("unable to compute GVH")
   678  		}
   679  
   680  		tests := []struct {
   681  			name                        string
   682  			hookResponse                *runtimehooksv1.BeforeClusterUpgradeResponse
   683  			topologyVersion             string
   684  			controlPlaneObj             *unstructured.Unstructured
   685  			upgradingMachineDeployments []string
   686  			upgradingMachinePools       []string
   687  			expectedVersion             string
   688  			wantErr                     bool
   689  		}{
   690  			{
   691  				name:            "should return cluster.spec.topology.version if creating a new control plane",
   692  				topologyVersion: "v1.2.3",
   693  				controlPlaneObj: nil,
   694  				expectedVersion: "v1.2.3",
   695  			},
   696  			{
   697  				// Control plane is not upgrading implies that controlplane.spec.version is equal to controlplane.status.version.
    698  			// Control plane is not scaling implies that controlplane.spec.replicas is equal to controlplane.status.replicas,
    699  			// controlplane.status.updatedReplicas, and controlplane.status.readyReplicas.
   700  				name:            "should return cluster.spec.topology.version if the control plane is not upgrading and not scaling",
   701  				hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
   702  				topologyVersion: "v1.2.3",
   703  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   704  					WithSpecFields(map[string]interface{}{
   705  						"spec.version":  "v1.2.2",
   706  						"spec.replicas": int64(2),
   707  					}).
   708  					WithStatusFields(map[string]interface{}{
   709  						"status.version":             "v1.2.2",
   710  						"status.replicas":            int64(2),
   711  						"status.updatedReplicas":     int64(2),
   712  						"status.readyReplicas":       int64(2),
   713  						"status.unavailableReplicas": int64(0),
   714  					}).
   715  					Build(),
   716  				expectedVersion: "v1.2.3",
   717  			},
   718  			{
   719  				// Control plane is considered upgrading if controlplane.spec.version is not equal to controlplane.status.version.
   720  				name:            "should return controlplane.spec.version if the control plane is upgrading",
   721  				topologyVersion: "v1.2.3",
   722  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   723  					WithSpecFields(map[string]interface{}{
   724  						"spec.version": "v1.2.2",
   725  					}).
   726  					WithStatusFields(map[string]interface{}{
   727  						"status.version": "v1.2.1",
   728  					}).
   729  					Build(),
   730  				expectedVersion: "v1.2.2",
   731  			},
   732  			{
   733  				// Control plane is considered scaling if controlplane.spec.replicas is not equal to any of
   734  				// controlplane.status.replicas, controlplane.status.readyReplicas, controlplane.status.updatedReplicas.
   735  				name:            "should return controlplane.spec.version if the control plane is scaling",
   736  				topologyVersion: "v1.2.3",
   737  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   738  					WithSpecFields(map[string]interface{}{
   739  						"spec.version":  "v1.2.2",
   740  						"spec.replicas": int64(2),
   741  					}).
   742  					WithStatusFields(map[string]interface{}{
   743  						"status.version":             "v1.2.2",
   744  						"status.replicas":            int64(1),
   745  						"status.updatedReplicas":     int64(1),
   746  						"status.readyReplicas":       int64(1),
   747  						"status.unavailableReplicas": int64(0),
   748  					}).
   749  					Build(),
   750  				expectedVersion: "v1.2.2",
   751  			},
   752  			{
    753  				name:            "should return controlplane.spec.version if control plane is not upgrading and not scaling and one of the MachineDeployments and one of the MachinePools are upgrading",
   754  				topologyVersion: "v1.2.3",
   755  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   756  					WithSpecFields(map[string]interface{}{
   757  						"spec.version":  "v1.2.2",
   758  						"spec.replicas": int64(2),
   759  					}).
   760  					WithStatusFields(map[string]interface{}{
   761  						"status.version":             "v1.2.2",
   762  						"status.replicas":            int64(2),
   763  						"status.updatedReplicas":     int64(2),
   764  						"status.readyReplicas":       int64(2),
   765  						"status.unavailableReplicas": int64(0),
   766  					}).
   767  					Build(),
   768  				upgradingMachineDeployments: []string{"md1"},
   769  				upgradingMachinePools:       []string{"mp1"},
   770  				expectedVersion:             "v1.2.2",
   771  			},
   772  			{
   773  				name:            "should return cluster.spec.topology.version if control plane is not upgrading and not scaling and none of the MachineDeployments and MachinePools are upgrading - hook returns non blocking response",
   774  				hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
   775  				topologyVersion: "v1.2.3",
   776  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   777  					WithSpecFields(map[string]interface{}{
   778  						"spec.version":  "v1.2.2",
   779  						"spec.replicas": int64(2),
   780  					}).
   781  					WithStatusFields(map[string]interface{}{
   782  						"status.version":             "v1.2.2",
   783  						"status.replicas":            int64(2),
   784  						"status.updatedReplicas":     int64(2),
   785  						"status.readyReplicas":       int64(2),
   786  						"status.unavailableReplicas": int64(0),
   787  					}).
   788  					Build(),
   789  				upgradingMachineDeployments: []string{},
   790  				upgradingMachinePools:       []string{},
   791  				expectedVersion:             "v1.2.3",
   792  			},
   793  			{
    794  				name:            "should return the controlplane.spec.version if the BeforeClusterUpgrade hook returns a blocking response",
   795  				hookResponse:    blockingBeforeClusterUpgradeResponse,
   796  				topologyVersion: "v1.2.3",
   797  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   798  					WithSpecFields(map[string]interface{}{
   799  						"spec.version":  "v1.2.2",
   800  						"spec.replicas": int64(2),
   801  					}).
   802  					WithStatusFields(map[string]interface{}{
   803  						"status.version":             "v1.2.2",
   804  						"status.replicas":            int64(2),
   805  						"status.updatedReplicas":     int64(2),
   806  						"status.readyReplicas":       int64(2),
   807  						"status.unavailableReplicas": int64(0),
   808  					}).
   809  					Build(),
   810  				expectedVersion: "v1.2.2",
   811  			},
   812  			{
    813  				name:            "should fail if the BeforeClusterUpgrade hook returns a failure response",
   814  				hookResponse:    failureBeforeClusterUpgradeResponse,
   815  				topologyVersion: "v1.2.3",
   816  				controlPlaneObj: builder.ControlPlane("test1", "cp1").
   817  					WithSpecFields(map[string]interface{}{
   818  						"spec.version":  "v1.2.2",
   819  						"spec.replicas": int64(2),
   820  					}).
   821  					WithStatusFields(map[string]interface{}{
   822  						"status.version":             "v1.2.2",
   823  						"status.replicas":            int64(2),
   824  						"status.updatedReplicas":     int64(2),
   825  						"status.readyReplicas":       int64(2),
   826  						"status.unavailableReplicas": int64(0),
   827  					}).
   828  					Build(),
   829  				expectedVersion: "v1.2.2",
   830  				wantErr:         true,
   831  			},
   832  		}
   833  		for _, tt := range tests {
   834  			t.Run(tt.name, func(t *testing.T) {
   835  				g := NewWithT(t)
   836  
   837  				s := &scope.Scope{
   838  					Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
   839  						Version: tt.topologyVersion,
   840  						ControlPlane: clusterv1.ControlPlaneTopology{
   841  							Replicas: ptr.To[int32](2),
   842  						},
   843  					}},
   844  					Current: &scope.ClusterState{
   845  						Cluster: &clusterv1.Cluster{
   846  							ObjectMeta: metav1.ObjectMeta{
   847  								Name:      "test-cluster",
   848  								Namespace: "test-ns",
   849  							},
   850  						},
   851  						ControlPlane: &scope.ControlPlaneState{Object: tt.controlPlaneObj},
   852  					},
   853  					UpgradeTracker:      scope.NewUpgradeTracker(),
   854  					HookResponseTracker: scope.NewHookResponseTracker(),
   855  				}
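        				// Simulate workers that are still rolling out: while any
        				// MachineDeployment or MachinePool is upgrading, the control plane
        				// must not pick up the new topology version.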
   856  				if len(tt.upgradingMachineDeployments) > 0 {
   857  					s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
   858  				}
   859  				if len(tt.upgradingMachinePools) > 0 {
   860  					s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
   861  				}
   862  
   863  				runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
   864  					WithCatalog(catalog).
   865  					WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
   866  						beforeClusterUpgradeGVH: tt.hookResponse,
   867  					}).
   868  					Build()
   869  
   870  				fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()
   871  
   872  				r := &generator{
   873  					Client:        fakeClient,
   874  					RuntimeClient: runtimeClient,
   875  				}
   876  				version, err := r.computeControlPlaneVersion(ctx, s)
   877  				if tt.wantErr {
   878  					g.Expect(err).To(HaveOccurred())
   879  				} else {
   880  					g.Expect(err).ToNot(HaveOccurred())
   881  					g.Expect(version).To(Equal(tt.expectedVersion))
   882  					// Verify that if the upgrade is pending it is captured in the upgrade tracker.
   883  					upgradePending := tt.expectedVersion != tt.topologyVersion
   884  					g.Expect(s.UpgradeTracker.ControlPlane.IsPendingUpgrade).To(Equal(upgradePending))
   885  				}
   886  			})
   887  		}
   888  	})
   889  
   890  	t.Run("Calling AfterControlPlaneUpgrade hook", func(t *testing.T) {
   891  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
   892  
   893  		catalog := runtimecatalog.New()
   894  		_ = runtimehooksv1.AddToCatalog(catalog)
   895  
   896  		afterControlPlaneUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneUpgrade)
   897  		if err != nil {
   898  			panic(err)
   899  		}
   900  
   901  		blockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   902  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   903  				RetryAfterSeconds: int32(10),
   904  				CommonResponse: runtimehooksv1.CommonResponse{
   905  					Status: runtimehooksv1.ResponseStatusSuccess,
   906  				},
   907  			},
   908  		}
   909  		nonBlockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   910  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   911  				RetryAfterSeconds: int32(0),
   912  				CommonResponse: runtimehooksv1.CommonResponse{
   913  					Status: runtimehooksv1.ResponseStatusSuccess,
   914  				},
   915  			},
   916  		}
   917  		failureResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{
   918  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
   919  				CommonResponse: runtimehooksv1.CommonResponse{
   920  					Status: runtimehooksv1.ResponseStatusFailure,
   921  				},
   922  			},
   923  		}
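
        		// "Intent to call" the hook is tracked via the pending hooks annotation
        		// (runtimev1.PendingHooksAnnotation) on the Cluster; hooks.IsPending
        		// below reads that annotation.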
   924  
   925  		topologyVersion := "v1.2.3"
   926  		lowerVersion := "v1.2.2"
   927  		controlPlaneStable := builder.ControlPlane("test-ns", "cp1").
   928  			WithSpecFields(map[string]interface{}{
   929  				"spec.version":  topologyVersion,
   930  				"spec.replicas": int64(2),
   931  			}).
   932  			WithStatusFields(map[string]interface{}{
   933  				"status.version":         topologyVersion,
   934  				"status.replicas":        int64(2),
   935  				"status.updatedReplicas": int64(2),
   936  				"status.readyReplicas":   int64(2),
   937  			}).
   938  			Build()
   939  
   940  		controlPlaneUpgrading := builder.ControlPlane("test-ns", "cp1").
   941  			WithSpecFields(map[string]interface{}{
   942  				"spec.version":  topologyVersion,
   943  				"spec.replicas": int64(2),
   944  			}).
   945  			WithStatusFields(map[string]interface{}{
   946  				"status.version":         lowerVersion,
   947  				"status.replicas":        int64(2),
   948  				"status.updatedReplicas": int64(2),
   949  				"status.readyReplicas":   int64(2),
   950  			}).
   951  			Build()
   952  
   953  		controlPlaneProvisioning := builder.ControlPlane("test-ns", "cp1").
   954  			WithSpecFields(map[string]interface{}{
   955  				"spec.version":  "v1.2.2",
   956  				"spec.replicas": int64(2),
   957  			}).
   958  			WithStatusFields(map[string]interface{}{
   959  				"status.version": "",
   960  			}).
   961  			Build()
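
        		// Three control plane fixtures: stable (spec.version equals status.version
        		// and replica counts are settled), upgrading (status.version still behind
        		// spec.version), and provisioning (status.version not yet reported).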
   962  
   963  		tests := []struct {
   964  			name               string
   965  			s                  *scope.Scope
   966  			hookResponse       *runtimehooksv1.AfterControlPlaneUpgradeResponse
   967  			wantIntentToCall   bool
   968  			wantHookToBeCalled bool
   969  			wantHookToBlock    bool
   970  			wantErr            bool
   971  		}{
   972  			{
   973  				name: "should not call hook if it is not marked",
   974  				s: &scope.Scope{
   975  					Blueprint: &scope.ClusterBlueprint{
   976  						Topology: &clusterv1.Topology{
   977  							Version:      topologyVersion,
   978  							ControlPlane: clusterv1.ControlPlaneTopology{},
   979  						},
   980  					},
   981  					Current: &scope.ClusterState{
   982  						Cluster: &clusterv1.Cluster{
   983  							ObjectMeta: metav1.ObjectMeta{
   984  								Name:      "test-cluster",
   985  								Namespace: "test-ns",
   986  							},
   987  							Spec: clusterv1.ClusterSpec{},
   988  						},
   989  						ControlPlane: &scope.ControlPlaneState{
   990  							Object: controlPlaneStable,
   991  						},
   992  					},
   993  					UpgradeTracker:      scope.NewUpgradeTracker(),
   994  					HookResponseTracker: scope.NewHookResponseTracker(),
   995  				},
   996  				wantIntentToCall:   false,
   997  				wantHookToBeCalled: false,
   998  				wantErr:            false,
   999  			},
  1000  			{
  1001  				name: "should not call hook if the control plane is provisioning - there is intent to call hook",
  1002  				s: &scope.Scope{
  1003  					Blueprint: &scope.ClusterBlueprint{
  1004  						Topology: &clusterv1.Topology{
  1005  							Version:      topologyVersion,
  1006  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1007  						},
  1008  					},
  1009  					Current: &scope.ClusterState{
  1010  						Cluster: &clusterv1.Cluster{
  1011  							ObjectMeta: metav1.ObjectMeta{
  1012  								Name:      "test-cluster",
  1013  								Namespace: "test-ns",
  1014  								Annotations: map[string]string{
  1015  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1016  								},
  1017  							},
  1018  							Spec: clusterv1.ClusterSpec{},
  1019  						},
  1020  						ControlPlane: &scope.ControlPlaneState{
  1021  							Object: controlPlaneProvisioning,
  1022  						},
  1023  					},
  1024  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1025  					HookResponseTracker: scope.NewHookResponseTracker(),
  1026  				},
  1027  				wantIntentToCall:   true,
  1028  				wantHookToBeCalled: false,
  1029  				wantErr:            false,
  1030  			},
  1031  			{
  1032  				name: "should not call hook if the control plane is upgrading - there is intent to call hook",
  1033  				s: &scope.Scope{
  1034  					Blueprint: &scope.ClusterBlueprint{
  1035  						Topology: &clusterv1.Topology{
  1036  							Version:      topologyVersion,
  1037  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1038  						},
  1039  					},
  1040  					Current: &scope.ClusterState{
  1041  						Cluster: &clusterv1.Cluster{
  1042  							ObjectMeta: metav1.ObjectMeta{
  1043  								Name:      "test-cluster",
  1044  								Namespace: "test-ns",
  1045  								Annotations: map[string]string{
  1046  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1047  								},
  1048  							},
  1049  							Spec: clusterv1.ClusterSpec{},
  1050  						},
  1051  						ControlPlane: &scope.ControlPlaneState{
  1052  							Object: controlPlaneUpgrading,
  1053  						},
  1054  					},
  1055  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1056  					HookResponseTracker: scope.NewHookResponseTracker(),
  1057  				},
  1058  				wantIntentToCall:   true,
  1059  				wantHookToBeCalled: false,
  1060  				wantErr:            false,
  1061  			},
  1062  			{
  1063  				name: "should call hook if the control plane is at desired version - non-blocking response should remove hook from pending hooks list and allow MD upgrades",
  1064  				s: &scope.Scope{
  1065  					Blueprint: &scope.ClusterBlueprint{
  1066  						Topology: &clusterv1.Topology{
  1067  							Version:      topologyVersion,
  1068  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1069  						},
  1070  					},
  1071  					Current: &scope.ClusterState{
  1072  						Cluster: &clusterv1.Cluster{
  1073  							ObjectMeta: metav1.ObjectMeta{
  1074  								Name:      "test-cluster",
  1075  								Namespace: "test-ns",
  1076  								Annotations: map[string]string{
  1077  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1078  								},
  1079  							},
  1080  							Spec: clusterv1.ClusterSpec{},
  1081  						},
  1082  						ControlPlane: &scope.ControlPlaneState{
  1083  							Object: controlPlaneStable,
  1084  						},
  1085  					},
  1086  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1087  					HookResponseTracker: scope.NewHookResponseTracker(),
  1088  				},
  1089  				hookResponse:       nonBlockingResponse,
  1090  				wantIntentToCall:   false,
  1091  				wantHookToBeCalled: true,
  1092  				wantHookToBlock:    false,
  1093  				wantErr:            false,
  1094  			},
  1095  			{
  1096  				name: "should call hook if the control plane is at desired version - blocking response should leave the hook in pending hooks list and block MD upgrades",
  1097  				s: &scope.Scope{
  1098  					Blueprint: &scope.ClusterBlueprint{
  1099  						Topology: &clusterv1.Topology{
  1100  							Version:      topologyVersion,
  1101  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1102  						},
  1103  					},
  1104  					Current: &scope.ClusterState{
  1105  						Cluster: &clusterv1.Cluster{
  1106  							ObjectMeta: metav1.ObjectMeta{
  1107  								Name:      "test-cluster",
  1108  								Namespace: "test-ns",
  1109  								Annotations: map[string]string{
  1110  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1111  								},
  1112  							},
  1113  							Spec: clusterv1.ClusterSpec{},
  1114  						},
  1115  						ControlPlane: &scope.ControlPlaneState{
  1116  							Object: controlPlaneStable,
  1117  						},
  1118  					},
  1119  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1120  					HookResponseTracker: scope.NewHookResponseTracker(),
  1121  				},
  1122  				hookResponse:       blockingResponse,
  1123  				wantIntentToCall:   true,
  1124  				wantHookToBeCalled: true,
  1125  				wantHookToBlock:    true,
  1126  				wantErr:            false,
  1127  			},
  1128  			{
  1129  				name: "should call hook if the control plane is at desired version - failure response should leave the hook in pending hooks list",
  1130  				s: &scope.Scope{
  1131  					Blueprint: &scope.ClusterBlueprint{
  1132  						Topology: &clusterv1.Topology{
  1133  							Version:      topologyVersion,
  1134  							ControlPlane: clusterv1.ControlPlaneTopology{},
  1135  						},
  1136  					},
  1137  					Current: &scope.ClusterState{
  1138  						Cluster: &clusterv1.Cluster{
  1139  							ObjectMeta: metav1.ObjectMeta{
  1140  								Name:      "test-cluster",
  1141  								Namespace: "test-ns",
  1142  								Annotations: map[string]string{
  1143  									runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
  1144  								},
  1145  							},
  1146  							Spec: clusterv1.ClusterSpec{},
  1147  						},
  1148  						ControlPlane: &scope.ControlPlaneState{
  1149  							Object: controlPlaneStable,
  1150  						},
  1151  					},
  1152  					UpgradeTracker:      scope.NewUpgradeTracker(),
  1153  					HookResponseTracker: scope.NewHookResponseTracker(),
  1154  				},
  1155  				hookResponse:       failureResponse,
  1156  				wantIntentToCall:   true,
  1157  				wantHookToBeCalled: true,
  1158  				wantErr:            true,
  1159  			},
  1160  		}
  1161  
  1162  		for _, tt := range tests {
  1163  			t.Run(tt.name, func(t *testing.T) {
  1164  				g := NewWithT(t)
  1165  
  1166  				fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1167  					WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1168  						afterControlPlaneUpgradeGVH: tt.hookResponse,
  1169  					}).
  1170  					WithCatalog(catalog).
  1171  					Build()
  1172  
  1173  				fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(tt.s.Current.Cluster).Build()
  1174  
  1175  				r := &generator{
  1176  					Client:        fakeClient,
  1177  					RuntimeClient: fakeRuntimeClient,
  1178  				}
  1179  
  1180  				_, err := r.computeControlPlaneVersion(ctx, tt.s)
  1181  				g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneUpgrade) == 1).To(Equal(tt.wantHookToBeCalled))
  1182  				g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantIntentToCall))
  1183  				g.Expect(err != nil).To(Equal(tt.wantErr))
  1184  				if tt.wantHookToBeCalled && !tt.wantErr {
  1185  					g.Expect(tt.s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade)).To(Equal(tt.wantHookToBlock))
  1186  				}
  1187  			})
  1188  		}
  1189  	})
  1190  
  1191  	t.Run("register intent to call AfterClusterUpgrade and AfterControlPlaneUpgrade hooks", func(t *testing.T) {
  1192  		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
  1193  
  1194  		catalog := runtimecatalog.New()
  1195  		_ = runtimehooksv1.AddToCatalog(catalog)
  1196  		beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
  1197  		if err != nil {
  1198  			panic("unable to compute GVH")
  1199  		}
  1200  		beforeClusterUpgradeNonBlockingResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{
  1201  			CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  1202  				CommonResponse: runtimehooksv1.CommonResponse{
  1203  					Status: runtimehooksv1.ResponseStatusSuccess,
  1204  				},
  1205  			},
  1206  		}
  1207  
  1208  		controlPlaneStable := builder.ControlPlane("test-ns", "cp1").
  1209  			WithSpecFields(map[string]interface{}{
  1210  				"spec.version":  "v1.2.2",
  1211  				"spec.replicas": int64(2),
  1212  			}).
  1213  			WithStatusFields(map[string]interface{}{
  1214  				"status.version":             "v1.2.2",
  1215  				"status.replicas":            int64(2),
  1216  				"status.updatedReplicas":     int64(2),
  1217  				"status.readyReplicas":       int64(2),
  1218  				"status.unavailableReplicas": int64(0),
  1219  			}).
  1220  			Build()
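
        		// The control plane is stable at v1.2.2 while the topology below asks for
        		// v1.2.3; computeControlPlaneVersion should pick up the new version and,
        		// in doing so, register the after-upgrade hooks as pending.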
  1221  
  1222  		s := &scope.Scope{
  1223  			Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  1224  				Version: "v1.2.3",
  1225  				ControlPlane: clusterv1.ControlPlaneTopology{
  1226  					Replicas: ptr.To[int32](2),
  1227  				},
  1228  			}},
  1229  			Current: &scope.ClusterState{
  1230  				Cluster: &clusterv1.Cluster{
  1231  					ObjectMeta: metav1.ObjectMeta{
  1232  						Name:      "test-cluster",
  1233  						Namespace: "test-ns",
  1234  					},
  1235  				},
  1236  				ControlPlane: &scope.ControlPlaneState{Object: controlPlaneStable},
  1237  			},
  1238  			UpgradeTracker:      scope.NewUpgradeTracker(),
  1239  			HookResponseTracker: scope.NewHookResponseTracker(),
  1240  		}
  1241  
  1242  		runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1243  			WithCatalog(catalog).
  1244  			WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1245  				beforeClusterUpgradeGVH: beforeClusterUpgradeNonBlockingResponse,
  1246  			}).
  1247  			Build()
  1248  
  1249  		fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()
  1250  
  1251  		r := &generator{
  1252  			Client:        fakeClient,
  1253  			RuntimeClient: runtimeClient,
  1254  		}
  1255  
  1256  		desiredVersion, err := r.computeControlPlaneVersion(ctx, s)
  1257  		g := NewWithT(t)
  1258  		g.Expect(err).ToNot(HaveOccurred())
  1259  		// When the new version is successfully picked up, the intent to call the AfterControlPlaneUpgrade and AfterClusterUpgrade hooks should be registered.
  1260  		g.Expect(desiredVersion).To(Equal("v1.2.3"))
  1261  		g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster)).To(BeTrue())
  1262  		g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster)).To(BeTrue())
  1263  	})
  1264  }
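
// Note on the assertions above: the "intent to call" a lifecycle hook is persisted on the
// Cluster object itself, and hooks.IsPending reads that bookkeeping back. A minimal sketch
// of the mechanics, assuming the pending-hooks annotation defined in
// exp/runtime/api/v1alpha1 (runtimev1.PendingHooksAnnotation) and its comma-separated format:
//
//	c := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{
//		Annotations: map[string]string{
//			runtimev1.PendingHooksAnnotation: "AfterControlPlaneUpgrade,AfterClusterUpgrade",
//		},
//	}}
//	_ = hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, c) // true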
  1265  
  1266  func TestComputeCluster(t *testing.T) {
  1267  	g := NewWithT(t)
  1268  
  1269  	// generated objects
  1270  	infrastructureCluster := builder.InfrastructureCluster(metav1.NamespaceDefault, "infrastructureCluster1").
  1271  		Build()
  1272  	controlPlane := builder.ControlPlane(metav1.NamespaceDefault, "controlplane1").
  1273  		Build()
  1274  
  1275  	// current cluster objects
  1276  	cluster := &clusterv1.Cluster{
  1277  		ObjectMeta: metav1.ObjectMeta{
  1278  			Name:      "cluster1",
  1279  			Namespace: metav1.NamespaceDefault,
  1280  		},
  1281  	}
  1282  
  1283  	// aggregating current cluster objects into ClusterState (simulating getCurrentState)
  1284  	scope := scope.New(cluster)
  1285  
  1286  	obj, err := computeCluster(ctx, scope, infrastructureCluster, controlPlane)
  1287  	g.Expect(err).ToNot(HaveOccurred())
  1288  	g.Expect(obj).ToNot(BeNil())
  1289  
  1290  	// TypeMeta
  1291  	g.Expect(obj.APIVersion).To(Equal(cluster.APIVersion))
  1292  	g.Expect(obj.Kind).To(Equal(cluster.Kind))
  1293  
  1294  	// ObjectMeta
  1295  	g.Expect(obj.Name).To(Equal(cluster.Name))
  1296  	g.Expect(obj.Namespace).To(Equal(cluster.Namespace))
  1297  	g.Expect(obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, cluster.Name))
  1298  	g.Expect(obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  1299  
  1300  	// Spec
  1301  	g.Expect(obj.Spec.InfrastructureRef).To(BeComparableTo(contract.ObjToRef(infrastructureCluster)))
  1302  	g.Expect(obj.Spec.ControlPlaneRef).To(BeComparableTo(contract.ObjToRef(controlPlane)))
  1303  }
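
// contract.ObjToRef, used in the Spec assertions above, simply projects an object's
// TypeMeta/ObjectMeta into a corev1.ObjectReference; a minimal sketch:
//
//	ref := contract.ObjToRef(infrastructureCluster)
//	// ref.APIVersion == infrastructureCluster.GetAPIVersion()
//	// ref.Kind       == infrastructureCluster.GetKind()
//	// ref.Namespace  == infrastructureCluster.GetNamespace()
//	// ref.Name       == infrastructureCluster.GetName()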
  1304  
  1305  func TestComputeMachineDeployment(t *testing.T) {
  1306  	workerInfrastructureMachineTemplate := builder.InfrastructureMachineTemplate(metav1.NamespaceDefault, "linux-worker-inframachinetemplate").
  1307  		Build()
  1308  	workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate").
  1309  		Build()
  1310  	labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"}
  1311  	annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"}
  1312  
  1313  	unhealthyConditions := []clusterv1.UnhealthyCondition{
  1314  		{
  1315  			Type:    corev1.NodeReady,
  1316  			Status:  corev1.ConditionUnknown,
  1317  			Timeout: metav1.Duration{Duration: 5 * time.Minute},
  1318  		},
  1319  		{
  1320  			Type:    corev1.NodeReady,
  1321  			Status:  corev1.ConditionFalse,
  1322  			Timeout: metav1.Duration{Duration: 5 * time.Minute},
  1323  		},
  1324  	}
  1325  	nodeTimeoutDuration := &metav1.Duration{Duration: time.Duration(1)}
  1326  
  1327  	clusterClassFailureDomain := "A"
  1328  	clusterClassDuration := metav1.Duration{Duration: 20 * time.Second}
  1329  	var clusterClassMinReadySeconds int32 = 20
  1330  	clusterClassStrategy := clusterv1.MachineDeploymentStrategy{
  1331  		Type: clusterv1.OnDeleteMachineDeploymentStrategyType,
  1332  	}
  1333  	md1 := builder.MachineDeploymentClass("linux-worker").
  1334  		WithLabels(labels).
  1335  		WithAnnotations(annotations).
  1336  		WithInfrastructureTemplate(workerInfrastructureMachineTemplate).
  1337  		WithBootstrapTemplate(workerBootstrapTemplate).
  1338  		WithMachineHealthCheckClass(&clusterv1.MachineHealthCheckClass{
  1339  			UnhealthyConditions: unhealthyConditions,
  1340  			NodeStartupTimeout:  nodeTimeoutDuration,
  1341  		}).
  1342  		WithFailureDomain(&clusterClassFailureDomain).
  1343  		WithNodeDrainTimeout(&clusterClassDuration).
  1344  		WithNodeVolumeDetachTimeout(&clusterClassDuration).
  1345  		WithNodeDeletionTimeout(&clusterClassDuration).
  1346  		WithMinReadySeconds(&clusterClassMinReadySeconds).
  1347  		WithStrategy(&clusterClassStrategy).
  1348  		Build()
  1349  	mcds := []clusterv1.MachineDeploymentClass{*md1}
  1350  	fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
  1351  		WithWorkerMachineDeploymentClasses(mcds...).
  1352  		Build()
  1353  
  1354  	version := "v1.21.2"
  1355  	cluster := &clusterv1.Cluster{
  1356  		ObjectMeta: metav1.ObjectMeta{
  1357  			Name:      "cluster1",
  1358  			Namespace: metav1.NamespaceDefault,
  1359  		},
  1360  		Spec: clusterv1.ClusterSpec{
  1361  			Topology: &clusterv1.Topology{
  1362  				Version: version,
  1363  			},
  1364  		},
  1365  	}
  1366  
  1367  	blueprint := &scope.ClusterBlueprint{
  1368  		Topology:     cluster.Spec.Topology,
  1369  		ClusterClass: fakeClass,
  1370  		MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{
  1371  			"linux-worker": {
  1372  				Metadata: clusterv1.ObjectMeta{
  1373  					Labels:      labels,
  1374  					Annotations: annotations,
  1375  				},
  1376  				BootstrapTemplate:             workerBootstrapTemplate,
  1377  				InfrastructureMachineTemplate: workerInfrastructureMachineTemplate,
  1378  				MachineHealthCheck: &clusterv1.MachineHealthCheckClass{
  1379  					UnhealthyConditions: unhealthyConditions,
  1380  					NodeStartupTimeout: &metav1.Duration{
  1381  						Duration: time.Duration(1)},
  1382  				},
  1383  			},
  1384  		},
  1385  	}
  1386  
  1387  	replicas := int32(5)
  1388  	topologyFailureDomain := "B"
  1389  	topologyDuration := metav1.Duration{Duration: 10 * time.Second}
  1390  	var topologyMinReadySeconds int32 = 10
  1391  	topologyStrategy := clusterv1.MachineDeploymentStrategy{
  1392  		Type: clusterv1.RollingUpdateMachineDeploymentStrategyType,
  1393  	}
  1394  	mdTopology := clusterv1.MachineDeploymentTopology{
  1395  		Metadata: clusterv1.ObjectMeta{
  1396  			Labels: map[string]string{
  1397  				// Should overwrite the label from the MachineDeployment class.
  1398  				"fooLabel": "baz",
  1399  			},
  1400  			Annotations: map[string]string{
  1401  				// Should overwrite the annotation from the MachineDeployment class.
  1402  				"fooAnnotation": "baz",
  1403  				// These annotations should not be propagated to the MachineDeployment.
  1404  				clusterv1.ClusterTopologyDeferUpgradeAnnotation:        "",
  1405  				clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  1406  			},
  1407  		},
  1408  		Class:                   "linux-worker",
  1409  		Name:                    "big-pool-of-machines",
  1410  		Replicas:                &replicas,
  1411  		FailureDomain:           &topologyFailureDomain,
  1412  		NodeDrainTimeout:        &topologyDuration,
  1413  		NodeVolumeDetachTimeout: &topologyDuration,
  1414  		NodeDeletionTimeout:     &topologyDuration,
  1415  		MinReadySeconds:         &topologyMinReadySeconds,
  1416  		Strategy:                &topologyStrategy,
  1417  	}
  1418  
  1419  	t.Run("Generates the machine deployment and the referenced templates", func(t *testing.T) {
  1420  		g := NewWithT(t)
  1421  		scope := scope.New(cluster)
  1422  		scope.Blueprint = blueprint
  1423  
  1424  		e := generator{}
  1425  
  1426  		actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
  1427  		g.Expect(err).ToNot(HaveOccurred())
  1428  
  1429  		g.Expect(actual.BootstrapTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines"))
  1430  
  1431  		// Ensure Cluster ownership is added to generated BootstrapTemplate.
  1432  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()).To(HaveLen(1))
  1433  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1434  		g.Expect(actual.BootstrapTemplate.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1435  
  1436  		g.Expect(actual.InfrastructureMachineTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines"))
  1437  
  1438  		// Ensure Cluster ownership is added to generated InfrastructureMachineTemplate.
  1439  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()).To(HaveLen(1))
  1440  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1441  		g.Expect(actual.InfrastructureMachineTemplate.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1442  
  1443  		actualMd := actual.Object
  1444  		g.Expect(*actualMd.Spec.Replicas).To(Equal(replicas))
  1445  		g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds))
  1446  		g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(topologyStrategy))
  1447  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
  1448  		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration))
  1449  		g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration))
  1450  		g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration))
  1451  		g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1"))
  1452  		g.Expect(actualMd.Name).To(ContainSubstring("cluster1"))
  1453  		g.Expect(actualMd.Name).To(ContainSubstring("big-pool-of-machines"))
  1454  
  1455  		expectedAnnotations := util.MergeMap(mdTopology.Metadata.Annotations, md1.Template.Metadata.Annotations)
  1456  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1457  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1458  		g.Expect(actualMd.Annotations).To(Equal(expectedAnnotations))
  1459  		g.Expect(actualMd.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1460  
  1461  		g.Expect(actualMd.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1462  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1463  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1464  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1465  		})))
  1466  		g.Expect(actualMd.Spec.Selector.MatchLabels).To(Equal(map[string]string{
  1467  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1468  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1469  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1470  		}))
  1471  		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1472  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1473  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1474  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1475  		})))
  1476  
  1477  		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinetemplate"))
  1478  		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate"))
  1479  	})
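	// Note on the MergeMap-based expectations above: util.MergeMap keeps the value from the
	// first map in which a key appears, so topology metadata wins over the MachineDeployment
	// class metadata. A minimal sketch:
	//
	//	merged := util.MergeMap(
	//		map[string]string{"fooLabel": "baz"},                      // from the topology
	//		map[string]string{"fooLabel": "bar", "fizzLabel": "buzz"}, // from the class
	//	)
	//	// merged == map[string]string{"fooLabel": "baz", "fizzLabel": "buzz"}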
  1480  	t.Run("Generates the machine deployment and the referenced templates using ClusterClass defaults", func(t *testing.T) {
  1481  		g := NewWithT(t)
  1482  		scope := scope.New(cluster)
  1483  		scope.Blueprint = blueprint
  1484  
  1485  		mdTopology := clusterv1.MachineDeploymentTopology{
  1486  			Metadata: clusterv1.ObjectMeta{
  1487  				Labels: map[string]string{"foo": "baz"},
  1488  			},
  1489  			Class:    "linux-worker",
  1490  			Name:     "big-pool-of-machines",
  1491  			Replicas: &replicas,
  1492  			// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
  1493  		}
  1494  
  1495  		e := generator{}
  1496  
  1497  		actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
  1498  		g.Expect(err).ToNot(HaveOccurred())
  1499  
  1500  		// Check only the values coming from the ClusterClass defaults.
  1501  		actualMd := actual.Object
  1502  		g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds))
  1503  		g.Expect(*actualMd.Spec.Strategy).To(BeComparableTo(clusterClassStrategy))
  1504  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(clusterClassFailureDomain))
  1505  		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration))
  1506  		g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration))
  1507  		g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration))
  1508  	})
  1509  
  1510  	t.Run("If there is already a machine deployment, it preserves the object name and the reference names", func(t *testing.T) {
  1511  		g := NewWithT(t)
  1512  		s := scope.New(cluster)
  1513  		s.Blueprint = blueprint
  1514  
  1515  		currentReplicas := int32(3)
  1516  		currentMd := &clusterv1.MachineDeployment{
  1517  			ObjectMeta: metav1.ObjectMeta{
  1518  				Name: "existing-deployment-1",
  1519  			},
  1520  			Spec: clusterv1.MachineDeploymentSpec{
  1521  				Replicas: &currentReplicas,
  1522  				Template: clusterv1.MachineTemplateSpec{
  1523  					Spec: clusterv1.MachineSpec{
  1524  						Version: ptr.To(version),
  1525  						Bootstrap: clusterv1.Bootstrap{
  1526  							ConfigRef: contract.ObjToRef(workerBootstrapTemplate),
  1527  						},
  1528  						InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachineTemplate),
  1529  					},
  1530  				},
  1531  			},
  1532  		}
  1533  		s.Current.MachineDeployments = map[string]*scope.MachineDeploymentState{
  1534  			"big-pool-of-machines": {
  1535  				Object:                        currentMd,
  1536  				BootstrapTemplate:             workerBootstrapTemplate,
  1537  				InfrastructureMachineTemplate: workerInfrastructureMachineTemplate,
  1538  			},
  1539  		}
  1540  
  1541  		e := generator{}
  1542  
  1543  		actual, err := e.computeMachineDeployment(ctx, s, mdTopology)
  1544  		g.Expect(err).ToNot(HaveOccurred())
  1545  
  1546  		actualMd := actual.Object
  1547  
  1548  		g.Expect(*actualMd.Spec.Replicas).NotTo(Equal(currentReplicas))
  1549  		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
  1550  		g.Expect(actualMd.Name).To(Equal("existing-deployment-1"))
  1551  
  1552  		expectedAnnotations := util.MergeMap(mdTopology.Metadata.Annotations, md1.Template.Metadata.Annotations)
  1553  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1554  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1555  		g.Expect(actualMd.Annotations).To(Equal(expectedAnnotations))
  1556  		g.Expect(actualMd.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1557  
  1558  		g.Expect(actualMd.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1559  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1560  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1561  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1562  		})))
  1563  		g.Expect(actualMd.Spec.Selector.MatchLabels).To(BeComparableTo(map[string]string{
  1564  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1565  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1566  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1567  		}))
  1568  		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mdTopology.Metadata.Labels, md1.Template.Metadata.Labels, map[string]string{
  1569  			clusterv1.ClusterNameLabel:                          cluster.Name,
  1570  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1571  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "big-pool-of-machines",
  1572  		})))
  1573  
  1574  		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinetemplate"))
  1575  		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstraptemplate"))
  1576  	})
  1577  
  1578  	t.Run("If a machine deployment references a topology class that does not exist, machine deployment generation fails", func(t *testing.T) {
  1579  		g := NewWithT(t)
  1580  		scope := scope.New(cluster)
  1581  		scope.Blueprint = blueprint
  1582  
  1583  		mdTopology = clusterv1.MachineDeploymentTopology{
  1584  			Metadata: clusterv1.ObjectMeta{
  1585  				Labels: map[string]string{"foo": "baz"},
  1586  			},
  1587  			Class: "windows-worker",
  1588  			Name:  "big-pool-of-machines",
  1589  		}
  1590  
  1591  		e := generator{}
  1592  
  1593  		_, err := e.computeMachineDeployment(ctx, scope, mdTopology)
  1594  		g.Expect(err).To(HaveOccurred())
  1595  	})
  1596  
  1597  	t.Run("Should choose the correct version for machine deployment", func(t *testing.T) {
  1598  		controlPlaneStable123 := builder.ControlPlane("test1", "cp1").
  1599  			WithSpecFields(map[string]interface{}{
  1600  				"spec.version":  "v1.2.3",
  1601  				"spec.replicas": int64(2),
  1602  			}).
  1603  			WithStatusFields(map[string]interface{}{
  1604  				"status.version":         "v1.2.3",
  1605  				"status.replicas":        int64(2),
  1606  				"status.updatedReplicas": int64(2),
  1607  				"status.readyReplicas":   int64(2),
  1608  			}).
  1609  			Build()
  1610  
  1611  		// Note: in all the following tests we are setting it up so that the control plane is already
  1612  		// stable at the topology version.
  1613  		// A more extensive list of scenarios is tested in TestComputeMachineDeploymentVersion.
  1614  		tests := []struct {
  1615  			name                        string
  1616  			upgradingMachineDeployments []string
  1617  			currentMDVersion            *string
  1618  			upgradeConcurrency          string
  1619  			topologyVersion             string
  1620  			expectedVersion             string
  1621  		}{
  1622  			{
  1623  				name:                        "use cluster.spec.topology.version if creating a new machine deployment",
  1624  				upgradingMachineDeployments: []string{},
  1625  				upgradeConcurrency:          "1",
  1626  				currentMDVersion:            nil,
  1627  				topologyVersion:             "v1.2.3",
  1628  				expectedVersion:             "v1.2.3",
  1629  			},
  1630  			{
  1631  				name:                        "use cluster.spec.topology.version if creating a new machine deployment while another machine deployment is upgrading",
  1632  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1633  				upgradeConcurrency:          "1",
  1634  				currentMDVersion:            nil,
  1635  				topologyVersion:             "v1.2.3",
  1636  				expectedVersion:             "v1.2.3",
  1637  			},
  1638  			{
  1639  				name:                        "use machine deployment's spec.template.spec.version if one of the machine deployments is upgrading, concurrency limit reached",
  1640  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1641  				upgradeConcurrency:          "1",
  1642  				currentMDVersion:            ptr.To("v1.2.2"),
  1643  				topologyVersion:             "v1.2.3",
  1644  				expectedVersion:             "v1.2.2",
  1645  			},
  1646  			{
  1647  				name:                        "use cluster.spec.topology.version if one of the machine deployments is upgrading, concurrency limit not reached",
  1648  				upgradingMachineDeployments: []string{"upgrading-md1"},
  1649  				upgradeConcurrency:          "2",
  1650  				currentMDVersion:            ptr.To("v1.2.2"),
  1651  				topologyVersion:             "v1.2.3",
  1652  				expectedVersion:             "v1.2.3",
  1653  			},
  1654  		}
  1655  		for _, tt := range tests {
  1656  			t.Run(tt.name, func(t *testing.T) {
  1657  				g := NewWithT(t)
  1658  
  1659  				testCluster := cluster.DeepCopy()
  1660  				if testCluster.Annotations == nil {
  1661  					testCluster.Annotations = map[string]string{}
  1662  				}
  1663  				testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency
  1664  
  1665  				s := scope.New(testCluster)
  1666  				s.Blueprint = blueprint
  1667  				s.Blueprint.Topology.Version = tt.topologyVersion
  1668  				s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{
  1669  					Replicas: ptr.To[int32](2),
  1670  				}
  1671  				s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{}
  1672  
  1673  				mdsState := scope.MachineDeploymentsStateMap{}
  1674  				if tt.currentMDVersion != nil {
  1675  					// testing a case with an existing machine deployment
  1676  					// add the stable machine deployment to the current machine deployments state
  1677  					md := builder.MachineDeployment("test-namespace", "big-pool-of-machines").
  1678  						WithGeneration(1).
  1679  						WithReplicas(2).
  1680  						WithVersion(*tt.currentMDVersion).
  1681  						WithStatus(clusterv1.MachineDeploymentStatus{
  1682  							ObservedGeneration: 2,
  1683  							Replicas:           2,
  1684  							ReadyReplicas:      2,
  1685  							UpdatedReplicas:    2,
  1686  							AvailableReplicas:  2,
  1687  						}).
  1688  						Build()
  1689  					mdsState = duplicateMachineDeploymentsState(mdsState)
  1690  					mdsState["big-pool-of-machines"] = &scope.MachineDeploymentState{
  1691  						Object: md,
  1692  					}
  1693  				}
  1694  				s.Current.MachineDeployments = mdsState
  1695  				s.Current.ControlPlane = &scope.ControlPlaneState{
  1696  					Object: controlPlaneStable123,
  1697  				}
  1698  
  1699  				mdTopology := clusterv1.MachineDeploymentTopology{
  1700  					Class:    "linux-worker",
  1701  					Name:     "big-pool-of-machines",
  1702  					Replicas: ptr.To[int32](2),
  1703  				}
  1704  				s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
  1705  
  1706  				e := generator{}
  1707  
  1708  				obj, err := e.computeMachineDeployment(ctx, s, mdTopology)
  1709  				g.Expect(err).ToNot(HaveOccurred())
  1710  				g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
  1711  			})
  1712  		}
  1713  	})
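
	// The concurrency behavior exercised above is driven entirely by the
	// clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation value on the Cluster; a sketch:
	//
	//	testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = "2"
	//	// Up to two MachineDeployments may upgrade at once; any further ones keep their
	//	// current version and are tracked as pending upgrade.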
  1714  
  1715  	t.Run("Should correctly generate a MachineHealthCheck for the MachineDeployment", func(t *testing.T) {
  1716  		g := NewWithT(t)
  1717  		scope := scope.New(cluster)
  1718  		scope.Blueprint = blueprint
  1719  		mdTopology := clusterv1.MachineDeploymentTopology{
  1720  			Class: "linux-worker",
  1721  			Name:  "big-pool-of-machines",
  1722  		}
  1723  
  1724  		e := generator{}
  1725  
  1726  		actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
  1727  		g.Expect(err).ToNot(HaveOccurred())
  1728  		// Check that the ClusterName and selector are set properly for the MachineHealthCheck.
  1729  		g.Expect(actual.MachineHealthCheck.Spec.ClusterName).To(Equal(cluster.Name))
  1730  		g.Expect(actual.MachineHealthCheck.Spec.Selector).To(BeComparableTo(metav1.LabelSelector{MatchLabels: map[string]string{
  1731  			clusterv1.ClusterTopologyOwnedLabel:                 actual.Object.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyOwnedLabel],
  1732  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: actual.Object.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel],
  1733  		}}))
  1734  
  1735  		// Check that the NodeStartupTimeout is set as expected.
  1736  		g.Expect(actual.MachineHealthCheck.Spec.NodeStartupTimeout).To(Equal(nodeTimeoutDuration))
  1737  
  1738  		// Check that UnhealthyConditions are set as expected.
  1739  		g.Expect(actual.MachineHealthCheck.Spec.UnhealthyConditions).To(BeComparableTo(unhealthyConditions))
  1740  	})
  1741  }
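
// The ContainSubstring assertions on generated object names above rely on the topology
// naming helpers; a minimal sketch, assuming the SimpleNameGenerator helper exposed by
// internal/topology/names (imported as names):
//
//	name, _ := names.SimpleNameGenerator("cluster1-big-pool-of-machines-").GenerateName()
//	// name is e.g. "cluster1-big-pool-of-machines-7d2k9": the prefix plus a random suffix.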
  1742  
  1743  func TestComputeMachinePool(t *testing.T) {
  1744  	workerInfrastructureMachinePool := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepool").
  1745  		Build()
  1746  	workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepooltemplate").
  1747  		Build()
  1748  	workerBootstrapConfig := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstrap").
  1749  		Build()
  1750  	workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate").
  1751  		Build()
  1752  	labels := map[string]string{"fizzLabel": "buzz", "fooLabel": "bar"}
  1753  	annotations := map[string]string{"fizzAnnotation": "buzz", "fooAnnotation": "bar"}
  1754  
  1755  	clusterClassDuration := metav1.Duration{Duration: 20 * time.Second}
  1756  	clusterClassFailureDomains := []string{"A", "B"}
  1757  	var clusterClassMinReadySeconds int32 = 20
  1758  	mp1 := builder.MachinePoolClass("linux-worker").
  1759  		WithLabels(labels).
  1760  		WithAnnotations(annotations).
  1761  		WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate).
  1762  		WithBootstrapTemplate(workerBootstrapTemplate).
  1763  		WithFailureDomains("A", "B").
  1764  		WithNodeDrainTimeout(&clusterClassDuration).
  1765  		WithNodeVolumeDetachTimeout(&clusterClassDuration).
  1766  		WithNodeDeletionTimeout(&clusterClassDuration).
  1767  		WithMinReadySeconds(&clusterClassMinReadySeconds).
  1768  		Build()
  1769  	mcps := []clusterv1.MachinePoolClass{*mp1}
  1770  	fakeClass := builder.ClusterClass(metav1.NamespaceDefault, "class1").
  1771  		WithWorkerMachinePoolClasses(mcps...).
  1772  		Build()
  1773  
  1774  	version := "v1.21.3"
  1775  	cluster := &clusterv1.Cluster{
  1776  		ObjectMeta: metav1.ObjectMeta{
  1777  			Name:      "cluster1",
  1778  			Namespace: metav1.NamespaceDefault,
  1779  		},
  1780  		Spec: clusterv1.ClusterSpec{
  1781  			Topology: &clusterv1.Topology{
  1782  				Version: version,
  1783  			},
  1784  		},
  1785  	}
  1786  
  1787  	blueprint := &scope.ClusterBlueprint{
  1788  		Topology:     cluster.Spec.Topology,
  1789  		ClusterClass: fakeClass,
  1790  		MachinePools: map[string]*scope.MachinePoolBlueprint{
  1791  			"linux-worker": {
  1792  				Metadata: clusterv1.ObjectMeta{
  1793  					Labels:      labels,
  1794  					Annotations: annotations,
  1795  				},
  1796  				BootstrapTemplate:                 workerBootstrapTemplate,
  1797  				InfrastructureMachinePoolTemplate: workerInfrastructureMachinePoolTemplate,
  1798  			},
  1799  		},
  1800  	}
  1801  
  1802  	replicas := int32(5)
  1803  	topologyFailureDomains := []string{"A", "B"}
  1804  	topologyDuration := metav1.Duration{Duration: 10 * time.Second}
  1805  	var topologyMinReadySeconds int32 = 10
  1806  	mpTopology := clusterv1.MachinePoolTopology{
  1807  		Metadata: clusterv1.ObjectMeta{
  1808  			Labels: map[string]string{
  1809  				// Should overwrite the label from the MachinePool class.
  1810  				"fooLabel": "baz",
  1811  			},
  1812  			Annotations: map[string]string{
  1813  				// Should overwrite the annotation from the MachinePool class.
  1814  				"fooAnnotation": "baz",
  1815  				// These annotations should not be propagated to the MachinePool.
  1816  				clusterv1.ClusterTopologyDeferUpgradeAnnotation:        "",
  1817  				clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  1818  			},
  1819  		},
  1820  		Class:                   "linux-worker",
  1821  		Name:                    "big-pool-of-machines",
  1822  		Replicas:                &replicas,
  1823  		FailureDomains:          topologyFailureDomains,
  1824  		NodeDrainTimeout:        &topologyDuration,
  1825  		NodeVolumeDetachTimeout: &topologyDuration,
  1826  		NodeDeletionTimeout:     &topologyDuration,
  1827  		MinReadySeconds:         &topologyMinReadySeconds,
  1828  	}
  1829  
  1830  	t.Run("Generates the machine pool and the referenced templates", func(t *testing.T) {
  1831  		g := NewWithT(t)
  1832  		scope := scope.New(cluster)
  1833  		scope.Blueprint = blueprint
  1834  
  1835  		e := generator{}
  1836  
  1837  		actual, err := e.computeMachinePool(ctx, scope, mpTopology)
  1838  		g.Expect(err).ToNot(HaveOccurred())
  1839  
  1840  		g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines"))
  1841  
  1842  		// Ensure Cluster ownership is added to generated BootstrapObject.
  1843  		g.Expect(actual.BootstrapObject.GetOwnerReferences()).To(HaveLen(1))
  1844  		g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1845  		g.Expect(actual.BootstrapObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1846  
  1847  		g.Expect(actual.InfrastructureMachinePoolObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines"))
  1848  
  1849  		// Ensure Cluster ownership is added to generated InfrastructureMachinePool.
  1850  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()).To(HaveLen(1))
  1851  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Kind).To(Equal("Cluster"))
  1852  		g.Expect(actual.InfrastructureMachinePoolObject.GetOwnerReferences()[0].Name).To(Equal(cluster.Name))
  1853  
  1854  		actualMp := actual.Object
  1855  		g.Expect(*actualMp.Spec.Replicas).To(Equal(replicas))
  1856  		g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds))
  1857  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains))
  1858  		g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration))
  1859  		g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration))
  1860  		g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration))
  1861  		g.Expect(actualMp.Spec.ClusterName).To(Equal("cluster1"))
  1862  		g.Expect(actualMp.Name).To(ContainSubstring("cluster1"))
  1863  		g.Expect(actualMp.Name).To(ContainSubstring("big-pool-of-machines"))
  1864  
  1865  		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
  1866  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1867  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1868  		g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations))
  1869  		g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1870  
  1871  		g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1872  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1873  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1874  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1875  		})))
  1876  		g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1877  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1878  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1879  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1880  		})))
  1881  
  1882  		g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinetemplate"))
  1883  		g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate"))
  1884  	})
  1885  	t.Run("Generates the machine pool and the referenced templates using ClusterClass defaults", func(t *testing.T) {
  1886  		g := NewWithT(t)
  1887  		scope := scope.New(cluster)
  1888  		scope.Blueprint = blueprint
  1889  
  1890  		mpTopology := clusterv1.MachinePoolTopology{
  1891  			Metadata: clusterv1.ObjectMeta{
  1892  				Labels: map[string]string{"foo": "baz"},
  1893  			},
  1894  			Class:    "linux-worker",
  1895  			Name:     "big-pool-of-machines",
  1896  			Replicas: &replicas,
  1897  			// missing FailureDomains, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds (MachinePoolTopology has no Strategy field)
  1898  		}
  1899  
  1900  		e := generator{}
  1901  
  1902  		actual, err := e.computeMachinePool(ctx, scope, mpTopology)
  1903  		g.Expect(err).ToNot(HaveOccurred())
  1904  
  1905  		// Check only the values coming from the ClusterClass defaults.
  1906  		actualMp := actual.Object
  1907  		g.Expect(*actualMp.Spec.MinReadySeconds).To(Equal(clusterClassMinReadySeconds))
  1908  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(clusterClassFailureDomains))
  1909  		g.Expect(*actualMp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(clusterClassDuration))
  1910  		g.Expect(*actualMp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(clusterClassDuration))
  1911  		g.Expect(*actualMp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(clusterClassDuration))
  1912  	})
  1913  
  1914  	t.Run("If there is already a machine pool, it preserves the object name and the reference names", func(t *testing.T) {
  1915  		g := NewWithT(t)
  1916  		s := scope.New(cluster)
  1917  		s.Blueprint = blueprint
  1918  
  1919  		currentReplicas := int32(3)
  1920  		currentMp := &expv1.MachinePool{
  1921  			ObjectMeta: metav1.ObjectMeta{
  1922  				Name: "existing-pool-1",
  1923  			},
  1924  			Spec: expv1.MachinePoolSpec{
  1925  				Replicas: &currentReplicas,
  1926  				Template: clusterv1.MachineTemplateSpec{
  1927  					Spec: clusterv1.MachineSpec{
  1928  						Version: ptr.To(version),
  1929  						Bootstrap: clusterv1.Bootstrap{
  1930  							ConfigRef: contract.ObjToRef(workerBootstrapConfig),
  1931  						},
  1932  						InfrastructureRef: *contract.ObjToRef(workerInfrastructureMachinePool),
  1933  					},
  1934  				},
  1935  			},
  1936  		}
  1937  		s.Current.MachinePools = map[string]*scope.MachinePoolState{
  1938  			"big-pool-of-machines": {
  1939  				Object:                          currentMp,
  1940  				BootstrapObject:                 workerBootstrapConfig,
  1941  				InfrastructureMachinePoolObject: workerInfrastructureMachinePool,
  1942  			},
  1943  		}
  1944  
  1945  		e := generator{}
  1946  
  1947  		actual, err := e.computeMachinePool(ctx, s, mpTopology)
  1948  		g.Expect(err).ToNot(HaveOccurred())
  1949  
  1950  		actualMp := actual.Object
  1951  
  1952  		g.Expect(*actualMp.Spec.Replicas).NotTo(Equal(currentReplicas))
  1953  		g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains))
  1954  		g.Expect(actualMp.Name).To(Equal("existing-pool-1"))
  1955  
  1956  		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
  1957  		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1958  		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1959  		g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations))
  1960  		g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
  1961  
  1962  		g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1963  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1964  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1965  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1966  		})))
  1967  		g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
  1968  			clusterv1.ClusterNameLabel:                    cluster.Name,
  1969  			clusterv1.ClusterTopologyOwnedLabel:           "",
  1970  			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
  1971  		})))
  1972  
  1973  		g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinepool"))
  1974  		g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstrap"))
  1975  	})
  1976  
  1977  	t.Run("If a machine pool references a topology class that does not exist, machine pool generation fails", func(t *testing.T) {
  1978  		g := NewWithT(t)
  1979  		scope := scope.New(cluster)
  1980  		scope.Blueprint = blueprint
  1981  
  1982  		mpTopology = clusterv1.MachinePoolTopology{
  1983  			Metadata: clusterv1.ObjectMeta{
  1984  				Labels: map[string]string{"foo": "baz"},
  1985  			},
  1986  			Class: "windows-worker",
  1987  			Name:  "big-pool-of-machines",
  1988  		}
  1989  
  1990  		e := generator{}
  1991  
  1992  		_, err := e.computeMachinePool(ctx, scope, mpTopology)
  1993  		g.Expect(err).To(HaveOccurred())
  1994  	})
  1995  
  1996  	t.Run("Should choose the correct version for machine pool", func(t *testing.T) {
  1997  		controlPlaneStable123 := builder.ControlPlane("test1", "cp1").
  1998  			WithSpecFields(map[string]interface{}{
  1999  				"spec.version":  "v1.2.3",
  2000  				"spec.replicas": int64(2),
  2001  			}).
  2002  			WithStatusFields(map[string]interface{}{
  2003  				"status.version":         "v1.2.3",
  2004  				"status.replicas":        int64(2),
  2005  				"status.updatedReplicas": int64(2),
  2006  				"status.readyReplicas":   int64(2),
  2007  			}).
  2008  			Build()
  2009  
  2010  		// Note: in all the following tests we are setting it up so that the control plane is already
  2011  		// stable at the topology version.
  2012  		// A more extensive list of scenarios is tested in TestComputeMachinePoolVersion.
  2013  		tests := []struct {
  2014  			name                  string
  2015  			upgradingMachinePools []string
  2016  			currentMPVersion      *string
  2017  			upgradeConcurrency    string
  2018  			topologyVersion       string
  2019  			expectedVersion       string
  2020  		}{
  2021  			{
  2022  				name:                  "use cluster.spec.topology.version if creating a new machine pool",
  2023  				upgradingMachinePools: []string{},
  2024  				upgradeConcurrency:    "1",
  2025  				currentMPVersion:      nil,
  2026  				topologyVersion:       "v1.2.3",
  2027  				expectedVersion:       "v1.2.3",
  2028  			},
  2029  			{
  2030  				name:                  "use cluster.spec.topology.version if creating a new machine pool while another machine pool is upgrading",
  2031  				upgradingMachinePools: []string{"upgrading-mp1"},
  2032  				upgradeConcurrency:    "1",
  2033  				currentMPVersion:      nil,
  2034  				topologyVersion:       "v1.2.3",
  2035  				expectedVersion:       "v1.2.3",
  2036  			},
  2037  			{
  2038  				name:                  "use machine pool's spec.template.spec.version if one of the machine pools is upgrading, concurrency limit reached",
  2039  				upgradingMachinePools: []string{"upgrading-mp1"},
  2040  				upgradeConcurrency:    "1",
  2041  				currentMPVersion:      ptr.To("v1.2.2"),
  2042  				topologyVersion:       "v1.2.3",
  2043  				expectedVersion:       "v1.2.2",
  2044  			},
  2045  			{
  2046  				name:                  "use cluster.spec.topology.version if one of the machine pools is upgrading, concurrency limit not reached",
  2047  				upgradingMachinePools: []string{"upgrading-mp1"},
  2048  				upgradeConcurrency:    "2",
  2049  				currentMPVersion:      ptr.To("v1.2.2"),
  2050  				topologyVersion:       "v1.2.3",
  2051  				expectedVersion:       "v1.2.3",
  2052  			},
  2053  		}
  2054  		for _, tt := range tests {
  2055  			t.Run(tt.name, func(t *testing.T) {
  2056  				g := NewWithT(t)
  2057  
  2058  				testCluster := cluster.DeepCopy()
  2059  				if testCluster.Annotations == nil {
  2060  					testCluster.Annotations = map[string]string{}
  2061  				}
  2062  				testCluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation] = tt.upgradeConcurrency
  2063  
  2064  				s := scope.New(testCluster)
  2065  				s.Blueprint = blueprint
  2066  				s.Blueprint.Topology.Version = tt.topologyVersion
  2067  				s.Blueprint.Topology.ControlPlane = clusterv1.ControlPlaneTopology{
  2068  					Replicas: ptr.To[int32](2),
  2069  				}
  2070  				s.Blueprint.Topology.Workers = &clusterv1.WorkersTopology{}
  2071  
  2072  				mpsState := scope.MachinePoolsStateMap{}
  2073  				if tt.currentMPVersion != nil {
  2074  					// testing a case with an existing machine pool
  2075  					// add the stable machine pool to the current machine pools state
  2076  					mp := builder.MachinePool("test-namespace", "big-pool-of-machines").
  2077  						WithReplicas(2).
  2078  						WithVersion(*tt.currentMPVersion).
  2079  						WithStatus(expv1.MachinePoolStatus{
  2080  							ObservedGeneration: 2,
  2081  							Replicas:           2,
  2082  							ReadyReplicas:      2,
  2083  							AvailableReplicas:  2,
  2084  						}).
  2085  						Build()
  2086  					mpsState = duplicateMachinePoolsState(mpsState)
  2087  					mpsState["big-pool-of-machines"] = &scope.MachinePoolState{
  2088  						Object: mp,
  2089  					}
  2090  				}
  2091  				s.Current.MachinePools = mpsState
  2092  				s.Current.ControlPlane = &scope.ControlPlaneState{
  2093  					Object: controlPlaneStable123,
  2094  				}
  2095  
  2096  				mpTopology := clusterv1.MachinePoolTopology{
  2097  					Class:    "linux-worker",
  2098  					Name:     "big-pool-of-machines",
  2099  					Replicas: ptr.To[int32](2),
  2100  				}
  2101  				s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
  2102  
  2103  				e := generator{}
  2104  
  2105  				obj, err := e.computeMachinePool(ctx, s, mpTopology)
  2106  				g.Expect(err).ToNot(HaveOccurred())
  2107  				g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion))
  2108  			})
  2109  		}
  2110  	})
  2111  }
  2112  
  2113  func TestComputeMachineDeploymentVersion(t *testing.T) {
  2114  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
  2115  		Build()
  2116  
  2117  	mdName := "md-1"
  2118  	currentMachineDeploymentState := &scope.MachineDeploymentState{Object: builder.MachineDeployment("test1", mdName).WithVersion("v1.2.2").Build()}
  2119  
  2120  	tests := []struct {
  2121  		name                                 string
  2122  		machineDeploymentTopology            clusterv1.MachineDeploymentTopology
  2123  		currentMachineDeploymentState        *scope.MachineDeploymentState
  2124  		upgradingMachineDeployments          []string
  2125  		upgradeConcurrency                   int
  2126  		controlPlaneStartingUpgrade          bool
  2127  		controlPlaneUpgrading                bool
  2128  		controlPlaneScaling                  bool
  2129  		controlPlaneProvisioning             bool
  2130  		afterControlPlaneUpgradeHookBlocking bool
  2131  		topologyVersion                      string
  2132  		expectedVersion                      string
  2133  		expectPendingCreate                  bool
  2134  		expectPendingUpgrade                 bool
  2135  	}{
  2136  		{
  2137  			name:                          "should return cluster.spec.topology.version if creating a new machine deployment and if control plane is stable - not marked as pending create",
  2138  			currentMachineDeploymentState: nil,
  2139  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2140  				Name: "md-topology-1",
  2141  			},
  2142  			topologyVersion:     "v1.2.3",
  2143  			expectedVersion:     "v1.2.3",
  2144  			expectPendingCreate: false,
  2145  		},
  2146  		{
  2147  			name:                "should return cluster.spec.topology.version if creating a new machine deployment and if control plane is not stable - marked as pending create",
  2148  			controlPlaneScaling: true,
  2149  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2150  				Name: "md-topology-1",
  2151  			},
  2152  			topologyVersion:     "v1.2.3",
  2153  			expectedVersion:     "v1.2.3",
  2154  			expectPendingCreate: true,
  2155  		},
  2156  		{
  2157  			name: "should return machine deployment's spec.template.spec.version if upgrade is deferred",
  2158  			machineDeploymentTopology: clusterv1.MachineDeploymentTopology{
  2159  				Metadata: clusterv1.ObjectMeta{
  2160  					Annotations: map[string]string{
  2161  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2162  					},
  2163  				},
  2164  			},
  2165  			currentMachineDeploymentState: currentMachineDeploymentState,
  2166  			upgradingMachineDeployments:   []string{},
  2167  			topologyVersion:               "v1.2.3",
  2168  			expectedVersion:               "v1.2.2",
  2169  			expectPendingUpgrade:          true,
  2170  		},
  2171  		{
  2172  			// Control plane is considered upgrading if the control plane's spec.version and status.version are not equal.
  2173  			name:                          "should return machine deployment's spec.template.spec.version if control plane is upgrading",
  2174  			currentMachineDeploymentState: currentMachineDeploymentState,
  2175  			upgradingMachineDeployments:   []string{},
  2176  			controlPlaneUpgrading:         true,
  2177  			topologyVersion:               "v1.2.3",
  2178  			expectedVersion:               "v1.2.2",
  2179  			expectPendingUpgrade:          true,
  2180  		},
  2181  		{
  2182  			// Control plane is considered to be starting an upgrade if the spec.version of the current and desired control plane are not equal.
  2183  			name:                          "should return machine deployment's spec.template.spec.version if control plane is starting upgrade",
  2184  			currentMachineDeploymentState: currentMachineDeploymentState,
  2185  			upgradingMachineDeployments:   []string{},
  2186  			controlPlaneStartingUpgrade:   true,
  2187  			topologyVersion:               "v1.2.3",
  2188  			expectedVersion:               "v1.2.2",
  2189  			expectPendingUpgrade:          true,
  2190  		},
  2191  		{
  2192  			// Control plane is considered scaling if its spec.replicas does not match status.replicas, status.readyReplicas or status.updatedReplicas.
  2193  			name:                          "should return machine deployment's spec.template.spec.version if control plane is scaling",
  2194  			currentMachineDeploymentState: currentMachineDeploymentState,
  2195  			upgradingMachineDeployments:   []string{},
  2196  			controlPlaneScaling:           true,
  2197  			topologyVersion:               "v1.2.3",
  2198  			expectedVersion:               "v1.2.2",
  2199  			expectPendingUpgrade:          true,
  2200  		},
  2201  		{
  2202  			name:                          "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the machine deployments are upgrading",
  2203  			currentMachineDeploymentState: currentMachineDeploymentState,
  2204  			upgradingMachineDeployments:   []string{},
  2205  			topologyVersion:               "v1.2.3",
  2206  			expectedVersion:               "v1.2.3",
  2207  			expectPendingUpgrade:          false,
  2208  		},
  2209  		{
  2210  			name:                                 "should return machine deployment's spec.template.spec.version if control plane is stable, other machine deployments are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking",
  2211  			currentMachineDeploymentState:        currentMachineDeploymentState,
  2212  			upgradingMachineDeployments:          []string{"upgrading-md1"},
  2213  			upgradeConcurrency:                   2,
  2214  			afterControlPlaneUpgradeHookBlocking: true,
  2215  			topologyVersion:                      "v1.2.3",
  2216  			expectedVersion:                      "v1.2.2",
  2217  			expectPendingUpgrade:                 true,
  2218  		},
  2219  		{
  2220  			name:                          "should return cluster.spec.topology.version if control plane is stable, other machine deployments are upgrading, concurrency limit not reached",
  2221  			currentMachineDeploymentState: currentMachineDeploymentState,
  2222  			upgradingMachineDeployments:   []string{"upgrading-md1"},
  2223  			upgradeConcurrency:            2,
  2224  			topologyVersion:               "v1.2.3",
  2225  			expectedVersion:               "v1.2.3",
  2226  			expectPendingUpgrade:          false,
  2227  		},
  2228  		{
  2229  			name:                          "should return machine deployment's spec.template.spec.version if control plane is stable, other machine deployments are upgrading, concurrency limit reached",
  2230  			currentMachineDeploymentState: currentMachineDeploymentState,
  2231  			upgradingMachineDeployments:   []string{"upgrading-md1", "upgrading-md2"},
  2232  			upgradeConcurrency:            2,
  2233  			topologyVersion:               "v1.2.3",
  2234  			expectedVersion:               "v1.2.2",
  2235  			expectPendingUpgrade:          true,
  2236  		},
  2237  	}
  2238  
  2239  	for _, tt := range tests {
  2240  		t.Run(tt.name, func(t *testing.T) {
  2241  			g := NewWithT(t)
  2242  
  2243  			s := &scope.Scope{
  2244  				Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  2245  					Version: tt.topologyVersion,
  2246  					ControlPlane: clusterv1.ControlPlaneTopology{
  2247  						Replicas: ptr.To[int32](2),
  2248  					},
  2249  					Workers: &clusterv1.WorkersTopology{},
  2250  				}},
  2251  				Current: &scope.ClusterState{
  2252  					ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj},
  2253  				},
  2254  				UpgradeTracker:      scope.NewUpgradeTracker(scope.MaxMDUpgradeConcurrency(tt.upgradeConcurrency)),
  2255  				HookResponseTracker: scope.NewHookResponseTracker(),
  2256  			}
  2257  			if tt.afterControlPlaneUpgradeHookBlocking {
  2258  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{
  2259  					CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  2260  						RetryAfterSeconds: 10,
  2261  					},
  2262  				})
  2263  			}
  2264  			s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade
  2265  			s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading
  2266  			s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
  2267  			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
  2268  			s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
  2269  
  2270  			e := generator{}
  2271  
  2272  			version := e.computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
  2273  			g.Expect(version).To(Equal(tt.expectedVersion))
  2274  
  2275  			if tt.currentMachineDeploymentState != nil {
  2276  				// Verify that if the upgrade is pending it is captured in the upgrade tracker.
  2277  				if tt.expectPendingUpgrade {
  2278  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingUpgrade(mdName)).To(BeTrue(), "MachineDeployment should be marked as pending upgrade")
  2279  				} else {
  2280  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingUpgrade(mdName)).To(BeFalse(), "MachineDeployment should not be marked as pending upgrade")
  2281  				}
  2282  			} else {
  2283  				// Verify that if the create is pending, it is captured in the upgrade tracker.
  2284  				if tt.expectPendingCreate {
  2285  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingCreate(tt.machineDeploymentTopology.Name)).To(BeTrue(), "MachineDeployment topology should be marked as pending create")
  2286  				} else {
  2287  					g.Expect(s.UpgradeTracker.MachineDeployments.IsPendingCreate(tt.machineDeploymentTopology.Name)).To(BeFalse(), "MachineDeployment topology should not be marked as pending create")
  2288  				}
  2289  			}
  2290  		})
  2291  	}
  2292  }
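
// A compact sketch of the UpgradeTracker bookkeeping exercised above, assuming the
// MarkPendingUpgrade helper that computeMachineDeploymentVersion uses internally:
//
//	ut := scope.NewUpgradeTracker(scope.MaxMDUpgradeConcurrency(1))
//	ut.MachineDeployments.MarkUpgrading("upgrading-md1") // the concurrency limit (1) is now reached
//	ut.MachineDeployments.MarkPendingUpgrade("md-1")     // md-1 must keep its current version
//	_ = ut.MachineDeployments.IsPendingUpgrade("md-1")   // true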
  2293  
  2294  func TestComputeMachinePoolVersion(t *testing.T) {
  2295  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
  2296  		Build()
  2297  
  2298  	mpName := "mp-1"
  2299  	currentMachinePoolState := &scope.MachinePoolState{Object: builder.MachinePool("test1", mpName).WithVersion("v1.2.2").Build()}
  2300  
  2301  	tests := []struct {
  2302  		name                                 string
  2303  		machinePoolTopology                  clusterv1.MachinePoolTopology
  2304  		currentMachinePoolState              *scope.MachinePoolState
  2305  		upgradingMachinePools                []string
  2306  		upgradeConcurrency                   int
  2307  		controlPlaneStartingUpgrade          bool
  2308  		controlPlaneUpgrading                bool
  2309  		controlPlaneScaling                  bool
  2310  		controlPlaneProvisioning             bool
  2311  		afterControlPlaneUpgradeHookBlocking bool
  2312  		topologyVersion                      string
  2313  		expectedVersion                      string
  2314  		expectPendingCreate                  bool
  2315  		expectPendingUpgrade                 bool
  2316  	}{
  2317  		{
  2318  			name:                    "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is stable - not marked as pending create",
  2319  			currentMachinePoolState: nil,
  2320  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2321  				Name: "mp-topology-1",
  2322  			},
  2323  			topologyVersion:     "v1.2.3",
  2324  			expectedVersion:     "v1.2.3",
  2325  			expectPendingCreate: false,
  2326  		},
  2327  		{
  2328  			name:                "should return cluster.spec.topology.version if creating a new MachinePool and if control plane is not stable - marked as pending create",
  2329  			controlPlaneScaling: true,
  2330  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2331  				Name: "mp-topology-1",
  2332  			},
  2333  			topologyVersion:     "v1.2.3",
  2334  			expectedVersion:     "v1.2.3",
  2335  			expectPendingCreate: true,
  2336  		},
  2337  		{
  2338  			name: "should return MachinePool's spec.template.spec.version if upgrade is deferred",
  2339  			machinePoolTopology: clusterv1.MachinePoolTopology{
  2340  				Metadata: clusterv1.ObjectMeta{
  2341  					Annotations: map[string]string{
  2342  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2343  					},
  2344  				},
  2345  			},
  2346  			currentMachinePoolState: currentMachinePoolState,
  2347  			upgradingMachinePools:   []string{},
  2348  			topologyVersion:         "v1.2.3",
  2349  			expectedVersion:         "v1.2.2",
  2350  			expectPendingUpgrade:    true,
  2351  		},
  2352  		{
  2353  			// Control plane is considered upgrading if the control plane's spec.version and status.version are not equal.
  2354  			name:                    "should return MachinePool's spec.template.spec.version if control plane is upgrading",
  2355  			currentMachinePoolState: currentMachinePoolState,
  2356  			upgradingMachinePools:   []string{},
  2357  			controlPlaneUpgrading:   true,
  2358  			topologyVersion:         "v1.2.3",
  2359  			expectedVersion:         "v1.2.2",
  2360  			expectPendingUpgrade:    true,
  2361  		},
  2362  		{
  2363  			// Control plane is considered to be starting an upgrade if the spec.version of the current and desired control plane are not equal.
  2364  			name:                        "should return MachinePool's spec.template.spec.version if control plane is starting upgrade",
  2365  			currentMachinePoolState:     currentMachinePoolState,
  2366  			upgradingMachinePools:       []string{},
  2367  			controlPlaneStartingUpgrade: true,
  2368  			topologyVersion:             "v1.2.3",
  2369  			expectedVersion:             "v1.2.2",
  2370  			expectPendingUpgrade:        true,
  2371  		},
  2372  		{
  2373  			// Control plane is considered scaling if its spec.replicas differs from any of status.replicas, status.readyReplicas or status.updatedReplicas.
  2374  			name:                    "should return MachinePool's spec.template.spec.version if control plane is scaling",
  2375  			currentMachinePoolState: currentMachinePoolState,
  2376  			upgradingMachinePools:   []string{},
  2377  			controlPlaneScaling:     true,
  2378  			topologyVersion:         "v1.2.3",
  2379  			expectedVersion:         "v1.2.2",
  2380  			expectPendingUpgrade:    true,
  2381  		},
  2382  		{
  2383  			name:                    "should return cluster.spec.topology.version if the control plane is not upgrading, not scaling, not ready to upgrade and none of the MachinePools are upgrading",
  2384  			currentMachinePoolState: currentMachinePoolState,
  2385  			upgradingMachinePools:   []string{},
  2386  			topologyVersion:         "v1.2.3",
  2387  			expectedVersion:         "v1.2.3",
  2388  			expectPendingUpgrade:    false,
  2389  		},
  2390  		{
  2391  			name:                                 "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached but AfterControlPlaneUpgrade hook is blocking",
  2392  			currentMachinePoolState:              currentMachinePoolState,
  2393  			upgradingMachinePools:                []string{"upgrading-mp1"},
  2394  			upgradeConcurrency:                   2,
  2395  			afterControlPlaneUpgradeHookBlocking: true,
  2396  			topologyVersion:                      "v1.2.3",
  2397  			expectedVersion:                      "v1.2.2",
  2398  			expectPendingUpgrade:                 true,
  2399  		},
  2400  		{
  2401  			name:                    "should return cluster.spec.topology.version if control plane is stable, other MachinePools are upgrading, concurrency limit not reached",
  2402  			currentMachinePoolState: currentMachinePoolState,
  2403  			upgradingMachinePools:   []string{"upgrading-mp1"},
  2404  			upgradeConcurrency:      2,
  2405  			topologyVersion:         "v1.2.3",
  2406  			expectedVersion:         "v1.2.3",
  2407  			expectPendingUpgrade:    false,
  2408  		},
  2409  		{
  2410  			name:                    "should return MachinePool's spec.template.spec.version if control plane is stable, other MachinePools are upgrading, concurrency limit reached",
  2411  			currentMachinePoolState: currentMachinePoolState,
  2412  			upgradingMachinePools:   []string{"upgrading-mp1", "upgrading-mp2"},
  2413  			upgradeConcurrency:      2,
  2414  			topologyVersion:         "v1.2.3",
  2415  			expectedVersion:         "v1.2.2",
  2416  			expectPendingUpgrade:    true,
  2417  		},
  2418  	}
  2419  
  2420  	for _, tt := range tests {
  2421  		t.Run(tt.name, func(t *testing.T) {
  2422  			g := NewWithT(t)
  2423  
  2424  			s := &scope.Scope{
  2425  				Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
  2426  					Version: tt.topologyVersion,
  2427  					ControlPlane: clusterv1.ControlPlaneTopology{
  2428  						Replicas: ptr.To[int32](2),
  2429  					},
  2430  					Workers: &clusterv1.WorkersTopology{},
  2431  				}},
  2432  				Current: &scope.ClusterState{
  2433  					ControlPlane: &scope.ControlPlaneState{Object: controlPlaneObj},
  2434  				},
  2435  				UpgradeTracker:      scope.NewUpgradeTracker(scope.MaxMPUpgradeConcurrency(tt.upgradeConcurrency)),
  2436  				HookResponseTracker: scope.NewHookResponseTracker(),
  2437  			}
  2438  			if tt.afterControlPlaneUpgradeHookBlocking {
  2439  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, &runtimehooksv1.AfterControlPlaneUpgradeResponse{
  2440  					CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
  2441  						RetryAfterSeconds: 10,
  2442  					},
  2443  				})
  2444  			}
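        			// Mirror the per-test control plane state and the already-upgrading MachinePools into the upgrade tracker before computing the version.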
  2445  			s.UpgradeTracker.ControlPlane.IsStartingUpgrade = tt.controlPlaneStartingUpgrade
  2446  			s.UpgradeTracker.ControlPlane.IsUpgrading = tt.controlPlaneUpgrading
  2447  			s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling
  2448  			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
  2449  			s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
  2450  
  2451  			e := generator{}
  2452  
  2453  			version := e.computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
  2454  			g.Expect(version).To(Equal(tt.expectedVersion))
  2455  
  2456  			if tt.currentMachinePoolState != nil {
  2457  				// Verify that if the upgrade is pending it is captured in the upgrade tracker.
  2458  				if tt.expectPendingUpgrade {
  2459  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeTrue(), "MachinePool should be marked as pending upgrade")
  2460  				} else {
  2461  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingUpgrade(mpName)).To(BeFalse(), "MachinePool should not be marked as pending upgrade")
  2462  				}
  2463  			} else {
  2464  				// Verify that if the create is pending it is captured in the upgrade tracker.
  2465  				if tt.expectPendingCreate {
  2466  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeTrue(), "MachinePool topology should be marked as pending create")
  2467  				} else {
  2468  					g.Expect(s.UpgradeTracker.MachinePools.IsPendingCreate(tt.machinePoolTopology.Name)).To(BeFalse(), "MachinePool topology should not be marked as pending create")
  2469  				}
  2470  			}
  2471  		})
  2472  	}
  2473  }
  2474  
  2475  func TestIsMachineDeploymentDeferred(t *testing.T) {
  2476  	clusterTopology := &clusterv1.Topology{
  2477  		Workers: &clusterv1.WorkersTopology{
  2478  			MachineDeployments: []clusterv1.MachineDeploymentTopology{
  2479  				{
  2480  					Name: "md-with-defer-upgrade",
  2481  					Metadata: clusterv1.ObjectMeta{
  2482  						Annotations: map[string]string{
  2483  							clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2484  						},
  2485  					},
  2486  				},
  2487  				{
  2488  					Name: "md-without-annotations",
  2489  				},
  2490  				{
  2491  					Name: "md-with-hold-upgrade-sequence",
  2492  					Metadata: clusterv1.ObjectMeta{
  2493  						Annotations: map[string]string{
  2494  							clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2495  						},
  2496  					},
  2497  				},
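        				// Defined after the MD holding the upgrade sequence; it is expected to be deferred as well.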
  2498  				{
  2499  					Name: "md-after-md-with-hold-upgrade-sequence",
  2500  				},
  2501  			},
  2502  		},
  2503  	}
  2504  
  2505  	tests := []struct {
  2506  		name       string
  2507  		mdTopology clusterv1.MachineDeploymentTopology
  2508  		deferred   bool
  2509  	}{
  2510  		{
  2511  			name: "MD with defer-upgrade annotation is deferred",
  2512  			mdTopology: clusterv1.MachineDeploymentTopology{
  2513  				Name: "md-with-defer-upgrade",
  2514  				Metadata: clusterv1.ObjectMeta{
  2515  					Annotations: map[string]string{
  2516  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2517  					},
  2518  				},
  2519  			},
  2520  			deferred: true,
  2521  		},
  2522  		{
  2523  			name: "MD without annotations is not deferred",
  2524  			mdTopology: clusterv1.MachineDeploymentTopology{
  2525  				Name: "md-without-annotations",
  2526  			},
  2527  			deferred: false,
  2528  		},
  2529  		{
  2530  			name: "MD with hold-upgrade-sequence annotation is deferred",
  2531  			mdTopology: clusterv1.MachineDeploymentTopology{
  2532  				Name: "md-with-hold-upgrade-sequence",
  2533  				Metadata: clusterv1.ObjectMeta{
  2534  					Annotations: map[string]string{
  2535  						clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2536  					},
  2537  				},
  2538  			},
  2539  			deferred: true,
  2540  		},
  2541  		{
  2542  			name: "MD after MD with hold-upgrade-sequence is deferred",
  2543  			mdTopology: clusterv1.MachineDeploymentTopology{
  2544  				Name: "md-after-md-with-hold-upgrade-sequence",
  2545  			},
  2546  			deferred: true,
  2547  		},
  2548  	}
  2549  
  2550  	for _, tt := range tests {
  2551  		t.Run(tt.name, func(t *testing.T) {
  2552  			g := NewWithT(t)
  2553  			g.Expect(isMachineDeploymentDeferred(clusterTopology, tt.mdTopology)).To(Equal(tt.deferred))
  2554  		})
  2555  	}
  2556  }
  2557  
  2558  func TestIsMachinePoolDeferred(t *testing.T) {
  2559  	clusterTopology := &clusterv1.Topology{
  2560  		Workers: &clusterv1.WorkersTopology{
  2561  			MachinePools: []clusterv1.MachinePoolTopology{
  2562  				{
  2563  					Name: "mp-with-defer-upgrade",
  2564  					Metadata: clusterv1.ObjectMeta{
  2565  						Annotations: map[string]string{
  2566  							clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2567  						},
  2568  					},
  2569  				},
  2570  				{
  2571  					Name: "mp-without-annotations",
  2572  				},
  2573  				{
  2574  					Name: "mp-with-hold-upgrade-sequence",
  2575  					Metadata: clusterv1.ObjectMeta{
  2576  						Annotations: map[string]string{
  2577  							clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2578  						},
  2579  					},
  2580  				},
  2581  				{
  2582  					Name: "mp-after-mp-with-hold-upgrade-sequence",
  2583  				},
  2584  			},
  2585  		},
  2586  	}
  2587  
  2588  	tests := []struct {
  2589  		name       string
  2590  		mpTopology clusterv1.MachinePoolTopology
  2591  		deferred   bool
  2592  	}{
  2593  		{
  2594  			name: "MP with defer-upgrade annotation is deferred",
  2595  			mpTopology: clusterv1.MachinePoolTopology{
  2596  				Name: "mp-with-defer-upgrade",
  2597  				Metadata: clusterv1.ObjectMeta{
  2598  					Annotations: map[string]string{
  2599  						clusterv1.ClusterTopologyDeferUpgradeAnnotation: "",
  2600  					},
  2601  				},
  2602  			},
  2603  			deferred: true,
  2604  		},
  2605  		{
  2606  			name: "MP without annotations is not deferred",
  2607  			mpTopology: clusterv1.MachinePoolTopology{
  2608  				Name: "mp-without-annotations",
  2609  			},
  2610  			deferred: false,
  2611  		},
  2612  		{
  2613  			name: "MP with hold-upgrade-sequence annotation is deferred",
  2614  			mpTopology: clusterv1.MachinePoolTopology{
  2615  				Name: "mp-with-hold-upgrade-sequence",
  2616  				Metadata: clusterv1.ObjectMeta{
  2617  					Annotations: map[string]string{
  2618  						clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
  2619  					},
  2620  				},
  2621  			},
  2622  			deferred: true,
  2623  		},
  2624  		{
  2625  			name: "MP after MP with hold-upgrade-sequence is deferred",
  2626  			mpTopology: clusterv1.MachinePoolTopology{
  2627  				Name: "mp-after-mp-with-hold-upgrade-sequence",
  2628  			},
  2629  			deferred: true,
  2630  		},
  2631  	}
  2632  
  2633  	for _, tt := range tests {
  2634  		t.Run(tt.name, func(t *testing.T) {
  2635  			g := NewWithT(t)
  2636  			g.Expect(isMachinePoolDeferred(clusterTopology, tt.mpTopology)).To(Equal(tt.deferred))
  2637  		})
  2638  	}
  2639  }
  2640  
  2641  func TestTemplateToObject(t *testing.T) {
  2642  	template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate").
  2643  		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
  2644  		Build()
  2645  	cluster := &clusterv1.Cluster{
  2646  		ObjectMeta: metav1.ObjectMeta{
  2647  			Name:      "cluster1",
  2648  			Namespace: metav1.NamespaceDefault,
  2649  		},
  2650  	}
  2651  
  2652  	t.Run("Generates an object from a template", func(t *testing.T) {
  2653  		g := NewWithT(t)
  2654  		obj, err := templateToObject(templateToInput{
  2655  			template:              template,
  2656  			templateClonedFromRef: fakeRef1,
  2657  			cluster:               cluster,
  2658  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2659  			currentObjectRef:      nil,
  2660  		})
  2661  		g.Expect(err).ToNot(HaveOccurred())
  2662  		g.Expect(obj).ToNot(BeNil())
  2663  
  2664  		assertTemplateToObject(g, assertTemplateInput{
  2665  			cluster:     cluster,
  2666  			templateRef: fakeRef1,
  2667  			template:    template,
  2668  			currentRef:  nil,
  2669  			obj:         obj,
  2670  		})
  2671  	})
  2672  	t.Run("Overrides the generated name if there is already a reference", func(t *testing.T) {
  2673  		g := NewWithT(t)
  2674  		obj, err := templateToObject(templateToInput{
  2675  			template:              template,
  2676  			templateClonedFromRef: fakeRef1,
  2677  			cluster:               cluster,
  2678  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2679  			currentObjectRef:      fakeRef2,
  2680  		})
  2681  		g.Expect(err).ToNot(HaveOccurred())
  2682  		g.Expect(obj).ToNot(BeNil())
  2683  
  2684  		// ObjectMeta
  2685  		assertTemplateToObject(g, assertTemplateInput{
  2686  			cluster:     cluster,
  2687  			templateRef: fakeRef1,
  2688  			template:    template,
  2689  			currentRef:  fakeRef2,
  2690  			obj:         obj,
  2691  		})
  2692  	})
  2693  }
  2694  
  2695  func TestTemplateToTemplate(t *testing.T) {
  2696  	template := builder.InfrastructureClusterTemplate(metav1.NamespaceDefault, "infrastructureClusterTemplate").
  2697  		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
  2698  		Build()
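        	// Set the last-applied-configuration annotation on the source template;
        	// templateToTemplate is expected to drop it from the generated template (see assertTemplateToTemplate).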
  2699  	annotations := template.GetAnnotations()
  2700  	if annotations == nil {
  2701  		annotations = map[string]string{}
  2702  	}
  2703  	annotations[corev1.LastAppliedConfigAnnotation] = "foo"
  2704  	template.SetAnnotations(annotations)
  2705  
  2706  	cluster := &clusterv1.Cluster{
  2707  		ObjectMeta: metav1.ObjectMeta{
  2708  			Name:      "cluster1",
  2709  			Namespace: metav1.NamespaceDefault,
  2710  		},
  2711  	}
  2712  
  2713  	t.Run("Generates a template from a template", func(t *testing.T) {
  2714  		g := NewWithT(t)
  2715  		obj, err := templateToTemplate(templateToInput{
  2716  			template:              template,
  2717  			templateClonedFromRef: fakeRef1,
  2718  			cluster:               cluster,
  2719  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2720  			currentObjectRef:      nil,
  2721  		})
  2722  		g.Expect(err).ToNot(HaveOccurred())
  2723  		g.Expect(obj).ToNot(BeNil())
  2724  		assertTemplateToTemplate(g, assertTemplateInput{
  2725  			cluster:     cluster,
  2726  			templateRef: fakeRef1,
  2727  			template:    template,
  2728  			currentRef:  nil,
  2729  			obj:         obj,
  2730  		})
  2731  	})
  2732  	t.Run("Overrides the generated name if there is already a reference", func(t *testing.T) {
  2733  		g := NewWithT(t)
  2734  		obj, err := templateToTemplate(templateToInput{
  2735  			template:              template,
  2736  			templateClonedFromRef: fakeRef1,
  2737  			cluster:               cluster,
  2738  			nameGenerator:         names.SimpleNameGenerator(cluster.Name),
  2739  			currentObjectRef:      fakeRef2,
  2740  		})
  2741  		g.Expect(err).ToNot(HaveOccurred())
  2742  		g.Expect(obj).ToNot(BeNil())
  2743  		assertTemplateToTemplate(g, assertTemplateInput{
  2744  			cluster:     cluster,
  2745  			templateRef: fakeRef1,
  2746  			template:    template,
  2747  			currentRef:  fakeRef2,
  2748  			obj:         obj,
  2749  		})
  2750  	})
  2751  }
  2752  
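        // assertTemplateInput groups the inputs for the assertTemplateToObject and assertTemplateToTemplate helpers below.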
  2753  type assertTemplateInput struct {
  2754  	cluster             *clusterv1.Cluster
  2755  	templateRef         *corev1.ObjectReference
  2756  	template            *unstructured.Unstructured
  2757  	labels, annotations map[string]string
  2758  	currentRef          *corev1.ObjectReference
  2759  	obj                 *unstructured.Unstructured
  2760  }
  2761  
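        // assertTemplateToObject checks that an object generated from a template keeps the template's
        // apiVersion with the "Template" suffix trimmed from the kind, carries the expected name,
        // namespace, labels and clone annotations, and copies spec.template.spec into its spec.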
  2762  func assertTemplateToObject(g *WithT, in assertTemplateInput) {
  2763  	// TypeMeta
  2764  	g.Expect(in.obj.GetAPIVersion()).To(Equal(in.template.GetAPIVersion()))
  2765  	g.Expect(in.obj.GetKind()).To(Equal(strings.TrimSuffix(in.template.GetKind(), "Template")))
  2766  
  2767  	// ObjectMeta
  2768  	if in.currentRef != nil {
  2769  		g.Expect(in.obj.GetName()).To(Equal(in.currentRef.Name))
  2770  	} else {
  2771  		g.Expect(in.obj.GetName()).To(HavePrefix(in.cluster.Name))
  2772  	}
  2773  	g.Expect(in.obj.GetNamespace()).To(Equal(in.cluster.Namespace))
  2774  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, in.cluster.Name))
  2775  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  2776  	for k, v := range in.labels {
  2777  		g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(k, v))
  2778  	}
  2779  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, in.templateRef.GroupVersionKind().GroupKind().String()))
  2780  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, in.templateRef.Name))
  2781  	for k, v := range in.annotations {
  2782  		g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(k, v))
  2783  	}
  2784  	// Spec
  2785  	expectedSpec, ok, err := unstructured.NestedMap(in.template.UnstructuredContent(), "spec", "template", "spec")
  2786  	g.Expect(err).ToNot(HaveOccurred())
  2787  	g.Expect(ok).To(BeTrue())
  2788  
  2789  	cloneSpec, ok, err := unstructured.NestedMap(in.obj.UnstructuredContent(), "spec")
  2790  	g.Expect(err).ToNot(HaveOccurred())
  2791  	g.Expect(ok).To(BeTrue())
  2792  	for k, v := range expectedSpec {
  2793  		g.Expect(cloneSpec).To(HaveKeyWithValue(k, v))
  2794  	}
  2795  }
  2796  
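        // assertTemplateToTemplate checks that a template cloned from another template keeps the original
        // kind and spec, carries the expected labels and clone annotations, and does not inherit the
        // last-applied-configuration annotation.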
  2797  func assertTemplateToTemplate(g *WithT, in assertTemplateInput) {
  2798  	// TypeMeta
  2799  	g.Expect(in.obj.GetAPIVersion()).To(Equal(in.template.GetAPIVersion()))
  2800  	g.Expect(in.obj.GetKind()).To(Equal(in.template.GetKind()))
  2801  
  2802  	// ObjectMeta
  2803  	if in.currentRef != nil {
  2804  		g.Expect(in.obj.GetName()).To(Equal(in.currentRef.Name))
  2805  	} else {
  2806  		g.Expect(in.obj.GetName()).To(HavePrefix(in.cluster.Name))
  2807  	}
  2808  	g.Expect(in.obj.GetNamespace()).To(Equal(in.cluster.Namespace))
  2809  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterNameLabel, in.cluster.Name))
  2810  	g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyOwnedLabel, ""))
  2811  	for k, v := range in.labels {
  2812  		g.Expect(in.obj.GetLabels()).To(HaveKeyWithValue(k, v))
  2813  	}
  2814  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, in.templateRef.GroupVersionKind().GroupKind().String()))
  2815  	g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, in.templateRef.Name))
  2816  	g.Expect(in.obj.GetAnnotations()).ToNot(HaveKey(corev1.LastAppliedConfigAnnotation))
  2817  	for k, v := range in.annotations {
  2818  		g.Expect(in.obj.GetAnnotations()).To(HaveKeyWithValue(k, v))
  2819  	}
  2820  	// Spec
  2821  	expectedSpec, ok, err := unstructured.NestedMap(in.template.UnstructuredContent(), "spec")
  2822  	g.Expect(err).ToNot(HaveOccurred())
  2823  	g.Expect(ok).To(BeTrue())
  2824  
  2825  	cloneSpec, ok, err := unstructured.NestedMap(in.obj.UnstructuredContent(), "spec")
  2826  	g.Expect(err).ToNot(HaveOccurred())
  2827  	g.Expect(ok).To(BeTrue())
  2828  	g.Expect(cloneSpec).To(BeComparableTo(expectedSpec))
  2829  }
  2830  
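        // assertNestedField asserts that the nested field identified by fields exists in obj and matches value.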
  2831  func assertNestedField(g *WithT, obj *unstructured.Unstructured, value interface{}, fields ...string) {
  2832  	v, ok, err := unstructured.NestedFieldCopy(obj.UnstructuredContent(), fields...)
  2833  
  2834  	g.Expect(err).ToNot(HaveOccurred())
  2835  	g.Expect(ok).To(BeTrue())
  2836  	g.Expect(v).To(BeComparableTo(value))
  2837  }
  2838  
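        // assertNestedFieldUnset asserts that the nested field identified by fields is not set in obj.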
  2839  func assertNestedFieldUnset(g *WithT, obj *unstructured.Unstructured, fields ...string) {
  2840  	_, ok, err := unstructured.NestedFieldCopy(obj.UnstructuredContent(), fields...)
  2841  
  2842  	g.Expect(err).ToNot(HaveOccurred())
  2843  	g.Expect(ok).To(BeFalse())
  2844  }
  2845  
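        // duplicateMachineDeploymentsState returns a new map containing the same entries as s; the entries are not deep-copied.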
  2846  func duplicateMachineDeploymentsState(s scope.MachineDeploymentsStateMap) scope.MachineDeploymentsStateMap {
  2847  	n := make(scope.MachineDeploymentsStateMap)
  2848  	for k, v := range s {
  2849  		n[k] = v
  2850  	}
  2851  	return n
  2852  }
  2853  
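        // duplicateMachinePoolsState returns a new map containing the same entries as s; the entries are not deep-copied.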
  2854  func duplicateMachinePoolsState(s scope.MachinePoolsStateMap) scope.MachinePoolsStateMap {
  2855  	n := make(scope.MachinePoolsStateMap)
  2856  	for k, v := range s {
  2857  		n[k] = v
  2858  	}
  2859  	return n
  2860  }
  2861  
  2862  func TestMergeMap(t *testing.T) {
  2863  	t.Run("Merge maps", func(t *testing.T) {
  2864  		g := NewWithT(t)
  2865  
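        		// The first map wins on conflicting keys: "a" keeps the value "a" rather than "ax".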
  2866  		m := util.MergeMap(
  2867  			map[string]string{
  2868  				"a": "a",
  2869  				"b": "b",
  2870  			}, map[string]string{
  2871  				"a": "ax",
  2872  				"c": "c",
  2873  			},
  2874  		)
  2875  		g.Expect(m).To(HaveKeyWithValue("a", "a"))
  2876  		g.Expect(m).To(HaveKeyWithValue("b", "b"))
  2877  		g.Expect(m).To(HaveKeyWithValue("c", "c"))
  2878  	})
  2879  	t.Run("Nils empty maps", func(t *testing.T) {
  2880  		g := NewWithT(t)
  2881  
  2882  		m := util.MergeMap(map[string]string{}, map[string]string{})
  2883  		g.Expect(m).To(BeNil())
  2884  	})
  2885  }
  2886  
  2887  func Test_computeMachineHealthCheck(t *testing.T) {
  2888  	maxUnhealthyValue := intstr.FromString("100%")
  2889  	mhcSpec := &clusterv1.MachineHealthCheckClass{
  2890  		UnhealthyConditions: []clusterv1.UnhealthyCondition{
  2891  			{
  2892  				Type:    corev1.NodeReady,
  2893  				Status:  corev1.ConditionUnknown,
  2894  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2895  			},
  2896  			{
  2897  				Type:    corev1.NodeReady,
  2898  				Status:  corev1.ConditionFalse,
  2899  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2900  			},
  2901  		},
  2902  		NodeStartupTimeout: &metav1.Duration{
  2903  			Duration: time.Duration(1)},
  2904  	}
  2905  	selector := &metav1.LabelSelector{MatchLabels: map[string]string{
  2906  		"foo": "bar",
  2907  	}}
  2908  	healthCheckTarget := builder.MachineDeployment("ns1", "md1").Build()
  2909  	cluster := builder.Cluster("ns1", "cluster1").Build()
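        	// want is the expected MachineHealthCheck, including the values added by defaulting via MachineHealthCheck.Default().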
  2910  	want := &clusterv1.MachineHealthCheck{
  2911  		TypeMeta: metav1.TypeMeta{
  2912  			APIVersion: clusterv1.GroupVersion.String(),
  2913  			Kind:       "MachineHealthCheck",
  2914  		},
  2915  		ObjectMeta: metav1.ObjectMeta{
  2916  			Name:      "md1",
  2917  			Namespace: "ns1",
  2918  			// Label is added by defaulting values using MachineHealthCheck.Default()
  2919  			Labels: map[string]string{
  2920  				"cluster.x-k8s.io/cluster-name":     "cluster1",
  2921  				clusterv1.ClusterTopologyOwnedLabel: "",
  2922  			},
  2923  			OwnerReferences: []metav1.OwnerReference{
  2924  				*ownerrefs.OwnerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
  2925  			},
  2926  		},
  2927  		Spec: clusterv1.MachineHealthCheckSpec{
  2928  			ClusterName: cluster.Name,
  2929  			Selector: metav1.LabelSelector{MatchLabels: map[string]string{
  2930  				"foo": "bar",
  2931  			}},
  2932  			// MaxUnhealthy is added by defaulting values using MachineHealthCheck.Default()
  2933  			MaxUnhealthy: &maxUnhealthyValue,
  2934  			UnhealthyConditions: []clusterv1.UnhealthyCondition{
  2935  				{
  2936  					Type:    corev1.NodeReady,
  2937  					Status:  corev1.ConditionUnknown,
  2938  					Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2939  				},
  2940  				{
  2941  					Type:    corev1.NodeReady,
  2942  					Status:  corev1.ConditionFalse,
  2943  					Timeout: metav1.Duration{Duration: 5 * time.Minute},
  2944  				},
  2945  			},
  2946  			NodeStartupTimeout: &metav1.Duration{
  2947  				Duration: time.Duration(1)},
  2948  		},
  2949  	}
  2950  
  2951  	t.Run("set all fields correctly", func(t *testing.T) {
  2952  		g := NewWithT(t)
  2953  
  2954  		got := computeMachineHealthCheck(ctx, healthCheckTarget, selector, cluster, mhcSpec)
  2955  
  2956  		g.Expect(got).To(BeComparableTo(want), cmp.Diff(got, want))
  2957  	})
  2958  }
  2959  
  2960  func TestCalculateRefDesiredAPIVersion(t *testing.T) {
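        	// calculateRefDesiredAPIVersion keeps the apiVersion of the current ref as long as group and
        	// kind are unchanged; otherwise (or if there is no current ref) the desired object's apiVersion wins.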
  2961  	tests := []struct {
  2962  		name                    string
  2963  		currentRef              *corev1.ObjectReference
  2964  		desiredReferencedObject *unstructured.Unstructured
  2965  		want                    *corev1.ObjectReference
  2966  		wantErr                 bool
  2967  	}{
  2968  		{
  2969  			name: "Return desired ref if current ref is nil",
  2970  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  2971  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2972  				"kind":       "DockerCluster",
  2973  				"metadata": map[string]interface{}{
  2974  					"name":      "my-cluster-abc",
  2975  					"namespace": metav1.NamespaceDefault,
  2976  				},
  2977  			}},
  2978  			want: &corev1.ObjectReference{
  2979  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2980  				Kind:       "DockerCluster",
  2981  				Name:       "my-cluster-abc",
  2982  				Namespace:  metav1.NamespaceDefault,
  2983  			},
  2984  		},
  2985  		{
  2986  			name: "Error for invalid apiVersion",
  2987  			currentRef: &corev1.ObjectReference{
  2988  				APIVersion: "invalid/api/version",
  2989  				Kind:       "DockerCluster",
  2990  				Name:       "my-cluster-abc",
  2991  				Namespace:  metav1.NamespaceDefault,
  2992  			},
  2993  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  2994  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2995  				"kind":       "DockerCluster",
  2996  				"metadata": map[string]interface{}{
  2997  					"name":      "my-cluster-abc",
  2998  					"namespace": metav1.NamespaceDefault,
  2999  				},
  3000  			}},
  3001  			wantErr: true,
  3002  		},
  3003  		{
  3004  			name: "Return desired ref if group changed",
  3005  			currentRef: &corev1.ObjectReference{
  3006  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  3007  				Kind:       "DockerCluster",
  3008  				Name:       "my-cluster-abc",
  3009  				Namespace:  metav1.NamespaceDefault,
  3010  			},
  3011  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  3012  				"apiVersion": "infrastructure2.cluster.x-k8s.io/v1beta1",
  3013  				"kind":       "DockerCluster",
  3014  				"metadata": map[string]interface{}{
  3015  					"name":      "my-cluster-abc",
  3016  					"namespace": metav1.NamespaceDefault,
  3017  				},
  3018  			}},
  3019  			want: &corev1.ObjectReference{
  3020  				// Group changed => apiVersion is taken from desired.
  3021  				APIVersion: "infrastructure2.cluster.x-k8s.io/v1beta1",
  3022  				Kind:       "DockerCluster",
  3023  				Name:       "my-cluster-abc",
  3024  				Namespace:  metav1.NamespaceDefault,
  3025  			},
  3026  		},
  3027  		{
  3028  			name: "Return desired ref if kind changed",
  3029  			currentRef: &corev1.ObjectReference{
  3030  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  3031  				Kind:       "DockerCluster",
  3032  				Name:       "my-cluster-abc",
  3033  				Namespace:  metav1.NamespaceDefault,
  3034  			},
  3035  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  3036  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  3037  				"kind":       "DockerCluster2",
  3038  				"metadata": map[string]interface{}{
  3039  					"name":      "my-cluster-abc",
  3040  					"namespace": metav1.NamespaceDefault,
  3041  				},
  3042  			}},
  3043  			want: &corev1.ObjectReference{
  3044  				// Kind changed => apiVersion is taken from desired.
  3045  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  3046  				Kind:       "DockerCluster2",
  3047  				Name:       "my-cluster-abc",
  3048  				Namespace:  metav1.NamespaceDefault,
  3049  			},
  3050  		},
  3051  		{
  3052  			name: "Return current apiVersion if group and kind are the same",
  3053  			currentRef: &corev1.ObjectReference{
  3054  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
  3055  				Kind:       "DockerCluster",
  3056  				Name:       "my-cluster-abc",
  3057  				Namespace:  metav1.NamespaceDefault,
  3058  			},
  3059  			desiredReferencedObject: &unstructured.Unstructured{Object: map[string]interface{}{
  3060  				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  3061  				"kind":       "DockerCluster",
  3062  				"metadata": map[string]interface{}{
  3063  					"name":      "my-cluster-abc",
  3064  					"namespace": metav1.NamespaceDefault,
  3065  				},
  3066  			}},
  3067  			want: &corev1.ObjectReference{
  3068  				// Group and kind are the same => apiVersion is taken from currentRef.
  3069  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2",
  3070  				Kind:       "DockerCluster",
  3071  				Name:       "my-cluster-abc",
  3072  				Namespace:  metav1.NamespaceDefault,
  3073  			},
  3074  		},
  3075  	}
  3076  	for _, tt := range tests {
  3077  		t.Run(tt.name, func(t *testing.T) {
  3078  			g := NewWithT(t)
  3079  
  3080  			got, err := calculateRefDesiredAPIVersion(tt.currentRef, tt.desiredReferencedObject)
  3081  			if tt.wantErr {
  3082  				g.Expect(err).To(HaveOccurred())
  3083  				return
  3084  			}
  3085  			g.Expect(err).ToNot(HaveOccurred())
  3086  
  3087  			g.Expect(got).To(BeComparableTo(tt.want))
  3088  		})
  3089  	}
  3090  }