sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/topology/cluster/desired_state.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  
    23  	"github.com/pkg/errors"
    24  	corev1 "k8s.io/api/core/v1"
    25  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    26  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    27  	"k8s.io/apimachinery/pkg/runtime/schema"
    28  	"k8s.io/utils/pointer"
    29  	"sigs.k8s.io/controller-runtime/pkg/client"
    30  
    31  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    32  	"sigs.k8s.io/cluster-api/controllers/external"
    33  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    34  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    35  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    36  	"sigs.k8s.io/cluster-api/feature"
    37  	"sigs.k8s.io/cluster-api/internal/contract"
    38  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
    39  	"sigs.k8s.io/cluster-api/internal/hooks"
    40  	tlog "sigs.k8s.io/cluster-api/internal/log"
    41  	"sigs.k8s.io/cluster-api/internal/topology/names"
    42  	"sigs.k8s.io/cluster-api/internal/webhooks"
    43  	"sigs.k8s.io/cluster-api/util"
    44  )
    45  
    46  // computeDesiredState computes the desired state of the cluster topology.
    47  // NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
     48  // the entire compute operation will fail. This might be improved in the future if support for reconciling
     49  // a subset of a topology is implemented.
    50  func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
    51  	var err error
    52  	desiredState := &scope.ClusterState{
    53  		ControlPlane: &scope.ControlPlaneState{},
    54  	}
    55  
    56  	// Compute the desired state of the InfrastructureCluster object.
    57  	if desiredState.InfrastructureCluster, err = computeInfrastructureCluster(ctx, s); err != nil {
    58  		return nil, errors.Wrapf(err, "failed to compute InfrastructureCluster")
    59  	}
    60  
    61  	// If the clusterClass mandates the controlPlane has infrastructureMachines, compute the InfrastructureMachineTemplate for the ControlPlane.
    62  	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
    63  		if desiredState.ControlPlane.InfrastructureMachineTemplate, err = computeControlPlaneInfrastructureMachineTemplate(ctx, s); err != nil {
    64  			return nil, errors.Wrapf(err, "failed to compute ControlPlane InfrastructureMachineTemplate")
    65  		}
    66  	}
    67  
    68  	// Mark all the MachineDeployments that are currently upgrading.
    69  	// This captured information is used for:
    70  	// - Building the TopologyReconciled condition.
     71  	// - Making upgrade decisions on the control plane.
    72  	// - Making upgrade decisions on machine deployments.
    73  	mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, r.Client)
    74  	if err != nil {
    75  		return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading")
    76  	}
    77  	s.UpgradeTracker.MachineDeployments.MarkUpgrading(mdUpgradingNames...)
    78  
    79  	// Mark all the MachinePools that are currently upgrading.
    80  	// This captured information is used for:
    81  	// - Building the TopologyReconciled condition.
     82  	// - Making upgrade decisions on the control plane.
    83  	// - Making upgrade decisions on machine pools.
    84  	if len(s.Current.MachinePools) > 0 {
    85  		client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
    86  		if err != nil {
    87  			return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
    88  		}
    89  		// Mark all the MachinePools that are currently upgrading.
    90  		mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)
    91  		if err != nil {
    92  			return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
    93  		}
    94  		s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
    95  	}
    96  
     97  	// Compute the desired state of the ControlPlane object, adding a reference to the
     98  	// InfrastructureMachineTemplate generated by the previous step, if any.
    99  	if desiredState.ControlPlane.Object, err = r.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
   100  		return nil, errors.Wrapf(err, "failed to compute ControlPlane")
   101  	}
   102  
   103  	// Compute the desired state of the ControlPlane MachineHealthCheck if defined.
   104  	// The MachineHealthCheck will have the same name as the ControlPlane Object and a selector for the ControlPlane InfrastructureMachines.
   105  	if s.Blueprint.IsControlPlaneMachineHealthCheckEnabled() {
   106  		desiredState.ControlPlane.MachineHealthCheck = computeMachineHealthCheck(
   107  			ctx,
   108  			desiredState.ControlPlane.Object,
   109  			selectorForControlPlaneMHC(),
   110  			s.Current.Cluster.Name,
   111  			s.Blueprint.ControlPlaneMachineHealthCheckClass())
   112  	}
   113  
    114  	// Compute the desired state for the Cluster object, adding references to the
    115  	// InfrastructureCluster and ControlPlane objects generated by the previous steps.
   116  	desiredState.Cluster, err = computeCluster(ctx, s, desiredState.InfrastructureCluster, desiredState.ControlPlane.Object)
   117  	if err != nil {
   118  		return nil, errors.Wrapf(err, "failed to compute Cluster")
   119  	}
   120  
   121  	// If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies
   122  	// defined in the cluster.
   123  	if s.Blueprint.HasMachineDeployments() {
   124  		desiredState.MachineDeployments, err = r.computeMachineDeployments(ctx, s)
   125  		if err != nil {
   126  			return nil, errors.Wrapf(err, "failed to compute MachineDeployments")
   127  		}
   128  	}
   129  
   130  	// If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies
   131  	// defined in the cluster.
   132  	if s.Blueprint.HasMachinePools() {
   133  		desiredState.MachinePools, err = r.computeMachinePools(ctx, s)
   134  		if err != nil {
   135  			return nil, errors.Wrapf(err, "failed to compute MachinePools")
   136  		}
   137  	}
   138  
    139  	// Apply patches to the desired state according to the patches from the ClusterClass, variables from the Cluster,
    140  	// and builtin variables.
    141  	// NOTE: We have to make sure all spec fields that were explicitly set in desired objects during the computation above
    142  	// are preserved during patching. When desired objects are computed, their spec is copied from a template; in some cases
    143  	// further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not
    144  	// overwritten when applying patches. Some examples are .spec.machineTemplate and .spec.version in control planes.
   145  	if err := r.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
   146  		return nil, errors.Wrap(err, "failed to apply patches")
   147  	}
   148  
   149  	return desiredState, nil
   150  }
   151  
   152  // computeInfrastructureCluster computes the desired state for the InfrastructureCluster object starting from the
   153  // corresponding template defined in the blueprint.
   154  func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructured.Unstructured, error) {
   155  	template := s.Blueprint.InfrastructureClusterTemplate
   156  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.Infrastructure.Ref
   157  	cluster := s.Current.Cluster
   158  	currentRef := cluster.Spec.InfrastructureRef
   159  
   160  	infrastructureCluster, err := templateToObject(templateToInput{
   161  		template:              template,
   162  		templateClonedFromRef: templateClonedFromRef,
   163  		cluster:               cluster,
   164  		nameGenerator:         names.SimpleNameGenerator(fmt.Sprintf("%s-", cluster.Name)),
   165  		currentObjectRef:      currentRef,
   166  		// Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise the provisioning
    167  		// of the infrastructure cluster would start regardless of whether the object is actually referenced by the Cluster itself.
   168  	})
   169  	if err != nil {
   170  		return nil, errors.Wrapf(err, "failed to generate the InfrastructureCluster object from the %s", template.GetKind())
   171  	}
   172  
   173  	// Carry over shim owner reference if any.
    174  	// NOTE: this prevents the ownerRef from being deleted by server side apply.
   175  	if s.Current.InfrastructureCluster != nil {
   176  		shim := clusterShim(s.Current.Cluster)
   177  		if ref := getOwnerReferenceFrom(s.Current.InfrastructureCluster, shim); ref != nil {
   178  			infrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ref})
   179  		}
   180  	}
   181  
   182  	return infrastructureCluster, nil
   183  }
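
         // Illustrative sketch (not part of the original file): the name generator used
         // above appends a random suffix to the "<cluster-name>-" prefix, so a Cluster
         // named "my-cluster" gets an InfrastructureCluster with a name like
         // "my-cluster-7d2sl":
         //
         //	name, err := names.SimpleNameGenerator("my-cluster-").GenerateName()
         //
         // Note: the generated name only matters on first creation; when currentObjectRef
         // is set, the existing object name is re-used.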
   184  
   185  // computeControlPlaneInfrastructureMachineTemplate computes the desired state for InfrastructureMachineTemplate
   186  // that should be referenced by the ControlPlane object.
   187  func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scope.Scope) (*unstructured.Unstructured, error) {
   188  	template := s.Blueprint.ControlPlane.InfrastructureMachineTemplate
   189  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref
   190  	cluster := s.Current.Cluster
   191  
   192  	// Check if the current control plane object has a machineTemplate.infrastructureRef already defined.
   193  	// TODO: Move the next few lines into a method on scope.ControlPlaneState
   194  	var currentRef *corev1.ObjectReference
   195  	if s.Current.ControlPlane != nil && s.Current.ControlPlane.Object != nil {
   196  		var err error
   197  		if currentRef, err = contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object); err != nil {
   198  			return nil, errors.Wrap(err, "failed to get spec.machineTemplate.infrastructureRef for the current ControlPlane object")
   199  		}
   200  	}
   201  
   202  	return templateToTemplate(templateToInput{
   203  		template:              template,
   204  		templateClonedFromRef: templateClonedFromRef,
   205  		cluster:               cluster,
   206  		nameGenerator:         names.SimpleNameGenerator(controlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name)),
   207  		currentObjectRef:      currentRef,
   208  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   209  		// in case of errors in between creating this template and updating the Cluster object
   210  		// with the reference to the ControlPlane object using this template.
   211  		ownerRef: ownerReferenceTo(s.Current.Cluster),
   212  	})
   213  }
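
         // Illustrative sketch (not part of the original file): ownerReferenceTo builds a
         // regular (non-controller) owner reference to the given object, roughly:
         //
         //	ref := metav1.OwnerReference{
         //		APIVersion: clusterv1.GroupVersion.String(),
         //		Kind:       "Cluster",
         //		Name:       cluster.Name,
         //		UID:        cluster.UID,
         //	}
         //
         // so the template is garbage collected if the Cluster is deleted before the
         // ControlPlane object referencing this template is created.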
   214  
   215  // computeControlPlane computes the desired state for the ControlPlane object starting from the
   216  // corresponding template defined in the blueprint.
   217  func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
   218  	template := s.Blueprint.ControlPlane.Template
   219  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
   220  	cluster := s.Current.Cluster
   221  	currentRef := cluster.Spec.ControlPlaneRef
   222  
   223  	// Compute the labels and annotations to be applied to ControlPlane metadata and ControlPlane machines.
   224  	// We merge the labels and annotations from topology and ClusterClass.
   225  	// We also add the cluster-name and the topology owned labels, so they are propagated down.
   226  	topologyMetadata := s.Blueprint.Topology.ControlPlane.Metadata
   227  	clusterClassMetadata := s.Blueprint.ClusterClass.Spec.ControlPlane.Metadata
   228  
   229  	controlPlaneLabels := util.MergeMap(topologyMetadata.Labels, clusterClassMetadata.Labels)
   230  	if controlPlaneLabels == nil {
   231  		controlPlaneLabels = map[string]string{}
   232  	}
   233  	controlPlaneLabels[clusterv1.ClusterNameLabel] = cluster.Name
   234  	controlPlaneLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   235  
   236  	controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations)
   237  
   238  	nameTemplate := "{{ .cluster.name }}-{{ .random }}"
   239  	if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != nil {
   240  		nameTemplate = *s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template
   241  	}
   242  
   243  	controlPlane, err := templateToObject(templateToInput{
   244  		template:              template,
   245  		templateClonedFromRef: templateClonedFromRef,
   246  		cluster:               cluster,
   247  		nameGenerator:         names.ControlPlaneNameGenerator(nameTemplate, cluster.Name),
   248  		currentObjectRef:      currentRef,
   249  		labels:                controlPlaneLabels,
   250  		annotations:           controlPlaneAnnotations,
   251  		// Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise the provisioning
    252  		// of the ControlPlane would start regardless of whether the object is actually referenced by the Cluster itself.
   253  	})
   254  	if err != nil {
   255  		return nil, errors.Wrapf(err, "failed to generate the ControlPlane object from the %s", template.GetKind())
   256  	}
   257  
   258  	// Carry over shim owner reference if any.
    259  	// NOTE: this prevents the ownerRef from being deleted by server side apply.
   260  	if s.Current.ControlPlane != nil && s.Current.ControlPlane.Object != nil {
   261  		shim := clusterShim(s.Current.Cluster)
   262  		if ref := getOwnerReferenceFrom(s.Current.ControlPlane.Object, shim); ref != nil {
   263  			controlPlane.SetOwnerReferences([]metav1.OwnerReference{*ref})
   264  		}
   265  	}
   266  
    267  	// If the ClusterClass mandates the controlPlane has infrastructureMachines, add a reference to the InfrastructureMachine
    268  	// template and the metadata to be used for the control plane machines.
   269  	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
   270  		// We have to copy the template to avoid modifying the one from desired state.
   271  		refCopy := infrastructureMachineTemplate.DeepCopy()
   272  
    273  		// If the ControlPlane already exists, avoid downgrading the apiVersion of the ref if it was bumped
    274  		// by the control plane controller in the meantime.
   275  		if s.Current.ControlPlane.Object != nil {
   276  			currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object)
   277  			if err != nil {
    278  				return nil, errors.Wrapf(err, "failed to get spec.machineTemplate.infrastructureRef from the ControlPlane object")
   279  			}
   280  			desiredRef, err := calculateRefDesiredAPIVersion(currentRef, refCopy)
   281  			if err != nil {
   282  				return nil, errors.Wrap(err, "failed to calculate desired spec.machineTemplate.infrastructureRef")
   283  			}
   284  			refCopy.SetAPIVersion(desiredRef.APIVersion)
   285  		}
   286  		if err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, refCopy); err != nil {
    287  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.infrastructureRef in the ControlPlane object")
   288  		}
   289  
   290  		// Add the ControlPlane labels and annotations to the ControlPlane machines as well.
   291  		// Note: We have to ensure the machine template metadata copied from the control plane template is not overwritten.
   292  		controlPlaneMachineTemplateMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(controlPlane)
   293  		if err != nil {
   294  			return nil, errors.Wrap(err, "failed to get spec.machineTemplate.metadata from the ControlPlane object")
   295  		}
   296  
   297  		controlPlaneMachineTemplateMetadata.Labels = util.MergeMap(controlPlaneLabels, controlPlaneMachineTemplateMetadata.Labels)
   298  		controlPlaneMachineTemplateMetadata.Annotations = util.MergeMap(controlPlaneAnnotations, controlPlaneMachineTemplateMetadata.Annotations)
   299  
   300  		if err := contract.ControlPlane().MachineTemplate().Metadata().Set(controlPlane,
   301  			&clusterv1.ObjectMeta{
   302  				Labels:      controlPlaneMachineTemplateMetadata.Labels,
   303  				Annotations: controlPlaneMachineTemplateMetadata.Annotations,
   304  			}); err != nil {
   305  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.metadata in the ControlPlane object")
   306  		}
   307  	}
   308  
   309  	// If it is required to manage the number of replicas for the control plane, set the corresponding field.
   310  	// NOTE: If the Topology.ControlPlane.replicas value is nil, it is assumed that the control plane controller
    311  	// does not implement support for this field and the ControlPlane object is generated without the replicas field.
   312  	if s.Blueprint.Topology.ControlPlane.Replicas != nil {
   313  		if err := contract.ControlPlane().Replicas().Set(controlPlane, int64(*s.Blueprint.Topology.ControlPlane.Replicas)); err != nil {
   314  			return nil, errors.Wrap(err, "failed to set spec.replicas in the ControlPlane object")
   315  		}
   316  	}
   317  
   318  	// If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field.
   319  	nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDrainTimeout
   320  	if s.Blueprint.Topology.ControlPlane.NodeDrainTimeout != nil {
   321  		nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.NodeDrainTimeout
   322  	}
   323  	if nodeDrainTimeout != nil {
   324  		if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, *nodeDrainTimeout); err != nil {
   325  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDrainTimeout in the ControlPlane object")
   326  		}
   327  	}
   328  
   329  	// If it is required to manage the NodeVolumeDetachTimeout for the control plane, set the corresponding field.
   330  	nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeVolumeDetachTimeout
   331  	if s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout != nil {
   332  		nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout
   333  	}
   334  	if nodeVolumeDetachTimeout != nil {
   335  		if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Set(controlPlane, *nodeVolumeDetachTimeout); err != nil {
   336  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeVolumeDetachTimeout in the ControlPlane object")
   337  		}
   338  	}
   339  
   340  	// If it is required to manage the NodeDeletionTimeout for the control plane, set the corresponding field.
   341  	nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDeletionTimeout
   342  	if s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout != nil {
   343  		nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout
   344  	}
   345  	if nodeDeletionTimeout != nil {
   346  		if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Set(controlPlane, *nodeDeletionTimeout); err != nil {
   347  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDeletionTimeout in the ControlPlane object")
   348  		}
   349  	}
   350  
    351  	// Set the desired Kubernetes version for the control plane.
   352  	version, err := r.computeControlPlaneVersion(ctx, s)
   353  	if err != nil {
   354  		return nil, errors.Wrap(err, "failed to compute version of control plane")
   355  	}
   356  	if err := contract.ControlPlane().Version().Set(controlPlane, version); err != nil {
   357  		return nil, errors.Wrap(err, "failed to set spec.version in the ControlPlane object")
   358  	}
   359  
   360  	return controlPlane, nil
   361  }
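
         // Illustrative sketch (hypothetical values, not part of the original file): the
         // ControlPlane name defaults to the "{{ .cluster.name }}-{{ .random }}" template
         // and can be overridden through the ClusterClass naming strategy, e.g.:
         //
         //	clusterClass.Spec.ControlPlane.NamingStrategy = &clusterv1.ControlPlaneClassNamingStrategy{
         //		Template: pointer.String("{{ .cluster.name }}-cp-{{ .random }}"),
         //	}
         //
         // With this strategy, a Cluster named "my-cluster" gets a ControlPlane with a
         // name like "my-cluster-cp-x7f2b".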
   362  
   363  // computeControlPlaneVersion calculates the version of the desired control plane.
   364  // The version is calculated using the state of the current machine deployments, the current control plane
   365  // and the version defined in the topology.
   366  func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
   367  	log := tlog.LoggerFrom(ctx)
   368  	desiredVersion := s.Blueprint.Topology.Version
   369  	// If we are creating the control plane object (current control plane is nil), use version from topology.
   370  	if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
   371  		return desiredVersion, nil
   372  	}
   373  
    374  	// Get the current version of the control plane.
   375  	currentVersion, err := contract.ControlPlane().Version().Get(s.Current.ControlPlane.Object)
   376  	if err != nil {
   377  		return "", errors.Wrap(err, "failed to get the version from control plane spec")
   378  	}
   379  
   380  	s.UpgradeTracker.ControlPlane.IsPendingUpgrade = true
   381  	if *currentVersion == desiredVersion {
   382  		// Mark that the control plane spec is already at the desired version.
   383  		// This information is used to show the appropriate message for the TopologyReconciled
   384  		// condition.
   385  		s.UpgradeTracker.ControlPlane.IsPendingUpgrade = false
   386  	}
   387  
   388  	// Check if the control plane is being created for the first time.
   389  	cpProvisioning, err := contract.ControlPlane().IsProvisioning(s.Current.ControlPlane.Object)
   390  	if err != nil {
   391  		return "", errors.Wrap(err, "failed to check if the control plane is being provisioned")
   392  	}
    393  	// If the control plane is being provisioned (being created for the first time), then do not
   394  	// pick up the desiredVersion yet.
   395  	// Return the current version of the control plane. We will pick up the new version after the
   396  	// control plane is provisioned.
   397  	if cpProvisioning {
   398  		s.UpgradeTracker.ControlPlane.IsProvisioning = true
   399  		return *currentVersion, nil
   400  	}
   401  
    402  	// Check if the current control plane is upgrading.
   403  	cpUpgrading, err := contract.ControlPlane().IsUpgrading(s.Current.ControlPlane.Object)
   404  	if err != nil {
   405  		return "", errors.Wrap(err, "failed to check if control plane is upgrading")
   406  	}
    407  	// If the current control plane is upgrading (still completing a previous upgrade),
   408  	// then do not pick up the desiredVersion yet.
   409  	// Return the current version of the control plane. We will pick up the new version
   410  	// after the control plane is stable.
   411  	if cpUpgrading {
   412  		s.UpgradeTracker.ControlPlane.IsUpgrading = true
   413  		return *currentVersion, nil
   414  	}
   415  
    416  	// Return here if the control plane is already at the desired version.
   417  	if !s.UpgradeTracker.ControlPlane.IsPendingUpgrade {
   418  		// At this stage the control plane is not upgrading and is already at the desired version.
   419  		// We can return.
    420  		// Note: We do not return early at the top of the function when the control plane is already at the
    421  		// desired version, because we still need to know whether the control plane is being upgraded. This
    422  		// information is required when updating the TopologyReconciled condition on the cluster.
    423  
    424  		// Call the AfterControlPlaneUpgrade hook now that the control plane is upgraded.
   425  		if feature.Gates.Enabled(feature.RuntimeSDK) {
   426  			// Call the hook only if we are tracking the intent to do so. If it is not tracked it means we don't need to call the
   427  			// hook because we didn't go through an upgrade or we already called the hook after the upgrade.
   428  			if hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster) {
    429  				// Call all the registered extensions for the hook.
   430  				hookRequest := &runtimehooksv1.AfterControlPlaneUpgradeRequest{
   431  					Cluster:           *s.Current.Cluster,
   432  					KubernetesVersion: desiredVersion,
   433  				}
   434  				hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
   435  				if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
   436  					return "", err
   437  				}
   438  				// Add the response to the tracker so we can later update condition or requeue when required.
   439  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, hookResponse)
   440  
    441  				// If the extension responds to hold off on starting MachineDeployment/MachinePool upgrades,
    442  				// change the UpgradeTracker accordingly; otherwise the hook call is completed and we
    443  				// can remove this hook from the list of pending hooks.
   444  				if hookResponse.RetryAfterSeconds != 0 {
   445  					log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
   446  				} else {
   447  					if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
   448  						return "", err
   449  					}
   450  				}
   451  			}
   452  		}
   453  
   454  		return *currentVersion, nil
   455  	}
   456  
   457  	// If the control plane supports replicas, check if the control plane is in the middle of a scale operation.
   458  	// If yes, then do not pick up the desiredVersion yet. We will pick up the new version after the control plane is stable.
   459  	if s.Blueprint.Topology.ControlPlane.Replicas != nil {
   460  		cpScaling, err := contract.ControlPlane().IsScaling(s.Current.ControlPlane.Object)
   461  		if err != nil {
   462  			return "", errors.Wrap(err, "failed to check if the control plane is scaling")
   463  		}
   464  		if cpScaling {
   465  			s.UpgradeTracker.ControlPlane.IsScaling = true
   466  			return *currentVersion, nil
   467  		}
   468  	}
   469  
   470  	// If the control plane is not upgrading or scaling, we can assume the control plane is stable.
    471  	// However, we should also check whether any MachineDeployments/MachinePools are upgrading.
   472  	// If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet.
   473  	// We will pick up the new version after the MachineDeployments/MachinePools finish upgrading.
   474  	if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 ||
   475  		len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 {
   476  		return *currentVersion, nil
   477  	}
   478  
   479  	if feature.Gates.Enabled(feature.RuntimeSDK) {
   480  		// At this point the control plane and the machine deployments are stable and we are almost ready to pick
   481  		// up the desiredVersion. Call the BeforeClusterUpgrade hook before picking up the desired version.
   482  		hookRequest := &runtimehooksv1.BeforeClusterUpgradeRequest{
   483  			Cluster:               *s.Current.Cluster,
   484  			FromKubernetesVersion: *currentVersion,
   485  			ToKubernetesVersion:   desiredVersion,
   486  		}
   487  		hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
   488  		if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
   489  			return "", err
   490  		}
   491  		// Add the response to the tracker so we can later update condition or requeue when required.
   492  		s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterUpgrade, hookResponse)
   493  		if hookResponse.RetryAfterSeconds != 0 {
    494  			// Cannot pick up the new version right now. Need to try again later.
   495  			log.Infof("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade))
   496  			return *currentVersion, nil
   497  		}
   498  
   499  		// We are picking up the new version here.
   500  		// Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade.
   501  		if err := hooks.MarkAsPending(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
   502  			return "", err
   503  		}
   504  	}
   505  
    506  	// Control plane and machine deployments are stable. All the required hooks have been called.
   507  	// Ready to pick up the topology version.
   508  	s.UpgradeTracker.ControlPlane.IsPendingUpgrade = false
   509  	s.UpgradeTracker.ControlPlane.IsStartingUpgrade = true
   510  	return desiredVersion, nil
   511  }
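
         // Illustrative sketch of the hook bookkeeping used above (a sketch assuming the
         // RuntimeSDK feature gate is enabled; c and cluster are hypothetical stand-ins
         // for r.Client and s.Current.Cluster, not part of the original file):
         //
         //	// When the new version is picked up, track the intent to call the hook later.
         //	_ = hooks.MarkAsPending(ctx, c, cluster, runtimehooksv1.AfterControlPlaneUpgrade)
         //	// Once the control plane reaches the desired version, call the hook while pending.
         //	if hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, cluster) {
         //		// ... call the registered extensions; if the response is non-blocking
         //		// (RetryAfterSeconds == 0), clear the pending state:
         //		_ = hooks.MarkAsDone(ctx, c, cluster, runtimehooksv1.AfterControlPlaneUpgrade)
         //	}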
   512  
   513  // computeCluster computes the desired state for the Cluster object.
    514  // NOTE: Some of the Cluster's fields contribute to defining the Cluster blueprint (e.g. Cluster.Spec.Topology),
    515  // while other fields should be managed as part of the actual Cluster (e.g. Cluster.Spec.ControlPlaneRef); in this func
    516  // we are concerned only with the latter group of fields.
   517  func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, controlPlane *unstructured.Unstructured) (*clusterv1.Cluster, error) {
   518  	cluster := s.Current.Cluster.DeepCopy()
   519  
   520  	// Enforce the topology labels.
    521  	// NOTE: The cluster label is added at creation time so this object can be read by the ClusterTopology
    522  	// controller immediately after creation, even before other controllers add the label (if missing).
   523  	if cluster.Labels == nil {
   524  		cluster.Labels = map[string]string{}
   525  	}
   526  	cluster.Labels[clusterv1.ClusterNameLabel] = cluster.Name
   527  	cluster.Labels[clusterv1.ClusterTopologyOwnedLabel] = ""
   528  
   529  	// Set the references to the infrastructureCluster and controlPlane objects.
   530  	// NOTE: Once set for the first time, the references are not expected to change.
   531  	var err error
   532  	cluster.Spec.InfrastructureRef, err = calculateRefDesiredAPIVersion(cluster.Spec.InfrastructureRef, infrastructureCluster)
   533  	if err != nil {
   534  		return nil, errors.Wrapf(err, "failed to calculate infrastructureRef")
   535  	}
   536  	cluster.Spec.ControlPlaneRef, err = calculateRefDesiredAPIVersion(cluster.Spec.ControlPlaneRef, controlPlane)
   537  	if err != nil {
   538  		return nil, errors.Wrapf(err, "failed to calculate controlPlaneRef")
   539  	}
   540  
   541  	return cluster, nil
   542  }
   543  
    544  // calculateRefDesiredAPIVersion returns the desired ref calculated from desiredReferencedObject,
    545  // making sure it doesn't override the version part of the apiVersion stored in the currentRef, if any.
    546  // This is required because the apiVersion in the desired ref is aligned to the apiVersion used
    547  // in ClusterClass when reading the current state. If the currentRef is nil, or its group or kind
    548  // doesn't match, no changes are applied to the desired ref.
   549  func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredReferencedObject *unstructured.Unstructured) (*corev1.ObjectReference, error) {
   550  	desiredRef := contract.ObjToRef(desiredReferencedObject)
    551  	// If the ref is not set yet, just return a ref to the desired referenced object.
   552  	if currentRef == nil {
   553  		return desiredRef, nil
   554  	}
   555  
   556  	currentGV, err := schema.ParseGroupVersion(currentRef.APIVersion)
   557  	if err != nil {
   558  		return nil, errors.Wrapf(err, "failed to parse apiVersion %q of current ref", currentRef.APIVersion)
   559  	}
   560  	desiredGK := desiredReferencedObject.GroupVersionKind().GroupKind()
   561  
    562  	// Keep the apiVersion of the current ref if the group and kind are already correct.
   563  	// We only want to change the apiVersion to update the group, as it should be possible
   564  	// for other controllers to bump the version if necessary (i.e. if there is a newer
   565  	// version of the CRD compared to the one that the topology controller is working on).
   566  	if currentGV.Group == desiredGK.Group && currentRef.Kind == desiredGK.Kind {
   567  		desiredRef.APIVersion = currentRef.APIVersion
   568  	}
   569  	return desiredRef, nil
   570  }
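
         // Illustrative sketch of the rule above (hypothetical values, not part of the
         // original file): when group and kind match, the current apiVersion wins even if
         // the desired object was read at a different version:
         //
         //	currentRef := &corev1.ObjectReference{
         //		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
         //		Kind:       "DockerCluster",
         //		Name:       "my-cluster-abcde",
         //	}
         //	desired := &unstructured.Unstructured{}
         //	desired.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1alpha4")
         //	desired.SetKind("DockerCluster")
         //	desired.SetName("my-cluster-abcde")
         //	ref, _ := calculateRefDesiredAPIVersion(currentRef, desired)
         //	// ref.APIVersion == "infrastructure.cluster.x-k8s.io/v1beta1"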
   571  
   572  // computeMachineDeployments computes the desired state of the list of MachineDeployments.
   573  func (r *Reconciler) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
   574  	machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
   575  	for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
   576  		desiredMachineDeployment, err := computeMachineDeployment(ctx, s, mdTopology)
   577  		if err != nil {
    578  			return nil, errors.Wrapf(err, "failed to compute MachineDeployment for topology %q", mdTopology.Name)
   579  		}
   580  		machineDeploymentsStateMap[mdTopology.Name] = desiredMachineDeployment
   581  	}
   582  	return machineDeploymentsStateMap, nil
   583  }
   584  
   585  // computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
   586  // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
   587  // the machineDeployment class.
   588  func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
   589  	desiredMachineDeployment := &scope.MachineDeploymentState{}
   590  
   591  	// Gets the blueprint for the MachineDeployment class.
   592  	className := machineDeploymentTopology.Class
   593  	machineDeploymentBlueprint, ok := s.Blueprint.MachineDeployments[className]
   594  	if !ok {
   595  		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   596  	}
   597  
   598  	var machineDeploymentClass *clusterv1.MachineDeploymentClass
   599  	for _, mdClass := range s.Blueprint.ClusterClass.Spec.Workers.MachineDeployments {
   600  		mdClass := mdClass
   601  		if mdClass.Class == className {
   602  			machineDeploymentClass = &mdClass
   603  			break
   604  		}
   605  	}
   606  	if machineDeploymentClass == nil {
   607  		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   608  	}
   609  
   610  	// Compute the bootstrap template.
   611  	currentMachineDeployment := s.Current.MachineDeployments[machineDeploymentTopology.Name]
   612  	var currentBootstrapTemplateRef *corev1.ObjectReference
   613  	if currentMachineDeployment != nil && currentMachineDeployment.BootstrapTemplate != nil {
   614  		currentBootstrapTemplateRef = currentMachineDeployment.Object.Spec.Template.Spec.Bootstrap.ConfigRef
   615  	}
   616  	var err error
   617  	desiredMachineDeployment.BootstrapTemplate, err = templateToTemplate(templateToInput{
   618  		template:              machineDeploymentBlueprint.BootstrapTemplate,
   619  		templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.BootstrapTemplate),
   620  		cluster:               s.Current.Cluster,
   621  		nameGenerator:         names.SimpleNameGenerator(bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
   622  		currentObjectRef:      currentBootstrapTemplateRef,
   623  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   624  		// in case of errors in between creating this template and creating/updating the MachineDeployment object
   625  		// with the reference to this template.
   626  		ownerRef: ownerReferenceTo(s.Current.Cluster),
   627  	})
   628  	if err != nil {
   629  		return nil, err
   630  	}
   631  
   632  	bootstrapTemplateLabels := desiredMachineDeployment.BootstrapTemplate.GetLabels()
   633  	if bootstrapTemplateLabels == nil {
   634  		bootstrapTemplateLabels = map[string]string{}
   635  	}
    636  	// Add ClusterTopologyMachineDeploymentNameLabel to the generated Bootstrap template.
   637  	bootstrapTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   638  	desiredMachineDeployment.BootstrapTemplate.SetLabels(bootstrapTemplateLabels)
   639  
   640  	// Compute the Infrastructure template.
   641  	var currentInfraMachineTemplateRef *corev1.ObjectReference
   642  	if currentMachineDeployment != nil && currentMachineDeployment.InfrastructureMachineTemplate != nil {
   643  		currentInfraMachineTemplateRef = &currentMachineDeployment.Object.Spec.Template.Spec.InfrastructureRef
   644  	}
   645  	desiredMachineDeployment.InfrastructureMachineTemplate, err = templateToTemplate(templateToInput{
   646  		template:              machineDeploymentBlueprint.InfrastructureMachineTemplate,
   647  		templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.InfrastructureMachineTemplate),
   648  		cluster:               s.Current.Cluster,
   649  		nameGenerator:         names.SimpleNameGenerator(infrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
   650  		currentObjectRef:      currentInfraMachineTemplateRef,
   651  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   652  		// in case of errors in between creating this template and creating/updating the MachineDeployment object
   653  		// with the reference to this template.
   654  		ownerRef: ownerReferenceTo(s.Current.Cluster),
   655  	})
   656  	if err != nil {
   657  		return nil, err
   658  	}
   659  
   660  	infraMachineTemplateLabels := desiredMachineDeployment.InfrastructureMachineTemplate.GetLabels()
   661  	if infraMachineTemplateLabels == nil {
   662  		infraMachineTemplateLabels = map[string]string{}
   663  	}
    664  	// Add ClusterTopologyMachineDeploymentNameLabel to the generated InfrastructureMachine template.
   665  	infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   666  	desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
   667  	version := computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
   668  
    669  	// Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology.
   670  	minReadySeconds := machineDeploymentClass.MinReadySeconds
   671  	if machineDeploymentTopology.MinReadySeconds != nil {
   672  		minReadySeconds = machineDeploymentTopology.MinReadySeconds
   673  	}
   674  
   675  	strategy := machineDeploymentClass.Strategy
   676  	if machineDeploymentTopology.Strategy != nil {
   677  		strategy = machineDeploymentTopology.Strategy
   678  	}
   679  
   680  	failureDomain := machineDeploymentClass.FailureDomain
   681  	if machineDeploymentTopology.FailureDomain != nil {
   682  		failureDomain = machineDeploymentTopology.FailureDomain
   683  	}
   684  
   685  	nodeDrainTimeout := machineDeploymentClass.NodeDrainTimeout
   686  	if machineDeploymentTopology.NodeDrainTimeout != nil {
   687  		nodeDrainTimeout = machineDeploymentTopology.NodeDrainTimeout
   688  	}
   689  
   690  	nodeVolumeDetachTimeout := machineDeploymentClass.NodeVolumeDetachTimeout
   691  	if machineDeploymentTopology.NodeVolumeDetachTimeout != nil {
   692  		nodeVolumeDetachTimeout = machineDeploymentTopology.NodeVolumeDetachTimeout
   693  	}
   694  
   695  	nodeDeletionTimeout := machineDeploymentClass.NodeDeletionTimeout
   696  	if machineDeploymentTopology.NodeDeletionTimeout != nil {
   697  		nodeDeletionTimeout = machineDeploymentTopology.NodeDeletionTimeout
   698  	}
   699  
   700  	// Compute the MachineDeployment object.
   701  	desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachineDeployment.BootstrapTemplate)
   702  	if err != nil {
   703  		return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
   704  	}
   705  	desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachineDeployment.InfrastructureMachineTemplate)
   706  	if err != nil {
   707  		return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine template ref")
   708  	}
   709  
   710  	nameTemplate := "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}"
   711  	if machineDeploymentClass.NamingStrategy != nil && machineDeploymentClass.NamingStrategy.Template != nil {
   712  		nameTemplate = *machineDeploymentClass.NamingStrategy.Template
   713  	}
   714  
   715  	name, err := names.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName()
   716  	if err != nil {
   717  		return nil, errors.Wrap(err, "failed to generate name for MachineDeployment")
   718  	}
   719  
   720  	desiredMachineDeploymentObj := &clusterv1.MachineDeployment{
   721  		TypeMeta: metav1.TypeMeta{
   722  			Kind:       clusterv1.GroupVersion.WithKind("MachineDeployment").Kind,
   723  			APIVersion: clusterv1.GroupVersion.String(),
   724  		},
   725  		ObjectMeta: metav1.ObjectMeta{
   726  			Name:      name,
   727  			Namespace: s.Current.Cluster.Namespace,
   728  		},
   729  		Spec: clusterv1.MachineDeploymentSpec{
   730  			ClusterName:     s.Current.Cluster.Name,
   731  			MinReadySeconds: minReadySeconds,
   732  			Strategy:        strategy,
   733  			Template: clusterv1.MachineTemplateSpec{
   734  				Spec: clusterv1.MachineSpec{
   735  					ClusterName:             s.Current.Cluster.Name,
   736  					Version:                 pointer.String(version),
   737  					Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
   738  					InfrastructureRef:       *desiredInfraMachineTemplateRef,
   739  					FailureDomain:           failureDomain,
   740  					NodeDrainTimeout:        nodeDrainTimeout,
   741  					NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
   742  					NodeDeletionTimeout:     nodeDeletionTimeout,
   743  				},
   744  			},
   745  		},
   746  	}
   747  
    748  	// If an existing MachineDeployment is present, override the generated name by re-using the existing name
    749  	// (this helps in reconciliation).
   750  	if currentMachineDeployment != nil && currentMachineDeployment.Object != nil {
   751  		desiredMachineDeploymentObj.SetName(currentMachineDeployment.Object.Name)
   752  	}
   753  
    754  	// Apply annotations.
   755  	machineDeploymentAnnotations := util.MergeMap(machineDeploymentTopology.Metadata.Annotations, machineDeploymentBlueprint.Metadata.Annotations)
   756  	// Ensure the annotations used to control the upgrade sequence are never propagated.
   757  	delete(machineDeploymentAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
   758  	delete(machineDeploymentAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
   759  	desiredMachineDeploymentObj.SetAnnotations(machineDeploymentAnnotations)
   760  	desiredMachineDeploymentObj.Spec.Template.Annotations = machineDeploymentAnnotations
   761  
    762  	// Apply labels.
    763  	// NOTE: On top of all the labels applied to managed objects, we apply the ClusterTopologyMachineDeploymentNameLabel
    764  	// to keep track of the MachineDeployment name from the Topology; this will be used to identify the object in subsequent reconcile loops.
   765  	machineDeploymentLabels := util.MergeMap(machineDeploymentTopology.Metadata.Labels, machineDeploymentBlueprint.Metadata.Labels)
   766  	if machineDeploymentLabels == nil {
   767  		machineDeploymentLabels = map[string]string{}
   768  	}
   769  	machineDeploymentLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
   770  	machineDeploymentLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   771  	machineDeploymentLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   772  	desiredMachineDeploymentObj.SetLabels(machineDeploymentLabels)
   773  
   774  	// Also set the labels in .spec.template.labels so that they are propagated to
   775  	// MachineSet.labels and MachineSet.spec.template.labels and thus to Machine.labels.
   776  	// Note: the labels in MachineSet are used to properly cleanup templates when the MachineSet is deleted.
   777  	desiredMachineDeploymentObj.Spec.Template.Labels = machineDeploymentLabels
   778  
   779  	// Set the selector with the subset of labels identifying controlled machines.
    780  	// NOTE: this prevents the webhook from adding the cluster.x-k8s.io/deployment-name label, which is
    781  	// redundant for managed MachineDeployments given that we already have topology.cluster.x-k8s.io/deployment-name.
   782  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels = map[string]string{}
   783  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
   784  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   785  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   786  
   787  	// Set the desired replicas.
   788  	desiredMachineDeploymentObj.Spec.Replicas = machineDeploymentTopology.Replicas
   789  
   790  	desiredMachineDeployment.Object = desiredMachineDeploymentObj
   791  
   792  	// If the ClusterClass defines a MachineHealthCheck for the MachineDeployment add it to the desired state.
   793  	if s.Blueprint.IsMachineDeploymentMachineHealthCheckEnabled(&machineDeploymentTopology) {
   794  		// Note: The MHC is going to use a selector that provides a minimal set of labels which are common to all MachineSets belonging to the MachineDeployment.
   795  		desiredMachineDeployment.MachineHealthCheck = computeMachineHealthCheck(
   796  			ctx,
   797  			desiredMachineDeploymentObj,
   798  			selectorForMachineDeploymentMHC(desiredMachineDeploymentObj),
   799  			s.Current.Cluster.Name,
   800  			s.Blueprint.MachineDeploymentMachineHealthCheckClass(&machineDeploymentTopology))
   801  	}
   802  	return desiredMachineDeployment, nil
   803  }
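
         // Illustrative sketch (hypothetical values, not part of the original file):
         // util.MergeMap gives precedence to entries from the first map, which is why the
         // topology metadata overrides the ClusterClass metadata in the merges above:
         //
         //	merged := util.MergeMap(
         //		map[string]string{"tier": "topology"},             // MachineDeploymentTopology metadata
         //		map[string]string{"tier": "class", "env": "prod"}, // MachineDeploymentClass metadata
         //	)
         //	// merged == map[string]string{"tier": "topology", "env": "prod"}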
   804  
   805  // computeMachineDeploymentVersion calculates the version of the desired machine deployment.
   806  // The version is calculated using the state of the current machine deployments,
   807  // the current control plane and the version defined in the topology.
   808  func computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
   809  	desiredVersion := s.Blueprint.Topology.Version
   810  	// If creating a new machine deployment, mark it as pending if the control plane is not
   811  	// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
   812  	// Example: join could fail if the load balancers are slow in detecting when CP machines are
   813  	// being deleted.
   814  	if currentMDState == nil || currentMDState.Object == nil {
   815  		if !isControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
   816  			s.UpgradeTracker.MachineDeployments.MarkPendingCreate(machineDeploymentTopology.Name)
   817  		}
   818  		return desiredVersion
   819  	}
   820  
   821  	// Get the current version of the machine deployment.
   822  	currentVersion := *currentMDState.Object.Spec.Template.Spec.Version
   823  
    824  	// Return early if the currentVersion is already equal to the desiredVersion;
    825  	// no further checks are required.
   826  	if currentVersion == desiredVersion {
   827  		return currentVersion
   828  	}
   829  
   830  	// Return early if the upgrade for the MachineDeployment is deferred.
   831  	if isMachineDeploymentDeferred(s.Blueprint.Topology, machineDeploymentTopology) {
   832  		s.UpgradeTracker.MachineDeployments.MarkDeferredUpgrade(currentMDState.Object.Name)
   833  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   834  		return currentVersion
   835  	}
   836  
   837  	// Return early if the AfterControlPlaneUpgrade hook returns a blocking response.
   838  	if s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
   839  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   840  		return currentVersion
   841  	}
   842  
   843  	// Return early if the upgrade concurrency is reached.
   844  	if s.UpgradeTracker.MachineDeployments.UpgradeConcurrencyReached() {
   845  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   846  		return currentVersion
   847  	}
   848  
   849  	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
   850  	// Return the current version of the machine deployment. We will pick up the new version after the control
   851  	// plane is stable.
   852  	if !isControlPlaneStable(s) {
   853  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   854  		return currentVersion
   855  	}
   856  
   857  	// Control plane and machine deployments are stable.
   858  	// Ready to pick up the topology version.
   859  	s.UpgradeTracker.MachineDeployments.MarkUpgrading(currentMDState.Object.Name)
   860  	return desiredVersion
   861  }
   862  
    863  // isControlPlaneStable returns true if the ControlPlane is stable.
   864  func isControlPlaneStable(s *scope.Scope) bool {
   865  	// If the current control plane is upgrading it is not considered stable.
   866  	if s.UpgradeTracker.ControlPlane.IsUpgrading {
   867  		return false
   868  	}
   869  
    870  	// If the control plane supports replicas, check if it is in the middle of a scale operation.
    871  	// If the current control plane is scaling, then it is not considered stable.
   872  	if s.UpgradeTracker.ControlPlane.IsScaling {
   873  		return false
   874  	}
   875  
   876  	// Check if we are about to upgrade the control plane. Since the control plane is about to start its upgrade process
   877  	// it cannot be considered stable.
   878  	if s.UpgradeTracker.ControlPlane.IsStartingUpgrade {
   879  		return false
   880  	}
   881  
   882  	// If the ControlPlane is pending picking up an upgrade then it is not yet at the desired state and
   883  	// cannot be considered stable.
   884  	if s.UpgradeTracker.ControlPlane.IsPendingUpgrade {
   885  		return false
   886  	}
   887  
   888  	return true
   889  }
   890  
   891  // isMachineDeploymentDeferred returns true if the upgrade for the mdTopology is deferred.
   892  // This is the case when either:
   893  //   - the mdTopology has the ClusterTopologyDeferUpgradeAnnotation annotation.
   894  //   - the mdTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
   895  //   - another md topology which is before mdTopology in the workers.machineDeployments list has the
   896  //     ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
   897  func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology clusterv1.MachineDeploymentTopology) bool {
   898  	// If mdTopology has the ClusterTopologyDeferUpgradeAnnotation annotation => md is deferred.
   899  	if _, ok := mdTopology.Metadata.Annotations[clusterv1.ClusterTopologyDeferUpgradeAnnotation]; ok {
   900  		return true
   901  	}
   902  
   903  	// If mdTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation => md is deferred.
   904  	if _, ok := mdTopology.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
   905  		return true
   906  	}
   907  
   908  	for _, md := range clusterTopology.Workers.MachineDeployments {
   909  		// If another md topology with the ClusterTopologyHoldUpgradeSequenceAnnotation annotation
   910  		// is found before the mdTopology => md is deferred.
   911  		if _, ok := md.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
   912  			return true
   913  		}
   914  
   915  		// If mdTopology is found before a md topology with the ClusterTopologyHoldUpgradeSequenceAnnotation
   916  		// annotation => md is not deferred.
   917  		if md.Name == mdTopology.Name {
   918  			return false
   919  		}
   920  	}
   921  
   922  	// This case should be impossible as mdTopology should have been found in workers.machineDeployments.
   923  	return false
   924  }
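
         // Illustrative sketch (hypothetical topology, not part of the original file):
         // md-b holds the upgrade sequence for itself and for every topology after it in
         // the list, so md-a upgrades normally while md-b and md-c are deferred:
         //
         //	topology := &clusterv1.Topology{
         //		Workers: &clusterv1.WorkersTopology{
         //			MachineDeployments: []clusterv1.MachineDeploymentTopology{
         //				{Name: "md-a"},
         //				{Name: "md-b", Metadata: clusterv1.ObjectMeta{Annotations: map[string]string{
         //					clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
         //				}}},
         //				{Name: "md-c"},
         //			},
         //		},
         //	}
         //	// isMachineDeploymentDeferred(topology, topology.Workers.MachineDeployments[0]) == false
         //	// isMachineDeploymentDeferred(topology, topology.Workers.MachineDeployments[1]) == true
         //	// isMachineDeploymentDeferred(topology, topology.Workers.MachineDeployments[2]) == true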
   925  
   926  // computeMachinePools computes the desired state of the list of MachinePools.
   927  func (r *Reconciler) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
   928  	machinePoolsStateMap := make(scope.MachinePoolsStateMap)
   929  	for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
   930  		desiredMachinePool, err := computeMachinePool(ctx, s, mpTopology)
   931  		if err != nil {
   932  			return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name)
   933  		}
   934  		machinePoolsStateMap[mpTopology.Name] = desiredMachinePool
   935  	}
   936  	return machinePoolsStateMap, nil
   937  }
   938  
   939  // computeMachinePool computes the desired state for a MachinePoolTopology.
   940  // The generated machinePool object is calculated using the values from the machinePoolTopology and
   941  // the machinePool class.
   942  func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
   943  	desiredMachinePool := &scope.MachinePoolState{}
   944  
   945  	// Gets the blueprint for the MachinePool class.
   946  	className := machinePoolTopology.Class
   947  	machinePoolBlueprint, ok := s.Blueprint.MachinePools[className]
   948  	if !ok {
   949  		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   950  	}
   951  
   952  	var machinePoolClass *clusterv1.MachinePoolClass
   953  	for _, mpClass := range s.Blueprint.ClusterClass.Spec.Workers.MachinePools {
   954  		mpClass := mpClass
   955  		if mpClass.Class == className {
   956  			machinePoolClass = &mpClass
   957  			break
   958  		}
   959  	}
   960  	if machinePoolClass == nil {
   961  		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   962  	}
   963  
   964  	// Compute the bootstrap config.
   965  	currentMachinePool := s.Current.MachinePools[machinePoolTopology.Name]
   966  	var currentBootstrapConfigRef *corev1.ObjectReference
   967  	if currentMachinePool != nil && currentMachinePool.BootstrapObject != nil {
   968  		currentBootstrapConfigRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
   969  	}
   970  	var err error
   971  	desiredMachinePool.BootstrapObject, err = templateToObject(templateToInput{
   972  		template:              machinePoolBlueprint.BootstrapTemplate,
   973  		templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate),
   974  		cluster:               s.Current.Cluster,
   975  		nameGenerator:         names.SimpleNameGenerator(bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)),
   976  		currentObjectRef:      currentBootstrapConfigRef,
   977  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   978  		// in case of errors in between creating this template and creating/updating the MachinePool object
   979  		// with the reference to this template.
   980  		ownerRef: ownerReferenceTo(s.Current.Cluster),
   981  	})
   982  	if err != nil {
   983  		return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
   984  	}
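        	// For illustration (hypothetical values): for cluster "my-cluster" and mp topology "mp-0",
        	// the name generator above produces names like "my-cluster-mp-0-abc12", assuming
        	// bootstrapConfigNamePrefix (defined earlier in this file) concatenates the cluster and
        	// topology names; if a current bootstrap object exists, its name is re-used instead.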
   985  
   986  	bootstrapObjectLabels := desiredMachinePool.BootstrapObject.GetLabels()
   987  	if bootstrapObjectLabels == nil {
   988  		bootstrapObjectLabels = map[string]string{}
   989  	}
   990  	// Add the ClusterTopologyMachinePoolNameLabel to the generated Bootstrap config.
   991  	bootstrapObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
   992  	desiredMachinePool.BootstrapObject.SetLabels(bootstrapObjectLabels)
   993  
   994  	// Compute the InfrastructureMachinePool.
   995  	var currentInfraMachinePoolRef *corev1.ObjectReference
   996  	if currentMachinePool != nil && currentMachinePool.InfrastructureMachinePoolObject != nil {
   997  		currentInfraMachinePoolRef = &currentMachinePool.Object.Spec.Template.Spec.InfrastructureRef
   998  	}
   999  	desiredMachinePool.InfrastructureMachinePoolObject, err = templateToObject(templateToInput{
  1000  		template:              machinePoolBlueprint.InfrastructureMachinePoolTemplate,
  1001  		templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.InfrastructureMachinePoolTemplate),
  1002  		cluster:               s.Current.Cluster,
  1003  		nameGenerator:         names.SimpleNameGenerator(infrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)),
  1004  		currentObjectRef:      currentInfraMachinePoolRef,
  1005  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
  1006  		// in case of errors in between creating this template and creating/updating the MachinePool object
  1007  		// with the reference to this template.
  1008  		ownerRef: ownerReferenceTo(s.Current.Cluster),
  1009  	})
  1010  	if err != nil {
  1011  		return nil, errors.Wrapf(err, "failed to compute infrastructure object for topology %q", machinePoolTopology.Name)
  1012  	}
  1013  
  1014  	infraMachinePoolObjectLabels := desiredMachinePool.InfrastructureMachinePoolObject.GetLabels()
  1015  	if infraMachinePoolObjectLabels == nil {
  1016  		infraMachinePoolObjectLabels = map[string]string{}
  1017  	}
  1018  	// Add the ClusterTopologyMachinePoolNameLabel to the generated InfrastructureMachinePool object.
  1019  	infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
  1020  	desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
  1021  	version := computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
  1022  
  1023  	// Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
  1024  	minReadySeconds := machinePoolClass.MinReadySeconds
  1025  	if machinePoolTopology.MinReadySeconds != nil {
  1026  		minReadySeconds = machinePoolTopology.MinReadySeconds
  1027  	}
  1028  
  1029  	failureDomains := machinePoolClass.FailureDomains
  1030  	if machinePoolTopology.FailureDomains != nil {
  1031  		failureDomains = machinePoolTopology.FailureDomains
  1032  	}
  1033  
  1034  	nodeDrainTimeout := machinePoolClass.NodeDrainTimeout
  1035  	if machinePoolTopology.NodeDrainTimeout != nil {
  1036  		nodeDrainTimeout = machinePoolTopology.NodeDrainTimeout
  1037  	}
  1038  
  1039  	nodeVolumeDetachTimeout := machinePoolClass.NodeVolumeDetachTimeout
  1040  	if machinePoolTopology.NodeVolumeDetachTimeout != nil {
  1041  		nodeVolumeDetachTimeout = machinePoolTopology.NodeVolumeDetachTimeout
  1042  	}
  1043  
  1044  	nodeDeletionTimeout := machinePoolClass.NodeDeletionTimeout
  1045  	if machinePoolTopology.NodeDeletionTimeout != nil {
  1046  		nodeDeletionTimeout = machinePoolTopology.NodeDeletionTimeout
  1047  	}
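        	// Worked example of the precedence above (hypothetical values): with
        	// machinePoolClass.NodeDrainTimeout = 10m and machinePoolTopology.NodeDrainTimeout = 5m,
        	// the desired MachinePool gets a 5m drain timeout; if the topology field is nil,
        	// the class default of 10m applies. The same rule holds for each field in this block.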
  1048  
  1049  	// Compute the MachinePool object.
  1050  	desiredBootstrapConfigRef, err := calculateRefDesiredAPIVersion(currentBootstrapConfigRef, desiredMachinePool.BootstrapObject)
  1051  	if err != nil {
  1052  		return nil, errors.Wrap(err, "failed to calculate desired bootstrap config ref")
  1053  	}
  1054  	desiredInfraMachinePoolRef, err := calculateRefDesiredAPIVersion(currentInfraMachinePoolRef, desiredMachinePool.InfrastructureMachinePoolObject)
  1055  	if err != nil {
  1056  		return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine pool ref")
  1057  	}
  1058  
  1059  	nameTemplate := "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}"
  1060  	if machinePoolClass.NamingStrategy != nil && machinePoolClass.NamingStrategy.Template != nil {
  1061  		nameTemplate = *machinePoolClass.NamingStrategy.Template
  1062  	}
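        	// For illustration, a ClusterClass can customize the template above via the machine pool
        	// class naming strategy, e.g. (hypothetical values):
        	//
        	//   workers:
        	//     machinePools:
        	//       - class: default-worker
        	//         namingStrategy:
        	//           template: "{{ .cluster.name }}-pool-{{ .machinePool.topologyName }}-{{ .random }}"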
  1063  
  1064  	name, err := names.MachinePoolNameGenerator(nameTemplate, s.Current.Cluster.Name, machinePoolTopology.Name).GenerateName()
  1065  	if err != nil {
  1066  		return nil, errors.Wrap(err, "failed to generate name for MachinePool")
  1067  	}
  1068  
  1069  	desiredMachinePoolObj := &expv1.MachinePool{
  1070  		TypeMeta: metav1.TypeMeta{
  1071  			Kind:       expv1.GroupVersion.WithKind("MachinePool").Kind,
  1072  			APIVersion: expv1.GroupVersion.String(),
  1073  		},
  1074  		ObjectMeta: metav1.ObjectMeta{
  1075  			Name:      name,
  1076  			Namespace: s.Current.Cluster.Namespace,
  1077  		},
  1078  		Spec: expv1.MachinePoolSpec{
  1079  			ClusterName:     s.Current.Cluster.Name,
  1080  			MinReadySeconds: minReadySeconds,
  1081  			FailureDomains:  failureDomains,
  1082  			Template: clusterv1.MachineTemplateSpec{
  1083  				Spec: clusterv1.MachineSpec{
  1084  					ClusterName:             s.Current.Cluster.Name,
  1085  					Version:                 pointer.String(version),
  1086  					Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef},
  1087  					InfrastructureRef:       *desiredInfraMachinePoolRef,
  1088  					NodeDrainTimeout:        nodeDrainTimeout,
  1089  					NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
  1090  					NodeDeletionTimeout:     nodeDeletionTimeout,
  1091  				},
  1092  			},
  1093  		},
  1094  	}
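        	// For illustration, with cluster "my-cluster" in namespace "default" and mp topology
        	// "mp-0" (hypothetical values), the object computed above roughly corresponds to:
        	//
        	//   apiVersion: cluster.x-k8s.io/v1beta1
        	//   kind: MachinePool
        	//   metadata:
        	//     name: my-cluster-mp-0-abc12
        	//     namespace: default
        	//   spec:
        	//     clusterName: my-cluster
        	//     template:
        	//       spec:
        	//         clusterName: my-cluster
        	//         version: v1.29.0
        	//         bootstrap:
        	//           configRef: {...}
        	//         infrastructureRef: {...}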
  1095  
  1096  	// If an existing MachinePool is present, override the generated name by re-using the
  1097  	// existing name (this helps the reconciler match current and desired objects).
  1098  	if currentMachinePool != nil && currentMachinePool.Object != nil {
  1099  		desiredMachinePoolObj.SetName(currentMachinePool.Object.Name)
  1100  	}
  1101  
  1102  	// Apply annotations
  1103  	machinePoolAnnotations := util.MergeMap(machinePoolTopology.Metadata.Annotations, machinePoolBlueprint.Metadata.Annotations)
  1104  	// Ensure the annotations used to control the upgrade sequence are never propagated.
  1105  	delete(machinePoolAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1106  	delete(machinePoolAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1107  	desiredMachinePoolObj.SetAnnotations(machinePoolAnnotations)
  1108  	desiredMachinePoolObj.Spec.Template.Annotations = machinePoolAnnotations
  1109  
  1110  	// Apply Labels
  1111  	// NOTE: On top of all the labels applied to managed objects, we apply the ClusterTopologyMachinePoolNameLabel
  1112  	// to keep track of the MachinePool name from the Topology; this is used to identify the object in subsequent reconcile loops.
  1113  	machinePoolLabels := util.MergeMap(machinePoolTopology.Metadata.Labels, machinePoolBlueprint.Metadata.Labels)
  1114  	if machinePoolLabels == nil {
  1115  		machinePoolLabels = map[string]string{}
  1116  	}
  1117  	machinePoolLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
  1118  	machinePoolLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1119  	machinePoolLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
  1120  	desiredMachinePoolObj.SetLabels(machinePoolLabels)
  1121  
  1122  	// Also set the labels in .spec.template.labels so that they are propagated to the
  1123  	// Machines of the MachinePool.
  1124  	// Note: unlike MachineDeployments, MachinePools do not create MachineSets, so no MachineSet cleanup applies here.
  1125  	desiredMachinePoolObj.Spec.Template.Labels = machinePoolLabels
  1126  
  1127  	// Set the desired replicas.
  1128  	desiredMachinePoolObj.Spec.Replicas = machinePoolTopology.Replicas
  1129  
  1130  	desiredMachinePool.Object = desiredMachinePoolObj
  1131  
  1132  	return desiredMachinePool, nil
  1133  }
  1134  
  1135  // computeMachinePoolVersion calculates the version of the desired machine pool.
  1136  // The version is calculated using the state of the current machine pools,
  1137  // the current control plane and the version defined in the topology.
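        // For example (hypothetical versions): if the topology specifies v1.29.0 and the current
        // MachinePool is at v1.28.5, the function keeps returning v1.28.5 (marking the MachinePool
        // as pending upgrade) while the upgrade is deferred, a hook is blocking, the upgrade
        // concurrency limit is reached, or the control plane is not stable; only then does it return v1.29.0.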
  1138  func computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
  1139  	desiredVersion := s.Blueprint.Topology.Version
  1140  	// If creating a new machine pool, mark it as pending if the control plane is not
  1141  	// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
  1142  	// Example: join could fail if the load balancers are slow in detecting when CP machines are
  1143  	// being deleted.
  1144  	if currentMPState == nil || currentMPState.Object == nil {
  1145  		if !isControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
  1146  			s.UpgradeTracker.MachinePools.MarkPendingCreate(machinePoolTopology.Name)
  1147  		}
  1148  		return desiredVersion
  1149  	}
  1150  
  1151  	// Get the current version of the machine pool.
  1152  	currentVersion := *currentMPState.Object.Spec.Template.Spec.Version
  1153  
  1154  	// Return early if the currentVersion is already equal to the desiredVersion;
  1155  	// no further checks are required.
  1156  	if currentVersion == desiredVersion {
  1157  		return currentVersion
  1158  	}
  1159  
  1160  	// Return early if the upgrade for the MachinePool is deferred.
  1161  	if isMachinePoolDeferred(s.Blueprint.Topology, machinePoolTopology) {
  1162  		s.UpgradeTracker.MachinePools.MarkDeferredUpgrade(currentMPState.Object.Name)
  1163  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1164  		return currentVersion
  1165  	}
  1166  
  1167  	// Return early if the AfterControlPlaneUpgrade hook returns a blocking response.
  1168  	if s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
  1169  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1170  		return currentVersion
  1171  	}
  1172  
  1173  	// Return early if the upgrade concurrency is reached.
  1174  	if s.UpgradeTracker.MachinePools.UpgradeConcurrencyReached() {
  1175  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1176  		return currentVersion
  1177  	}
  1178  
  1179  	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
  1180  	// Return the current version of the machine pool. We will pick up the new version after the control
  1181  	// plane is stable.
  1182  	if !isControlPlaneStable(s) {
  1183  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1184  		return currentVersion
  1185  	}
  1186  
  1187  	// Control plane and machine pools are stable.
  1188  	// Ready to pick up the topology version.
  1189  	s.UpgradeTracker.MachinePools.MarkUpgrading(currentMPState.Object.Name)
  1190  	return desiredVersion
  1191  }
  1192  
  1193  // isMachinePoolDeferred returns true if the upgrade for the mpTopology is deferred.
  1194  // This is the case when either:
  1195  //   - the mpTopology has the ClusterTopologyDeferUpgradeAnnotation annotation.
  1196  //   - the mpTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
  1197  //   - another mp topology which is before mpTopology in the workers.machinePools list has the
  1198  //     ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
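        // For example (hypothetical names): with workers.machinePools = [mp-a, mp-b, mp-c] and the
        // hold annotation set only on mp-b, mp-a is not deferred, while mp-b and mp-c are deferred.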
  1199  func isMachinePoolDeferred(clusterTopology *clusterv1.Topology, mpTopology clusterv1.MachinePoolTopology) bool {
  1200  	// If mpTopology has the ClusterTopologyDeferUpgradeAnnotation annotation => mp is deferred.
  1201  	if _, ok := mpTopology.Metadata.Annotations[clusterv1.ClusterTopologyDeferUpgradeAnnotation]; ok {
  1202  		return true
  1203  	}
  1204  
  1205  	// If mpTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation => mp is deferred.
  1206  	if _, ok := mpTopology.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
  1207  		return true
  1208  	}
  1209  
  1210  	for _, mp := range clusterTopology.Workers.MachinePools {
  1211  		// If another mp topology with the ClusterTopologyHoldUpgradeSequenceAnnotation annotation
  1212  		// is found before the mpTopology => mp is deferred.
  1213  		if _, ok := mp.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
  1214  			return true
  1215  		}
  1216  
  1217  		// If mpTopology is found before an mp topology with the ClusterTopologyHoldUpgradeSequenceAnnotation
  1218  		// annotation => mp is not deferred.
  1219  		if mp.Name == mpTopology.Name {
  1220  			return false
  1221  		}
  1222  	}
  1223  
  1224  	// This case should be impossible as mpTopology should have been found in workers.machinePools.
  1225  	return false
  1226  }
  1227  
  1228  type templateToInput struct {
  1229  	template              *unstructured.Unstructured
  1230  	templateClonedFromRef *corev1.ObjectReference
  1231  	cluster               *clusterv1.Cluster
  1232  	nameGenerator         names.NameGenerator
  1233  	currentObjectRef      *corev1.ObjectReference
  1234  	labels                map[string]string
  1235  	annotations           map[string]string
  1236  	// ownerRef is an optional OwnerReference to attach to the cloned object.
  1237  	ownerRef *metav1.OwnerReference
  1238  }
  1239  
  1240  // templateToObject generates an object from a template, taking care
  1241  // of adding required labels (cluster, topology), annotations (clonedFrom)
  1242  // and assigning a meaningful name (or reusing current reference name).
  1243  func templateToObject(in templateToInput) (*unstructured.Unstructured, error) {
  1244  	// NOTE: The cluster label is added at creation time so this object can be read by the ClusterTopology
  1245  	// controller immediately after creation, even before other controllers add the label (if missing).
  1246  	labels := map[string]string{}
  1247  	for k, v := range in.labels {
  1248  		labels[k] = v
  1249  	}
  1250  	labels[clusterv1.ClusterNameLabel] = in.cluster.Name
  1251  	labels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1252  
  1253  	// Generate the object from the template.
  1254  	// NOTE: Controller owner references can't be set at this stage; other controllers add their own
  1255  	// OwnerReferences when the object is actually created. The optional in.ownerRef (used for garbage collection) is passed through to GenerateTemplate.
  1256  	object, err := external.GenerateTemplate(&external.GenerateTemplateInput{
  1257  		Template:    in.template,
  1258  		TemplateRef: in.templateClonedFromRef,
  1259  		Namespace:   in.cluster.Namespace,
  1260  		Labels:      labels,
  1261  		Annotations: in.annotations,
  1262  		ClusterName: in.cluster.Name,
  1263  		OwnerRef:    in.ownerRef,
  1264  	})
  1265  	if err != nil {
  1266  		return nil, err
  1267  	}
  1268  
  1269  	// Ensure the generated objects have a meaningful name.
  1270  	// NOTE: In case there is already a ref to this object in the Cluster, re-use the same name
  1271  	// in order to simplify comparison at later stages of the reconcile process.
  1272  	name, err := in.nameGenerator.GenerateName()
  1273  	if err != nil {
  1274  		return nil, errors.Wrapf(err, "failed to generate name for %s", object.GetKind())
  1275  	}
  1276  	object.SetName(name)
  1277  	if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 {
  1278  		object.SetName(in.currentObjectRef.Name)
  1279  	}
  1280  
  1281  	return object, nil
  1282  }
  1283  
  1284  // templateToTemplate generates a template from an existing template, taking care
  1285  // of adding required labels (cluster, topology), annotations (clonedFrom)
  1286  // and assigning a meaningful name (or reusing current reference name).
  1287  // NOTE: We are creating a copy of the ClusterClass template for each cluster so
  1288  // it is possible to add cluster specific information without affecting the original object.
  1289  func templateToTemplate(in templateToInput) (*unstructured.Unstructured, error) {
  1290  	template := &unstructured.Unstructured{}
  1291  	in.template.DeepCopyInto(template)
  1292  
  1293  	// Remove all the info automatically assigned by the API server, which is not relevant in
  1294  	// the copy of the template.
  1295  	template.SetResourceVersion("")
  1296  	template.SetFinalizers(nil)
  1297  	template.SetUID("")
  1298  	template.SetSelfLink("")
  1299  
  1300  	// Enforce the topology labels into the provided label set.
  1301  	// NOTE: The cluster label is added at creation time so this object can be read by the ClusterTopology
  1302  	// controller immediately after creation, even before other controllers add the label (if missing).
  1303  	labels := template.GetLabels()
  1304  	if labels == nil {
  1305  		labels = map[string]string{}
  1306  	}
  1307  	for k, v := range in.labels {
  1308  		labels[k] = v
  1309  	}
  1310  	labels[clusterv1.ClusterNameLabel] = in.cluster.Name
  1311  	labels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1312  	template.SetLabels(labels)
  1313  
  1314  	// Enforce the cloned-from annotations and remove the kubectl last-applied-configuration annotation,
  1315  	// because we don't want to propagate it to the cloned template objects.
  1316  	annotations := template.GetAnnotations()
  1317  	if annotations == nil {
  1318  		annotations = map[string]string{}
  1319  	}
  1320  	for k, v := range in.annotations {
  1321  		annotations[k] = v
  1322  	}
  1323  	annotations[clusterv1.TemplateClonedFromNameAnnotation] = in.templateClonedFromRef.Name
  1324  	annotations[clusterv1.TemplateClonedFromGroupKindAnnotation] = in.templateClonedFromRef.GroupVersionKind().GroupKind().String()
  1325  	delete(annotations, corev1.LastAppliedConfigAnnotation)
  1326  	template.SetAnnotations(annotations)
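        	// For illustration (hypothetical template), cloning from an infrastructure template named
        	// "quick-start-worker" of kind DockerMachineTemplate would result in:
        	//
        	//   cluster.x-k8s.io/cloned-from-name: quick-start-worker
        	//   cluster.x-k8s.io/cloned-from-groupkind: DockerMachineTemplate.infrastructure.cluster.x-k8s.io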
  1327  
  1328  	// Set the owner reference.
  1329  	if in.ownerRef != nil {
  1330  		template.SetOwnerReferences([]metav1.OwnerReference{*in.ownerRef})
  1331  	}
  1332  
  1333  	// Ensure the generated template gets a meaningful name.
  1334  	// NOTE: In case there is already an object ref to this template, it is required to re-use the same name
  1335  	// in order to simplify comparison at later stages of the reconcile process.
  1336  	name, err := in.nameGenerator.GenerateName()
  1337  	if err != nil {
  1338  		return nil, errors.Wrapf(err, "failed to generate name for %s", template.GetKind())
  1339  	}
  1340  	template.SetName(name)
  1341  	if in.currentObjectRef != nil && len(in.currentObjectRef.Name) > 0 {
  1342  		template.SetName(in.currentObjectRef.Name)
  1343  	}
  1344  
  1345  	return template, nil
  1346  }
  1347  
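        // ownerReferenceTo returns an OwnerReference to the given object.
        // Note: Controller and BlockOwnerDeletion are left unset, so this is a plain (non-controller)
        // owner reference; it is used above to make templates eligible for garbage collection.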
  1348  func ownerReferenceTo(obj client.Object) *metav1.OwnerReference {
  1349  	return &metav1.OwnerReference{
  1350  		Kind:       obj.GetObjectKind().GroupVersionKind().Kind,
  1351  		Name:       obj.GetName(),
  1352  		UID:        obj.GetUID(),
  1353  		APIVersion: obj.GetObjectKind().GroupVersionKind().GroupVersion().String(),
  1354  	}
  1355  }
  1356  
  1357  func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Object, selector *metav1.LabelSelector, clusterName string, check *clusterv1.MachineHealthCheckClass) *clusterv1.MachineHealthCheck {
  1358  	// Create a MachineHealthCheck with the spec given in the ClusterClass.
  1359  	mhc := &clusterv1.MachineHealthCheck{
  1360  		TypeMeta: metav1.TypeMeta{
  1361  			Kind:       clusterv1.GroupVersion.WithKind("MachineHealthCheck").Kind,
  1362  			APIVersion: clusterv1.GroupVersion.String(),
  1363  		},
  1364  		ObjectMeta: metav1.ObjectMeta{
  1365  			Name:      healthCheckTarget.GetName(),
  1366  			Namespace: healthCheckTarget.GetNamespace(),
  1367  			Labels: map[string]string{
  1368  				clusterv1.ClusterTopologyOwnedLabel: "",
  1369  			},
  1370  		},
  1371  		Spec: clusterv1.MachineHealthCheckSpec{
  1372  			ClusterName:         clusterName,
  1373  			Selector:            *selector,
  1374  			UnhealthyConditions: check.UnhealthyConditions,
  1375  			MaxUnhealthy:        check.MaxUnhealthy,
  1376  			UnhealthyRange:      check.UnhealthyRange,
  1377  			NodeStartupTimeout:  check.NodeStartupTimeout,
  1378  			RemediationTemplate: check.RemediationTemplate,
  1379  		},
  1380  	}
  1381  
  1382  	// Default all fields in the MachineHealthCheck using the same function called in the webhook. This ensures the desired
  1383  	// state of the object won't be different from the current state due to webhook Defaulting.
  1384  	if err := (&webhooks.MachineHealthCheck{}).Default(ctx, mhc); err != nil {
  1385  		panic(err)
  1386  	}
  1387  
  1388  	return mhc
  1389  }
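        // For illustration (hypothetical values), a health check computed for a control plane named
        // "my-cluster-control-plane" with a 10m nodeStartupTimeout in the ClusterClass, using the
        // selector from selectorForControlPlaneMHC below, roughly yields:
        //
        //   apiVersion: cluster.x-k8s.io/v1beta1
        //   kind: MachineHealthCheck
        //   metadata:
        //     name: my-cluster-control-plane
        //     labels:
        //       topology.cluster.x-k8s.io/owned: ""
        //   spec:
        //     clusterName: my-cluster
        //     nodeStartupTimeout: 10m0s
        //     selector:
        //       matchLabels:
        //         topology.cluster.x-k8s.io/owned: ""
        //         cluster.x-k8s.io/control-plane: ""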
  1390  
  1391  func selectorForControlPlaneMHC() *metav1.LabelSelector {
  1392  	// The selector returned here is the minimal common selector for all Machines belonging to the ControlPlane.
  1393  	// It does not include any labels set in ClusterClass, Cluster Topology or elsewhere.
  1394  	return &metav1.LabelSelector{
  1395  		MatchLabels: map[string]string{
  1396  			clusterv1.ClusterTopologyOwnedLabel: "",
  1397  			clusterv1.MachineControlPlaneLabel:  "",
  1398  		},
  1399  	}
  1400  }
  1401  
  1402  func selectorForMachineDeploymentMHC(md *clusterv1.MachineDeployment) *metav1.LabelSelector {
  1403  	// The selector returned here is the minimal common selector for all MachineSets belonging to a MachineDeployment.
  1404  	// It does not include any labels set in ClusterClass, Cluster Topology or elsewhere.
  1405  	return &metav1.LabelSelector{
  1406  		MatchLabels: map[string]string{
  1407  			clusterv1.ClusterTopologyOwnedLabel:                 "",
  1408  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel],
  1409  		},
  1410  	}
  1411  }