sigs.k8s.io/cluster-api@v1.7.1/exp/topology/desiredstate/desired_state.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  // Package desiredstate contains cluster topology utils, e.g. to compute the desired state.
    18  package desiredstate
    19  
    20  import (
    21  	"context"
    22  	"fmt"
    23  
    24  	"github.com/pkg/errors"
    25  	corev1 "k8s.io/api/core/v1"
    26  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    27  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    28  	"k8s.io/apimachinery/pkg/runtime/schema"
    29  	"k8s.io/utils/ptr"
    30  	"sigs.k8s.io/controller-runtime/pkg/client"
    31  
    32  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    33  	"sigs.k8s.io/cluster-api/controllers/external"
    34  	"sigs.k8s.io/cluster-api/controllers/remote"
    35  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    36  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    37  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    38  	"sigs.k8s.io/cluster-api/exp/topology/scope"
    39  	"sigs.k8s.io/cluster-api/feature"
    40  	"sigs.k8s.io/cluster-api/internal/contract"
    41  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
    42  	"sigs.k8s.io/cluster-api/internal/hooks"
    43  	tlog "sigs.k8s.io/cluster-api/internal/log"
    44  	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
    45  	"sigs.k8s.io/cluster-api/internal/topology/clustershim"
    46  	topologynames "sigs.k8s.io/cluster-api/internal/topology/names"
    47  	"sigs.k8s.io/cluster-api/internal/topology/ownerrefs"
    48  	"sigs.k8s.io/cluster-api/internal/topology/selectors"
    49  	"sigs.k8s.io/cluster-api/internal/webhooks"
    50  	"sigs.k8s.io/cluster-api/util"
    51  )
    52  
    53  // Generator generates the desired state of the cluster topology.
    54  type Generator interface {
    55  	Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error)
    56  }
    57  
    58  // NewGenerator creates a new generator to generate desired state.
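        //
        // A minimal usage sketch (illustrative only; scope.New and the surrounding wiring are
        // assumptions based on how the cluster topology controller consumes this package):
        //
        //	g := NewGenerator(mgrClient, tracker, runtimeClient)
        //	s := scope.New(cluster) // scope with Blueprint and Current state populated by the caller
        //	desired, err := g.Generate(ctx, s)
        //	if err != nil {
        //		// handle error
        //	}
        //	_ = desired // desired.Cluster, desired.ControlPlane, desired.MachineDeployments, ...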
    59  func NewGenerator(client client.Client, tracker *remote.ClusterCacheTracker, runtimeClient runtimeclient.Client) Generator {
    60  	return &generator{
    61  		Client:        client,
    62  		Tracker:       tracker,
    63  		RuntimeClient: runtimeClient,
    64  		patchEngine:   patches.NewEngine(runtimeClient),
    65  	}
    66  }
    67  
    68  // generator is the default implementation of the Generator interface.
    69  // It is used in the cluster topology controller, but it can also be used for testing.
    70  type generator struct {
    71  	Client client.Client
    72  
    73  	Tracker *remote.ClusterCacheTracker
    74  
    75  	RuntimeClient runtimeclient.Client
    76  
    77  	// patchEngine is used to apply patches during computeDesiredState.
    78  	patchEngine patches.Engine
    79  }
    80  
    81  // Generate computes the desired state of the cluster topology.
    82  // NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
    83  // the entire compute operation will fail. This might be improved in the future if support for reconciling
    84  // a subset of a topology is implemented.
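        //
        // The returned scope.ClusterState is roughly shaped as follows (illustrative; map keys are
        // the MachineDeployment/MachinePool topology names, "md-1" and "mp-1" below are placeholders):
        //
        //	desiredState.Cluster                    // Cluster with infrastructureRef/controlPlaneRef set
        //	desiredState.InfrastructureCluster      // cloned from the ClusterClass infrastructure template
        //	desiredState.ControlPlane.Object        // cloned from the ClusterClass control plane template
        //	desiredState.MachineDeployments["md-1"] // one entry per MachineDeployment topology, if any
        //	desiredState.MachinePools["mp-1"]       // one entry per MachinePool topology, if any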
    85  func (g *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
    86  	var err error
    87  	desiredState := &scope.ClusterState{
    88  		ControlPlane: &scope.ControlPlaneState{},
    89  	}
    90  
    91  	// Compute the desired state of the InfrastructureCluster object.
    92  	if desiredState.InfrastructureCluster, err = computeInfrastructureCluster(ctx, s); err != nil {
    93  		return nil, errors.Wrapf(err, "failed to compute InfrastructureCluster")
    94  	}
    95  
    96  	// If the clusterClass mandates the controlPlane has infrastructureMachines, compute the InfrastructureMachineTemplate for the ControlPlane.
    97  	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
    98  		if desiredState.ControlPlane.InfrastructureMachineTemplate, err = computeControlPlaneInfrastructureMachineTemplate(ctx, s); err != nil {
    99  			return nil, errors.Wrapf(err, "failed to compute ControlPlane InfrastructureMachineTemplate")
   100  		}
   101  	}
   102  
   103  	// Mark all the MachineDeployments that are currently upgrading.
   104  	// This captured information is used for:
   105  	// - Building the TopologyReconciled condition.
   106  	// - Making upgrade decisions on the control plane.
   107  	// - Making upgrade decisions on machine deployments.
   108  	mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, g.Client)
   109  	if err != nil {
   110  		return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading")
   111  	}
   112  	s.UpgradeTracker.MachineDeployments.MarkUpgrading(mdUpgradingNames...)
   113  
   114  	// Mark all the MachinePools that are currently upgrading.
   115  	// This captured information is used for:
   116  	// - Building the TopologyReconciled condition.
   117  	// - Making upgrade decisions on the control plane.
   118  	// - Making upgrade decisions on machine pools.
   119  	if len(s.Current.MachinePools) > 0 {
   120  		client, err := g.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
   121  		if err != nil {
   122  			return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
   123  		}
   124  		// Mark all the MachinePools that are currently upgrading.
   125  		mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)
   126  		if err != nil {
   127  			return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
   128  		}
   129  		s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
   130  	}
   131  
   132  	// Compute the desired state of the ControlPlane object, adding a reference to the
   133  	// InfrastructureMachineTemplate generated by the previous step, if any.
   134  	if desiredState.ControlPlane.Object, err = g.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
   135  		return nil, errors.Wrapf(err, "failed to compute ControlPlane")
   136  	}
   137  
   138  	// Compute the desired state of the ControlPlane MachineHealthCheck if defined.
   139  	// The MachineHealthCheck will have the same name as the ControlPlane Object and a selector for the ControlPlane InfrastructureMachines.
   140  	if s.Blueprint.IsControlPlaneMachineHealthCheckEnabled() {
   141  		desiredState.ControlPlane.MachineHealthCheck = computeMachineHealthCheck(
   142  			ctx,
   143  			desiredState.ControlPlane.Object,
   144  			selectors.ForControlPlaneMHC(),
   145  			s.Current.Cluster,
   146  			s.Blueprint.ControlPlaneMachineHealthCheckClass())
   147  	}
   148  
   149  	// Compute the desired state for the Cluster object, adding a reference to the
   150  	// InfrastructureCluster and the ControlPlane objects generated by the previous steps.
   151  	desiredState.Cluster, err = computeCluster(ctx, s, desiredState.InfrastructureCluster, desiredState.ControlPlane.Object)
   152  	if err != nil {
   153  		return nil, errors.Wrapf(err, "failed to compute Cluster")
   154  	}
   155  
   156  	// If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies
   157  	// defined in the cluster.
   158  	if s.Blueprint.HasMachineDeployments() {
   159  		desiredState.MachineDeployments, err = g.computeMachineDeployments(ctx, s)
   160  		if err != nil {
   161  			return nil, errors.Wrapf(err, "failed to compute MachineDeployments")
   162  		}
   163  	}
   164  
   165  	// If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies
   166  	// defined in the cluster.
   167  	if s.Blueprint.HasMachinePools() {
   168  		desiredState.MachinePools, err = g.computeMachinePools(ctx, s)
   169  		if err != nil {
   170  			return nil, errors.Wrapf(err, "failed to compute MachinePools")
   171  		}
   172  	}
   173  
   174  	// Apply patches to the desired state according to the patches from the ClusterClass, variables from the Cluster
   175  	// and builtin variables.
   176  	// NOTE: We have to make sure all spec fields that were explicitly set in desired objects during the computation above
   177  	// are preserved during patching. When desired objects are computed, their spec is copied from a template; in some cases
   178  	// further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not overwritten
   179  	// when applying patches. Some examples are .spec.machineTemplate and .spec.version in control planes.
   180  	if err := g.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
   181  		return nil, errors.Wrap(err, "failed to apply patches")
   182  	}
   183  
   184  	return desiredState, nil
   185  }
   186  
   187  // computeInfrastructureCluster computes the desired state for the InfrastructureCluster object starting from the
   188  // corresponding template defined in the blueprint.
   189  func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructured.Unstructured, error) {
   190  	template := s.Blueprint.InfrastructureClusterTemplate
   191  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.Infrastructure.Ref
   192  	cluster := s.Current.Cluster
   193  	currentRef := cluster.Spec.InfrastructureRef
   194  
   195  	infrastructureCluster, err := templateToObject(templateToInput{
   196  		template:              template,
   197  		templateClonedFromRef: templateClonedFromRef,
   198  		cluster:               cluster,
   199  		nameGenerator:         topologynames.SimpleNameGenerator(fmt.Sprintf("%s-", cluster.Name)),
   200  		currentObjectRef:      currentRef,
   201  		// Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise provisioning
   202  		// of the infrastructure cluster would start regardless of whether the object is actually referenced by the Cluster itself.
   203  	})
   204  	if err != nil {
   205  		return nil, errors.Wrapf(err, "failed to generate the InfrastructureCluster object from the %s", template.GetKind())
   206  	}
   207  
   208  	// Carry over the shim owner reference, if any.
   209  	// NOTE: this prevents the ownerRef from being deleted by server side apply.
   210  	if s.Current.InfrastructureCluster != nil {
   211  		shim := clustershim.New(s.Current.Cluster)
   212  		if ref := getOwnerReferenceFrom(s.Current.InfrastructureCluster, shim); ref != nil {
   213  			infrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ref})
   214  		}
   215  	}
   216  
   217  	return infrastructureCluster, nil
   218  }
   219  
   220  // computeControlPlaneInfrastructureMachineTemplate computes the desired state for InfrastructureMachineTemplate
   221  // that should be referenced by the ControlPlane object.
   222  func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scope.Scope) (*unstructured.Unstructured, error) {
   223  	template := s.Blueprint.ControlPlane.InfrastructureMachineTemplate
   224  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref
   225  	cluster := s.Current.Cluster
   226  
   227  	// Check if the current control plane object has a machineTemplate.infrastructureRef already defined.
   228  	// TODO: Move the next few lines into a method on scope.ControlPlaneState
   229  	var currentRef *corev1.ObjectReference
   230  	if s.Current.ControlPlane != nil && s.Current.ControlPlane.Object != nil {
   231  		var err error
   232  		if currentRef, err = contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object); err != nil {
   233  			return nil, errors.Wrap(err, "failed to get spec.machineTemplate.infrastructureRef for the current ControlPlane object")
   234  		}
   235  	}
   236  
   237  	return templateToTemplate(templateToInput{
   238  		template:              template,
   239  		templateClonedFromRef: templateClonedFromRef,
   240  		cluster:               cluster,
   241  		nameGenerator:         topologynames.SimpleNameGenerator(topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name)),
   242  		currentObjectRef:      currentRef,
   243  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   244  		// in case of errors in between creating this template and updating the Cluster object
   245  		// with the reference to the ControlPlane object using this template.
   246  		ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")),
   247  	})
   248  }
   249  
   250  // computeControlPlane computes the desired state for the ControlPlane object starting from the
   251  // corresponding template defined in the blueprint.
   252  func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
   253  	template := s.Blueprint.ControlPlane.Template
   254  	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
   255  	cluster := s.Current.Cluster
   256  	currentRef := cluster.Spec.ControlPlaneRef
   257  
   258  	// Compute the labels and annotations to be applied to ControlPlane metadata and ControlPlane machines.
   259  	// We merge the labels and annotations from topology and ClusterClass.
   260  	// We also add the cluster-name and the topology owned labels, so they are propagated down.
   261  	topologyMetadata := s.Blueprint.Topology.ControlPlane.Metadata
   262  	clusterClassMetadata := s.Blueprint.ClusterClass.Spec.ControlPlane.Metadata
   263  
   264  	controlPlaneLabels := util.MergeMap(topologyMetadata.Labels, clusterClassMetadata.Labels)
   265  	if controlPlaneLabels == nil {
   266  		controlPlaneLabels = map[string]string{}
   267  	}
   268  	controlPlaneLabels[clusterv1.ClusterNameLabel] = cluster.Name
   269  	controlPlaneLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   270  
   271  	controlPlaneAnnotations := util.MergeMap(topologyMetadata.Annotations, clusterClassMetadata.Annotations)
   272  
   273  	nameTemplate := "{{ .cluster.name }}-{{ .random }}"
   274  	if s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy != nil && s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template != nil {
   275  		nameTemplate = *s.Blueprint.ClusterClass.Spec.ControlPlane.NamingStrategy.Template
   276  	}
   277  
   278  	controlPlane, err := templateToObject(templateToInput{
   279  		template:              template,
   280  		templateClonedFromRef: templateClonedFromRef,
   281  		cluster:               cluster,
   282  		nameGenerator:         topologynames.ControlPlaneNameGenerator(nameTemplate, cluster.Name),
   283  		currentObjectRef:      currentRef,
   284  		labels:                controlPlaneLabels,
   285  		annotations:           controlPlaneAnnotations,
   286  		// Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise provisioning
   287  		// of the ControlPlane would start regardless of whether the object is actually referenced by the Cluster itself.
   288  	})
   289  	if err != nil {
   290  		return nil, errors.Wrapf(err, "failed to generate the ControlPlane object from the %s", template.GetKind())
   291  	}
   292  
   293  	// Carry over the shim owner reference, if any.
   294  	// NOTE: this prevents the ownerRef from being deleted by server side apply.
   295  	if s.Current.ControlPlane != nil && s.Current.ControlPlane.Object != nil {
   296  		shim := clustershim.New(s.Current.Cluster)
   297  		if ref := getOwnerReferenceFrom(s.Current.ControlPlane.Object, shim); ref != nil {
   298  			controlPlane.SetOwnerReferences([]metav1.OwnerReference{*ref})
   299  		}
   300  	}
   301  
   302  	// If the ClusterClass mandates the controlPlane has infrastructureMachines, add a reference to InfrastructureMachine
   303  	// template and metadata to be used for the control plane machines.
   304  	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
   305  		// We have to copy the template to avoid modifying the one from desired state.
   306  		refCopy := infrastructureMachineTemplate.DeepCopy()
   307  
   308  		// If the ControlPlane already exists, avoid downgrading the apiVersion of the infrastructureRef
   309  		// if it was bumped by the control plane controller in the meantime.
   310  		if s.Current.ControlPlane.Object != nil {
   311  			currentRef, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(s.Current.ControlPlane.Object)
   312  			if err != nil {
   313  				return nil, errors.Wrapf(err, "failed to get spec.machineTemplate.infrastructureRef from the ControlPlane object")
   314  			}
   315  			desiredRef, err := calculateRefDesiredAPIVersion(currentRef, refCopy)
   316  			if err != nil {
   317  				return nil, errors.Wrap(err, "failed to calculate desired spec.machineTemplate.infrastructureRef")
   318  			}
   319  			refCopy.SetAPIVersion(desiredRef.APIVersion)
   320  		}
   321  		if err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(controlPlane, refCopy); err != nil {
   322  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.infrastructureRef in the ControlPlane object")
   323  		}
   324  
   325  		// Add the ControlPlane labels and annotations to the ControlPlane machines as well.
   326  		// Note: We have to ensure the machine template metadata copied from the control plane template is not overwritten.
   327  		controlPlaneMachineTemplateMetadata, err := contract.ControlPlane().MachineTemplate().Metadata().Get(controlPlane)
   328  		if err != nil {
   329  			return nil, errors.Wrap(err, "failed to get spec.machineTemplate.metadata from the ControlPlane object")
   330  		}
   331  
   332  		controlPlaneMachineTemplateMetadata.Labels = util.MergeMap(controlPlaneLabels, controlPlaneMachineTemplateMetadata.Labels)
   333  		controlPlaneMachineTemplateMetadata.Annotations = util.MergeMap(controlPlaneAnnotations, controlPlaneMachineTemplateMetadata.Annotations)
   334  
   335  		if err := contract.ControlPlane().MachineTemplate().Metadata().Set(controlPlane,
   336  			&clusterv1.ObjectMeta{
   337  				Labels:      controlPlaneMachineTemplateMetadata.Labels,
   338  				Annotations: controlPlaneMachineTemplateMetadata.Annotations,
   339  			}); err != nil {
   340  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.metadata in the ControlPlane object")
   341  		}
   342  	}
   343  
   344  	// If it is required to manage the number of replicas for the control plane, set the corresponding field.
   345  	// NOTE: If the Topology.ControlPlane.replicas value is nil, it is assumed that the control plane controller
   346  	// does not implement support for this field and the ControlPlane object is generated without the number of Replicas.
   347  	if s.Blueprint.Topology.ControlPlane.Replicas != nil {
   348  		if err := contract.ControlPlane().Replicas().Set(controlPlane, int64(*s.Blueprint.Topology.ControlPlane.Replicas)); err != nil {
   349  			return nil, errors.Wrap(err, "failed to set spec.replicas in the ControlPlane object")
   350  		}
   351  	}
   352  
   353  	// If it is required to manage the NodeDrainTimeout for the control plane, set the corresponding field.
   354  	nodeDrainTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDrainTimeout
   355  	if s.Blueprint.Topology.ControlPlane.NodeDrainTimeout != nil {
   356  		nodeDrainTimeout = s.Blueprint.Topology.ControlPlane.NodeDrainTimeout
   357  	}
   358  	if nodeDrainTimeout != nil {
   359  		if err := contract.ControlPlane().MachineTemplate().NodeDrainTimeout().Set(controlPlane, *nodeDrainTimeout); err != nil {
   360  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDrainTimeout in the ControlPlane object")
   361  		}
   362  	}
   363  
   364  	// If it is required to manage the NodeVolumeDetachTimeout for the control plane, set the corresponding field.
   365  	nodeVolumeDetachTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeVolumeDetachTimeout
   366  	if s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout != nil {
   367  		nodeVolumeDetachTimeout = s.Blueprint.Topology.ControlPlane.NodeVolumeDetachTimeout
   368  	}
   369  	if nodeVolumeDetachTimeout != nil {
   370  		if err := contract.ControlPlane().MachineTemplate().NodeVolumeDetachTimeout().Set(controlPlane, *nodeVolumeDetachTimeout); err != nil {
   371  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeVolumeDetachTimeout in the ControlPlane object")
   372  		}
   373  	}
   374  
   375  	// If it is required to manage the NodeDeletionTimeout for the control plane, set the corresponding field.
   376  	nodeDeletionTimeout := s.Blueprint.ClusterClass.Spec.ControlPlane.NodeDeletionTimeout
   377  	if s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout != nil {
   378  		nodeDeletionTimeout = s.Blueprint.Topology.ControlPlane.NodeDeletionTimeout
   379  	}
   380  	if nodeDeletionTimeout != nil {
   381  		if err := contract.ControlPlane().MachineTemplate().NodeDeletionTimeout().Set(controlPlane, *nodeDeletionTimeout); err != nil {
   382  			return nil, errors.Wrap(err, "failed to set spec.machineTemplate.nodeDeletionTimeout in the ControlPlane object")
   383  		}
   384  	}
   385  
   386  	// Sets the desired Kubernetes version for the control plane.
   387  	version, err := g.computeControlPlaneVersion(ctx, s)
   388  	if err != nil {
   389  		return nil, errors.Wrap(err, "failed to compute version of control plane")
   390  	}
   391  	if err := contract.ControlPlane().Version().Set(controlPlane, version); err != nil {
   392  		return nil, errors.Wrap(err, "failed to set spec.version in the ControlPlane object")
   393  	}
   394  
   395  	return controlPlane, nil
   396  }
   397  
   398  // computeControlPlaneVersion calculates the version of the desired control plane.
   399  // The version is calculated using the state of the current machine deployments, the current control plane
   400  // and the version defined in the topology.
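        //
        // For example (versions are illustrative): with the topology at v1.29.0 and the current
        // control plane at v1.28.1, this function keeps returning v1.28.1 while the control plane is
        // provisioning, upgrading, or scaling, or while any MachineDeployment/MachinePool is upgrading;
        // it returns v1.29.0 only once everything is stable and, if the RuntimeSDK feature gate is
        // enabled, the BeforeClusterUpgrade hook is not blocking.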
   401  func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
   402  	log := tlog.LoggerFrom(ctx)
   403  	desiredVersion := s.Blueprint.Topology.Version
   404  	// If we are creating the control plane object (current control plane is nil), use version from topology.
   405  	if s.Current.ControlPlane == nil || s.Current.ControlPlane.Object == nil {
   406  		return desiredVersion, nil
   407  	}
   408  
   409  	// Get the current version of the control plane.
   410  	currentVersion, err := contract.ControlPlane().Version().Get(s.Current.ControlPlane.Object)
   411  	if err != nil {
   412  		return "", errors.Wrap(err, "failed to get the version from control plane spec")
   413  	}
   414  
   415  	s.UpgradeTracker.ControlPlane.IsPendingUpgrade = true
   416  	if *currentVersion == desiredVersion {
   417  		// Mark that the control plane spec is already at the desired version.
   418  		// This information is used to show the appropriate message for the TopologyReconciled
   419  		// condition.
   420  		s.UpgradeTracker.ControlPlane.IsPendingUpgrade = false
   421  	}
   422  
   423  	// Check if the control plane is being created for the first time.
   424  	cpProvisioning, err := contract.ControlPlane().IsProvisioning(s.Current.ControlPlane.Object)
   425  	if err != nil {
   426  		return "", errors.Wrap(err, "failed to check if the control plane is being provisioned")
   427  	}
   428  	// If the control plane is being provisioned (being created for the first time), then do not
   429  	// pick up the desiredVersion yet.
   430  	// Return the current version of the control plane. We will pick up the new version after the
   431  	// control plane is provisioned.
   432  	if cpProvisioning {
   433  		s.UpgradeTracker.ControlPlane.IsProvisioning = true
   434  		return *currentVersion, nil
   435  	}
   436  
   437  	// Check if the current control plane is upgrading
   438  	cpUpgrading, err := contract.ControlPlane().IsUpgrading(s.Current.ControlPlane.Object)
   439  	if err != nil {
   440  		return "", errors.Wrap(err, "failed to check if control plane is upgrading")
   441  	}
   442  	// If the current control plane is upgrading (still completing a previous upgrade),
   443  	// then do not pick up the desiredVersion yet.
   444  	// Return the current version of the control plane. We will pick up the new version
   445  	// after the control plane is stable.
   446  	if cpUpgrading {
   447  		s.UpgradeTracker.ControlPlane.IsUpgrading = true
   448  		return *currentVersion, nil
   449  	}
   450  
   451  	// Return here if the control plane is already at the desired version
   452  	if !s.UpgradeTracker.ControlPlane.IsPendingUpgrade {
   453  		// At this stage the control plane is not upgrading and is already at the desired version.
   454  		// We can return.
   455  		// Nb. We do not return earlier in the function when the control plane is already at the desired version,
   456  		// because we still need to know whether it is provisioning or upgrading. This information
   457  		// is required when updating the TopologyReconciled condition on the cluster.
   458  
   459  		// Call the AfterControlPlaneUpgrade hook now that the control plane is upgraded.
   460  		if feature.Gates.Enabled(feature.RuntimeSDK) {
   461  			// Call the hook only if we are tracking the intent to do so. If it is not tracked, it means we don't need to call the
   462  			// hook because we didn't go through an upgrade, or we already called the hook after the upgrade.
   463  			if hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster) {
   464  				// Call all the registered extensions for the hook.
   465  				hookRequest := &runtimehooksv1.AfterControlPlaneUpgradeRequest{
   466  					Cluster:           *s.Current.Cluster,
   467  					KubernetesVersion: desiredVersion,
   468  				}
   469  				hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
   470  				if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
   471  					return "", err
   472  				}
   473  				// Add the response to the tracker so we can later update condition or requeue when required.
   474  				s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, hookResponse)
   475  
   476  				// If the extension responds asking to hold off on starting MachineDeployment/MachinePool upgrades,
   477  				// keep the hook marked as pending; otherwise the hook call is completed and we
   478  				// can remove this hook from the list of pending hooks.
   479  				if hookResponse.RetryAfterSeconds != 0 {
   480  					log.Infof("MachineDeployments/MachinePools upgrades to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
   481  				} else {
   482  					if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
   483  						return "", err
   484  					}
   485  				}
   486  			}
   487  		}
   488  
   489  		return *currentVersion, nil
   490  	}
   491  
   492  	// If the control plane supports replicas, check if the control plane is in the middle of a scale operation.
   493  	// If yes, then do not pick up the desiredVersion yet. We will pick up the new version after the control plane is stable.
   494  	if s.Blueprint.Topology.ControlPlane.Replicas != nil {
   495  		cpScaling, err := contract.ControlPlane().IsScaling(s.Current.ControlPlane.Object)
   496  		if err != nil {
   497  			return "", errors.Wrap(err, "failed to check if the control plane is scaling")
   498  		}
   499  		if cpScaling {
   500  			s.UpgradeTracker.ControlPlane.IsScaling = true
   501  			return *currentVersion, nil
   502  		}
   503  	}
   504  
   505  	// If the control plane is not upgrading or scaling, we can assume the control plane is stable.
   506  	// However, we should also check whether any MachineDeployments/MachinePools are upgrading.
   507  	// If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet.
   508  	// We will pick up the new version after the MachineDeployments/MachinePools finish upgrading.
   509  	if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 ||
   510  		len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 {
   511  		return *currentVersion, nil
   512  	}
   513  
   514  	if feature.Gates.Enabled(feature.RuntimeSDK) {
   515  		// At this point the control plane and the machine deployments are stable and we are almost ready to pick
   516  		// up the desiredVersion. Call the BeforeClusterUpgrade hook before picking up the desired version.
   517  		hookRequest := &runtimehooksv1.BeforeClusterUpgradeRequest{
   518  			Cluster:               *s.Current.Cluster,
   519  			FromKubernetesVersion: *currentVersion,
   520  			ToKubernetesVersion:   desiredVersion,
   521  		}
   522  		hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
   523  		if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
   524  			return "", err
   525  		}
   526  		// Add the response to the tracker so we can later update condition or requeue when required.
   527  		s.HookResponseTracker.Add(runtimehooksv1.BeforeClusterUpgrade, hookResponse)
   528  		if hookResponse.RetryAfterSeconds != 0 {
   529  			// Cannot pick up the new version right now. Need to try again later.
   530  			log.Infof("Cluster upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.BeforeClusterUpgrade))
   531  			return *currentVersion, nil
   532  		}
   533  
   534  		// We are picking up the new version here.
   535  		// Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade.
   536  		if err := hooks.MarkAsPending(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
   537  			return "", err
   538  		}
   539  	}
   540  
   541  	// Control plane and machine deployments are stable. All the required hooks have been called.
   542  	// Ready to pick up the topology version.
   543  	s.UpgradeTracker.ControlPlane.IsPendingUpgrade = false
   544  	s.UpgradeTracker.ControlPlane.IsStartingUpgrade = true
   545  	return desiredVersion, nil
   546  }
   547  
   548  // computeCluster computes the desired state for the Cluster object.
   549  // NOTE: Some of the Cluster's fields contribute to defining the Cluster blueprint (e.g. Cluster.Spec.Topology),
   550  // while other fields should be managed as part of the actual Cluster (e.g. Cluster.Spec.ControlPlaneRef); in this func
   551  // we are concerned only with the latter group of fields.
   552  func computeCluster(_ context.Context, s *scope.Scope, infrastructureCluster, controlPlane *unstructured.Unstructured) (*clusterv1.Cluster, error) {
   553  	cluster := s.Current.Cluster.DeepCopy()
   554  
   555  	// Enforce the topology labels.
   556  	// NOTE: The cluster label is added at creation time so this object can be read by the ClusterTopology
   557  	// controller immediately after creation, even before other controllers add the label (if missing).
   558  	if cluster.Labels == nil {
   559  		cluster.Labels = map[string]string{}
   560  	}
   561  	cluster.Labels[clusterv1.ClusterNameLabel] = cluster.Name
   562  	cluster.Labels[clusterv1.ClusterTopologyOwnedLabel] = ""
   563  
   564  	// Set the references to the infrastructureCluster and controlPlane objects.
   565  	// NOTE: Once set for the first time, the references are not expected to change.
   566  	var err error
   567  	cluster.Spec.InfrastructureRef, err = calculateRefDesiredAPIVersion(cluster.Spec.InfrastructureRef, infrastructureCluster)
   568  	if err != nil {
   569  		return nil, errors.Wrapf(err, "failed to calculate infrastructureRef")
   570  	}
   571  	cluster.Spec.ControlPlaneRef, err = calculateRefDesiredAPIVersion(cluster.Spec.ControlPlaneRef, controlPlane)
   572  	if err != nil {
   573  		return nil, errors.Wrapf(err, "failed to calculate controlPlaneRef")
   574  	}
   575  
   576  	return cluster, nil
   577  }
   578  
   579  // calculateRefDesiredAPIVersion returns the desired ref calculated from desiredReferencedObject,
   580  // without overriding the apiVersion stored in the currentRef, if any.
   581  // This is required because the apiVersion in the desired ref is aligned to the apiVersion used
   582  // in the ClusterClass when reading the current state. If the currentRef is nil, or its group or kind
   583  // doesn't match, no changes are applied to the desired ref.
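        //
        // For example (group, kind and versions below are illustrative):
        //
        //	currentRef: infrastructure.cluster.x-k8s.io/v1beta2, Kind=DockerCluster
        //	desired:    infrastructure.cluster.x-k8s.io/v1beta1, Kind=DockerCluster (cloned from the ClusterClass template)
        //	result:     the returned ref keeps apiVersion infrastructure.cluster.x-k8s.io/v1beta2, because group and kind match.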
   584  func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredReferencedObject *unstructured.Unstructured) (*corev1.ObjectReference, error) {
   585  	desiredRef := contract.ObjToRef(desiredReferencedObject)
   586  	// If ref is not set yet, just set a ref to the desired referenced object.
   587  	if currentRef == nil {
   588  		return desiredRef, nil
   589  	}
   590  
   591  	currentGV, err := schema.ParseGroupVersion(currentRef.APIVersion)
   592  	if err != nil {
   593  		return nil, errors.Wrapf(err, "failed to parse apiVersion %q of current ref", currentRef.APIVersion)
   594  	}
   595  	desiredGK := desiredReferencedObject.GroupVersionKind().GroupKind()
   596  
   597  	// Keep the apiVersion of the current ref if the group and kind are already correct.
   598  	// We only want to change the apiVersion to update the group, as it should be possible
   599  	// for other controllers to bump the version if necessary (i.e. if there is a newer
   600  	// version of the CRD compared to the one that the topology controller is working on).
   601  	if currentGV.Group == desiredGK.Group && currentRef.Kind == desiredGK.Kind {
   602  		desiredRef.APIVersion = currentRef.APIVersion
   603  	}
   604  	return desiredRef, nil
   605  }
   606  
   607  // computeMachineDeployments computes the desired state of the list of MachineDeployments.
   608  func (g *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
   609  	machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
   610  	for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
   611  		desiredMachineDeployment, err := g.computeMachineDeployment(ctx, s, mdTopology)
   612  		if err != nil {
   613  			return nil, errors.Wrapf(err, "failed to compute MachineDeployment for topology %q", mdTopology.Name)
   614  		}
   615  		machineDeploymentsStateMap[mdTopology.Name] = desiredMachineDeployment
   616  	}
   617  	return machineDeploymentsStateMap, nil
   618  }
   619  
   620  // computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
   621  // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
   622  // the machineDeployment class.
   623  func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
   624  	desiredMachineDeployment := &scope.MachineDeploymentState{}
   625  
   626  	// Gets the blueprint for the MachineDeployment class.
   627  	className := machineDeploymentTopology.Class
   628  	machineDeploymentBlueprint, ok := s.Blueprint.MachineDeployments[className]
   629  	if !ok {
   630  		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   631  	}
   632  
   633  	var machineDeploymentClass *clusterv1.MachineDeploymentClass
   634  	for _, mdClass := range s.Blueprint.ClusterClass.Spec.Workers.MachineDeployments {
   635  		mdClass := mdClass
   636  		if mdClass.Class == className {
   637  			machineDeploymentClass = &mdClass
   638  			break
   639  		}
   640  	}
   641  	if machineDeploymentClass == nil {
   642  		return nil, errors.Errorf("MachineDeployment class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   643  	}
   644  
   645  	// Compute the bootstrap template.
   646  	currentMachineDeployment := s.Current.MachineDeployments[machineDeploymentTopology.Name]
   647  	var currentBootstrapTemplateRef *corev1.ObjectReference
   648  	if currentMachineDeployment != nil && currentMachineDeployment.BootstrapTemplate != nil {
   649  		currentBootstrapTemplateRef = currentMachineDeployment.Object.Spec.Template.Spec.Bootstrap.ConfigRef
   650  	}
   651  	var err error
   652  	desiredMachineDeployment.BootstrapTemplate, err = templateToTemplate(templateToInput{
   653  		template:              machineDeploymentBlueprint.BootstrapTemplate,
   654  		templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.BootstrapTemplate),
   655  		cluster:               s.Current.Cluster,
   656  		nameGenerator:         topologynames.SimpleNameGenerator(topologynames.BootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
   657  		currentObjectRef:      currentBootstrapTemplateRef,
   658  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   659  		// in case of errors in between creating this template and creating/updating the MachineDeployment object
   660  		// with the reference to this template.
   661  		ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")),
   662  	})
   663  	if err != nil {
   664  		return nil, err
   665  	}
   666  
   667  	bootstrapTemplateLabels := desiredMachineDeployment.BootstrapTemplate.GetLabels()
   668  	if bootstrapTemplateLabels == nil {
   669  		bootstrapTemplateLabels = map[string]string{}
   670  	}
   671  	// Add ClusterTopologyMachineDeploymentLabel to the generated Bootstrap template
   672  	bootstrapTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   673  	desiredMachineDeployment.BootstrapTemplate.SetLabels(bootstrapTemplateLabels)
   674  
   675  	// Compute the Infrastructure template.
   676  	var currentInfraMachineTemplateRef *corev1.ObjectReference
   677  	if currentMachineDeployment != nil && currentMachineDeployment.InfrastructureMachineTemplate != nil {
   678  		currentInfraMachineTemplateRef = &currentMachineDeployment.Object.Spec.Template.Spec.InfrastructureRef
   679  	}
   680  	desiredMachineDeployment.InfrastructureMachineTemplate, err = templateToTemplate(templateToInput{
   681  		template:              machineDeploymentBlueprint.InfrastructureMachineTemplate,
   682  		templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.InfrastructureMachineTemplate),
   683  		cluster:               s.Current.Cluster,
   684  		nameGenerator:         topologynames.SimpleNameGenerator(topologynames.InfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)),
   685  		currentObjectRef:      currentInfraMachineTemplateRef,
   686  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   687  		// in case of errors in between creating this template and creating/updating the MachineDeployment object
   688  		// with the reference to this template.
   689  		ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")),
   690  	})
   691  	if err != nil {
   692  		return nil, err
   693  	}
   694  
   695  	infraMachineTemplateLabels := desiredMachineDeployment.InfrastructureMachineTemplate.GetLabels()
   696  	if infraMachineTemplateLabels == nil {
   697  		infraMachineTemplateLabels = map[string]string{}
   698  	}
   699  	// Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
   700  	infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   701  	desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
   702  	version := g.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
   703  
   704  	// Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology
   705  	minReadySeconds := machineDeploymentClass.MinReadySeconds
   706  	if machineDeploymentTopology.MinReadySeconds != nil {
   707  		minReadySeconds = machineDeploymentTopology.MinReadySeconds
   708  	}
   709  
   710  	strategy := machineDeploymentClass.Strategy
   711  	if machineDeploymentTopology.Strategy != nil {
   712  		strategy = machineDeploymentTopology.Strategy
   713  	}
   714  
   715  	failureDomain := machineDeploymentClass.FailureDomain
   716  	if machineDeploymentTopology.FailureDomain != nil {
   717  		failureDomain = machineDeploymentTopology.FailureDomain
   718  	}
   719  
   720  	nodeDrainTimeout := machineDeploymentClass.NodeDrainTimeout
   721  	if machineDeploymentTopology.NodeDrainTimeout != nil {
   722  		nodeDrainTimeout = machineDeploymentTopology.NodeDrainTimeout
   723  	}
   724  
   725  	nodeVolumeDetachTimeout := machineDeploymentClass.NodeVolumeDetachTimeout
   726  	if machineDeploymentTopology.NodeVolumeDetachTimeout != nil {
   727  		nodeVolumeDetachTimeout = machineDeploymentTopology.NodeVolumeDetachTimeout
   728  	}
   729  
   730  	nodeDeletionTimeout := machineDeploymentClass.NodeDeletionTimeout
   731  	if machineDeploymentTopology.NodeDeletionTimeout != nil {
   732  		nodeDeletionTimeout = machineDeploymentTopology.NodeDeletionTimeout
   733  	}
   734  
   735  	// Compute the MachineDeployment object.
   736  	desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachineDeployment.BootstrapTemplate)
   737  	if err != nil {
   738  		return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
   739  	}
   740  	desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachineDeployment.InfrastructureMachineTemplate)
   741  	if err != nil {
   742  		return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine template ref")
   743  	}
   744  
   745  	nameTemplate := "{{ .cluster.name }}-{{ .machineDeployment.topologyName }}-{{ .random }}"
   746  	if machineDeploymentClass.NamingStrategy != nil && machineDeploymentClass.NamingStrategy.Template != nil {
   747  		nameTemplate = *machineDeploymentClass.NamingStrategy.Template
   748  	}
   749  
   750  	name, err := topologynames.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName()
   751  	if err != nil {
   752  		return nil, errors.Wrap(err, "failed to generate name for MachineDeployment")
   753  	}
   754  
   755  	desiredMachineDeploymentObj := &clusterv1.MachineDeployment{
   756  		TypeMeta: metav1.TypeMeta{
   757  			APIVersion: clusterv1.GroupVersion.String(),
   758  			Kind:       "MachineDeployment",
   759  		},
   760  		ObjectMeta: metav1.ObjectMeta{
   761  			Name:      name,
   762  			Namespace: s.Current.Cluster.Namespace,
   763  		},
   764  		Spec: clusterv1.MachineDeploymentSpec{
   765  			ClusterName:     s.Current.Cluster.Name,
   766  			MinReadySeconds: minReadySeconds,
   767  			Strategy:        strategy,
   768  			Template: clusterv1.MachineTemplateSpec{
   769  				Spec: clusterv1.MachineSpec{
   770  					ClusterName:             s.Current.Cluster.Name,
   771  					Version:                 ptr.To(version),
   772  					Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
   773  					InfrastructureRef:       *desiredInfraMachineTemplateRef,
   774  					FailureDomain:           failureDomain,
   775  					NodeDrainTimeout:        nodeDrainTimeout,
   776  					NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
   777  					NodeDeletionTimeout:     nodeDeletionTimeout,
   778  				},
   779  			},
   780  		},
   781  	}
   782  
   783  	// If an existing MachineDeployment is present, override the generated MachineDeployment name,
   784  	// re-using the existing name (this helps during reconcile).
   785  	if currentMachineDeployment != nil && currentMachineDeployment.Object != nil {
   786  		desiredMachineDeploymentObj.SetName(currentMachineDeployment.Object.Name)
   787  	}
   788  
   789  	// Apply annotations
   790  	machineDeploymentAnnotations := util.MergeMap(machineDeploymentTopology.Metadata.Annotations, machineDeploymentBlueprint.Metadata.Annotations)
   791  	// Ensure the annotations used to control the upgrade sequence are never propagated.
   792  	delete(machineDeploymentAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
   793  	delete(machineDeploymentAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
   794  	desiredMachineDeploymentObj.SetAnnotations(machineDeploymentAnnotations)
   795  	desiredMachineDeploymentObj.Spec.Template.Annotations = machineDeploymentAnnotations
   796  
   797  	// Apply Labels
   798  	// NOTE: On top of all the labels applied to managed objects we are applying the ClusterTopologyMachineDeploymentLabel
   799  	// keeping track of the MachineDeployment name from the Topology; this will be used to identify the object in next reconcile loops.
   800  	machineDeploymentLabels := util.MergeMap(machineDeploymentTopology.Metadata.Labels, machineDeploymentBlueprint.Metadata.Labels)
   801  	if machineDeploymentLabels == nil {
   802  		machineDeploymentLabels = map[string]string{}
   803  	}
   804  	machineDeploymentLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
   805  	machineDeploymentLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   806  	machineDeploymentLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   807  	desiredMachineDeploymentObj.SetLabels(machineDeploymentLabels)
   808  
   809  	// Also set the labels in .spec.template.labels so that they are propagated to
   810  	// MachineSet.labels and MachineSet.spec.template.labels and thus to Machine.labels.
   811  	// Note: the labels in MachineSet are used to properly cleanup templates when the MachineSet is deleted.
   812  	desiredMachineDeploymentObj.Spec.Template.Labels = machineDeploymentLabels
   813  
   814  	// Set the selector with the subset of labels identifying controlled machines.
   815  	// NOTE: this prevents the webhook from adding the cluster.x-k8s.io/deployment-name label, which is
   816  	// redundant for managed MachineDeployments given that we already have topology.cluster.x-k8s.io/deployment-name.
   817  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels = map[string]string{}
   818  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
   819  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
   820  	desiredMachineDeploymentObj.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
   821  
   822  	// Set the desired replicas.
   823  	desiredMachineDeploymentObj.Spec.Replicas = machineDeploymentTopology.Replicas
   824  
   825  	desiredMachineDeployment.Object = desiredMachineDeploymentObj
   826  
   827  	// If the ClusterClass defines a MachineHealthCheck for the MachineDeployment, add it to the desired state.
   828  	if s.Blueprint.IsMachineDeploymentMachineHealthCheckEnabled(&machineDeploymentTopology) {
   829  		// Note: The MHC is going to use a selector that provides a minimal set of labels which are common to all MachineSets belonging to the MachineDeployment.
   830  		desiredMachineDeployment.MachineHealthCheck = computeMachineHealthCheck(
   831  			ctx,
   832  			desiredMachineDeploymentObj,
   833  			selectors.ForMachineDeploymentMHC(desiredMachineDeploymentObj),
   834  			s.Current.Cluster,
   835  			s.Blueprint.MachineDeploymentMachineHealthCheckClass(&machineDeploymentTopology))
   836  	}
   837  	return desiredMachineDeployment, nil
   838  }
   839  
   840  // computeMachineDeploymentVersion calculates the version of the desired machine deployment.
   841  // The version is calculated using the state of the current machine deployments,
   842  // the current control plane and the version defined in the topology.
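        //
        // For example (versions are illustrative): with the topology at v1.29.0 and an existing
        // MachineDeployment at v1.28.1, this function keeps returning v1.28.1 while the upgrade is
        // deferred, the AfterControlPlaneUpgrade hook is blocking, the upgrade concurrency limit is
        // reached, or the control plane is not yet stable; otherwise the MachineDeployment is marked
        // as upgrading and v1.29.0 is returned.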
   843  func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
   844  	desiredVersion := s.Blueprint.Topology.Version
   845  	// If creating a new machine deployment, mark it as pending if the control plane is not
   846  	// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
   847  	// Example: join could fail if the load balancers are slow in detecting when CP machines are
   848  	// being deleted.
   849  	if currentMDState == nil || currentMDState.Object == nil {
   850  		if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
   851  			s.UpgradeTracker.MachineDeployments.MarkPendingCreate(machineDeploymentTopology.Name)
   852  		}
   853  		return desiredVersion
   854  	}
   855  
   856  	// Get the current version of the machine deployment.
   857  	currentVersion := *currentMDState.Object.Spec.Template.Spec.Version
   858  
   859  	// Return early if the currentVersion is already equal to the desiredVersion;
   860  	// no further checks required.
   861  	if currentVersion == desiredVersion {
   862  		return currentVersion
   863  	}
   864  
   865  	// Return early if the upgrade for the MachineDeployment is deferred.
   866  	if isMachineDeploymentDeferred(s.Blueprint.Topology, machineDeploymentTopology) {
   867  		s.UpgradeTracker.MachineDeployments.MarkDeferredUpgrade(currentMDState.Object.Name)
   868  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   869  		return currentVersion
   870  	}
   871  
   872  	// Return early if the AfterControlPlaneUpgrade hook returns a blocking response.
   873  	if s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
   874  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   875  		return currentVersion
   876  	}
   877  
   878  	// Return early if the upgrade concurrency is reached.
   879  	if s.UpgradeTracker.MachineDeployments.UpgradeConcurrencyReached() {
   880  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   881  		return currentVersion
   882  	}
   883  
   884  	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
   885  	// Return the current version of the machine deployment. We will pick up the new version after the control
   886  	// plane is stable.
   887  	if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() {
   888  		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
   889  		return currentVersion
   890  	}
   891  
   892  	// Control plane and machine deployments are stable.
   893  	// Ready to pick up the topology version.
   894  	s.UpgradeTracker.MachineDeployments.MarkUpgrading(currentMDState.Object.Name)
   895  	return desiredVersion
   896  }
   897  
   898  // isMachineDeploymentDeferred returns true if the upgrade for the mdTopology is deferred.
   899  // This is the case when either:
   900  //   - the mdTopology has the ClusterTopologyDeferUpgradeAnnotation annotation.
   901  //   - the mdTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
   902  //   - another md topology which is before mdTopology in the workers.machineDeployments list has the
   903  //     ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
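        //
        // For example (topology names are illustrative): with workers.machineDeployments = [md-a, md-b, md-c]
        // and the hold-upgrade-sequence annotation set only on md-b, md-a is not deferred, while md-b
        // and md-c are deferred.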
   904  func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology clusterv1.MachineDeploymentTopology) bool {
   905  	// If mdTopology has the ClusterTopologyDeferUpgradeAnnotation annotation => md is deferred.
   906  	if _, ok := mdTopology.Metadata.Annotations[clusterv1.ClusterTopologyDeferUpgradeAnnotation]; ok {
   907  		return true
   908  	}
   909  
   910  	// If mdTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation => md is deferred.
   911  	if _, ok := mdTopology.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
   912  		return true
   913  	}
   914  
   915  	for _, md := range clusterTopology.Workers.MachineDeployments {
   916  		// If another md topology with the ClusterTopologyHoldUpgradeSequenceAnnotation annotation
   917  		// is found before the mdTopology => md is deferred.
   918  		if _, ok := md.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
   919  			return true
   920  		}
   921  
   922  		// If mdTopology is found before a md topology with the ClusterTopologyHoldUpgradeSequenceAnnotation
   923  		// annotation => md is not deferred.
   924  		if md.Name == mdTopology.Name {
   925  			return false
   926  		}
   927  	}
   928  
   929  	// This case should be impossible as mdTopology should have been found in workers.machineDeployments.
   930  	return false
   931  }
   932  
   933  // computeMachinePools computes the desired state of the list of MachinePools.
   934  func (g *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
   935  	machinePoolsStateMap := make(scope.MachinePoolsStateMap)
   936  	for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
   937  		desiredMachinePool, err := g.computeMachinePool(ctx, s, mpTopology)
   938  		if err != nil {
   939  			return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name)
   940  		}
   941  		machinePoolsStateMap[mpTopology.Name] = desiredMachinePool
   942  	}
   943  	return machinePoolsStateMap, nil
   944  }
   945  
   946  // computeMachinePool computes the desired state for a MachinePoolTopology.
   947  // The generated machinePool object is calculated using the values from the machinePoolTopology and
   948  // the machinePool class.
   949  func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
   950  	desiredMachinePool := &scope.MachinePoolState{}
   951  
   952  	// Gets the blueprint for the MachinePool class.
   953  	className := machinePoolTopology.Class
   954  	machinePoolBlueprint, ok := s.Blueprint.MachinePools[className]
   955  	if !ok {
   956  		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   957  	}
   958  
   959  	var machinePoolClass *clusterv1.MachinePoolClass
   960  	for _, mpClass := range s.Blueprint.ClusterClass.Spec.Workers.MachinePools {
   961  		mpClass := mpClass
   962  		if mpClass.Class == className {
   963  			machinePoolClass = &mpClass
   964  			break
   965  		}
   966  	}
   967  	if machinePoolClass == nil {
   968  		return nil, errors.Errorf("MachinePool class %s not found in %s", className, tlog.KObj{Obj: s.Blueprint.ClusterClass})
   969  	}
   970  
   971  	// Compute the bootstrap config.
   972  	currentMachinePool := s.Current.MachinePools[machinePoolTopology.Name]
   973  	var currentBootstrapConfigRef *corev1.ObjectReference
   974  	if currentMachinePool != nil && currentMachinePool.BootstrapObject != nil {
   975  		currentBootstrapConfigRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
   976  	}
   977  	var err error
   978  	desiredMachinePool.BootstrapObject, err = templateToObject(templateToInput{
   979  		template:              machinePoolBlueprint.BootstrapTemplate,
   980  		templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate),
   981  		cluster:               s.Current.Cluster,
   982  		nameGenerator:         topologynames.SimpleNameGenerator(topologynames.BootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)),
   983  		currentObjectRef:      currentBootstrapConfigRef,
   984  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
   985  		// in case of errors in between creating this template and creating/updating the MachinePool object
   986  		// with the reference to this template.
   987  		ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")),
   988  	})
   989  	if err != nil {
   990  		return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
   991  	}
   992  
   993  	bootstrapObjectLabels := desiredMachinePool.BootstrapObject.GetLabels()
   994  	if bootstrapObjectLabels == nil {
   995  		bootstrapObjectLabels = map[string]string{}
   996  	}
   997  	// Add the ClusterTopologyMachinePoolNameLabel to the generated bootstrap config object.
   998  	bootstrapObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
   999  	desiredMachinePool.BootstrapObject.SetLabels(bootstrapObjectLabels)
  1000  
  1001  	// Compute the InfrastructureMachinePool.
  1002  	var currentInfraMachinePoolRef *corev1.ObjectReference
  1003  	if currentMachinePool != nil && currentMachinePool.InfrastructureMachinePoolObject != nil {
  1004  		currentInfraMachinePoolRef = &currentMachinePool.Object.Spec.Template.Spec.InfrastructureRef
  1005  	}
  1006  	desiredMachinePool.InfrastructureMachinePoolObject, err = templateToObject(templateToInput{
  1007  		template:              machinePoolBlueprint.InfrastructureMachinePoolTemplate,
  1008  		templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.InfrastructureMachinePoolTemplate),
  1009  		cluster:               s.Current.Cluster,
  1010  		nameGenerator:         topologynames.SimpleNameGenerator(topologynames.InfrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)),
  1011  		currentObjectRef:      currentInfraMachinePoolRef,
  1012  		// Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected
  1013  		// in case of errors in between creating this template and creating/updating the MachinePool object
  1014  		// with the reference to this template.
  1015  		ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")),
  1016  	})
  1017  	if err != nil {
  1018  		return nil, errors.Wrapf(err, "failed to compute infrastructure object for topology %q", machinePoolTopology.Name)
  1019  	}
  1020  
  1021  	infraMachinePoolObjectLabels := desiredMachinePool.InfrastructureMachinePoolObject.GetLabels()
  1022  	if infraMachinePoolObjectLabels == nil {
  1023  		infraMachinePoolObjectLabels = map[string]string{}
  1024  	}
  1025  	// Add the ClusterTopologyMachinePoolNameLabel to the generated InfrastructureMachinePool object.
  1026  	infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
  1027  	desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
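
        	// Compute the version of the MachinePool.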
  1028  	version := g.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
  1029  
  1030  	// Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
  1031  	minReadySeconds := machinePoolClass.MinReadySeconds
  1032  	if machinePoolTopology.MinReadySeconds != nil {
  1033  		minReadySeconds = machinePoolTopology.MinReadySeconds
  1034  	}
  1035  
  1036  	failureDomains := machinePoolClass.FailureDomains
  1037  	if machinePoolTopology.FailureDomains != nil {
  1038  		failureDomains = machinePoolTopology.FailureDomains
  1039  	}
  1040  
  1041  	nodeDrainTimeout := machinePoolClass.NodeDrainTimeout
  1042  	if machinePoolTopology.NodeDrainTimeout != nil {
  1043  		nodeDrainTimeout = machinePoolTopology.NodeDrainTimeout
  1044  	}
  1045  
  1046  	nodeVolumeDetachTimeout := machinePoolClass.NodeVolumeDetachTimeout
  1047  	if machinePoolTopology.NodeVolumeDetachTimeout != nil {
  1048  		nodeVolumeDetachTimeout = machinePoolTopology.NodeVolumeDetachTimeout
  1049  	}
  1050  
  1051  	nodeDeletionTimeout := machinePoolClass.NodeDeletionTimeout
  1052  	if machinePoolTopology.NodeDeletionTimeout != nil {
  1053  		nodeDeletionTimeout = machinePoolTopology.NodeDeletionTimeout
  1054  	}
  1055  
  1056  	// Compute the MachinePool object.
  1057  	desiredBootstrapConfigRef, err := calculateRefDesiredAPIVersion(currentBootstrapConfigRef, desiredMachinePool.BootstrapObject)
  1058  	if err != nil {
  1059  		return nil, errors.Wrap(err, "failed to calculate desired bootstrap config ref")
  1060  	}
  1061  	desiredInfraMachinePoolRef, err := calculateRefDesiredAPIVersion(currentInfraMachinePoolRef, desiredMachinePool.InfrastructureMachinePoolObject)
  1062  	if err != nil {
  1063  		return nil, errors.Wrap(err, "failed to calculate desired infrastructure machine pool ref")
  1064  	}
  1065  
  1066  	nameTemplate := "{{ .cluster.name }}-{{ .machinePool.topologyName }}-{{ .random }}"
  1067  	if machinePoolClass.NamingStrategy != nil && machinePoolClass.NamingStrategy.Template != nil {
  1068  		nameTemplate = *machinePoolClass.NamingStrategy.Template
  1069  	}
  1070  
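        	// Note: with the default template above, a Cluster named "my-cluster" and a MachinePool topology named
        	// "workers" would get a MachinePool name like "my-cluster-workers-" followed by a random suffix
        	// (names here are illustrative only).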
  1071  	name, err := topologynames.MachinePoolNameGenerator(nameTemplate, s.Current.Cluster.Name, machinePoolTopology.Name).GenerateName()
  1072  	if err != nil {
  1073  		return nil, errors.Wrap(err, "failed to generate name for MachinePool")
  1074  	}
  1075  
  1076  	desiredMachinePoolObj := &expv1.MachinePool{
  1077  		TypeMeta: metav1.TypeMeta{
  1078  			APIVersion: expv1.GroupVersion.String(),
  1079  			Kind:       "MachinePool",
  1080  		},
  1081  		ObjectMeta: metav1.ObjectMeta{
  1082  			Name:      name,
  1083  			Namespace: s.Current.Cluster.Namespace,
  1084  		},
  1085  		Spec: expv1.MachinePoolSpec{
  1086  			ClusterName:     s.Current.Cluster.Name,
  1087  			MinReadySeconds: minReadySeconds,
  1088  			FailureDomains:  failureDomains,
  1089  			Template: clusterv1.MachineTemplateSpec{
  1090  				Spec: clusterv1.MachineSpec{
  1091  					ClusterName:             s.Current.Cluster.Name,
  1092  					Version:                 ptr.To(version),
  1093  					Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef},
  1094  					InfrastructureRef:       *desiredInfraMachinePoolRef,
  1095  					NodeDrainTimeout:        nodeDrainTimeout,
  1096  					NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
  1097  					NodeDeletionTimeout:     nodeDeletionTimeout,
  1098  				},
  1099  			},
  1100  		},
  1101  	}
  1102  
  1103  	// If an existing MachinePool is present, override the generated MachinePool name by
  1104  	// re-using the existing name (this helps the reconcile process).
  1105  	if currentMachinePool != nil && currentMachinePool.Object != nil {
  1106  		desiredMachinePoolObj.SetName(currentMachinePool.Object.Name)
  1107  	}
  1108  
  1109  	// Apply annotations
  1110  	machinePoolAnnotations := util.MergeMap(machinePoolTopology.Metadata.Annotations, machinePoolBlueprint.Metadata.Annotations)
  1111  	// Ensure the annotations used to control the upgrade sequence are never propagated.
  1112  	delete(machinePoolAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
  1113  	delete(machinePoolAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
  1114  	desiredMachinePoolObj.SetAnnotations(machinePoolAnnotations)
  1115  	desiredMachinePoolObj.Spec.Template.Annotations = machinePoolAnnotations
  1116  
  1117  	// Apply labels
  1118  	// NOTE: On top of all the labels applied to managed objects, we apply the ClusterTopologyMachinePoolNameLabel
  1119  	// to keep track of the MachinePool name from the Topology; this is used to identify the object in future reconcile loops.
  1120  	machinePoolLabels := util.MergeMap(machinePoolTopology.Metadata.Labels, machinePoolBlueprint.Metadata.Labels)
  1121  	if machinePoolLabels == nil {
  1122  		machinePoolLabels = map[string]string{}
  1123  	}
  1124  	machinePoolLabels[clusterv1.ClusterNameLabel] = s.Current.Cluster.Name
  1125  	machinePoolLabels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1126  	machinePoolLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
  1127  	desiredMachinePoolObj.SetLabels(machinePoolLabels)
  1128  
  1129  	// Also set the labels in .spec.template.labels so that they are propagated to
  1130  	// the Machines of the MachinePool.
  1131  	// Note: unlike MachineDeployments, MachinePools do not create intermediate MachineSets.
  1132  	desiredMachinePoolObj.Spec.Template.Labels = machinePoolLabels
  1133  
  1134  	// Set the desired replicas.
  1135  	desiredMachinePoolObj.Spec.Replicas = machinePoolTopology.Replicas
  1136  
  1137  	desiredMachinePool.Object = desiredMachinePoolObj
  1138  
  1139  	return desiredMachinePool, nil
  1140  }
  1141  
  1142  // computeMachinePoolVersion calculates the version of the desired machine pool.
  1143  // The version is calculated using the state of the current machine pools,
  1144  // the current control plane and the version defined in the topology.
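        // When the new version cannot be picked up yet (e.g. the upgrade is deferred, the AfterControlPlaneUpgrade
        // hook is blocking, the upgrade concurrency is reached, or the control plane is not stable), the MachinePool
        // is tracked accordingly in the UpgradeTracker and the current version is returned.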
  1145  func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
  1146  	desiredVersion := s.Blueprint.Topology.Version
  1147  	// If creating a new machine pool, mark it as pending if the control plane is not
  1148  	// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
  1149  	// Example: join could fail if the load balancers are slow in detecting when CP machines are
  1150  	// being deleted.
  1151  	if currentMPState == nil || currentMPState.Object == nil {
  1152  		if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
  1153  			s.UpgradeTracker.MachinePools.MarkPendingCreate(machinePoolTopology.Name)
  1154  		}
  1155  		return desiredVersion
  1156  	}
  1157  
  1158  	// Get the current version of the machine pool.
  1159  	currentVersion := *currentMPState.Object.Spec.Template.Spec.Version
  1160  
  1161  	// Return early if the currentVersion is already equal to the desiredVersion;
  1162  	// no further checks required.
  1163  	if currentVersion == desiredVersion {
  1164  		return currentVersion
  1165  	}
  1166  
  1167  	// Return early if the upgrade for the MachinePool is deferred.
  1168  	if isMachinePoolDeferred(s.Blueprint.Topology, machinePoolTopology) {
  1169  		s.UpgradeTracker.MachinePools.MarkDeferredUpgrade(currentMPState.Object.Name)
  1170  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1171  		return currentVersion
  1172  	}
  1173  
  1174  	// Return early if the AfterControlPlaneUpgrade hook returns a blocking response.
  1175  	if s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
  1176  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1177  		return currentVersion
  1178  	}
  1179  
  1180  	// Return early if the upgrade concurrency is reached.
  1181  	if s.UpgradeTracker.MachinePools.UpgradeConcurrencyReached() {
  1182  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1183  		return currentVersion
  1184  	}
  1185  
  1186  	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
  1187  	// Return the current version of the machine pool. We will pick up the new version after the control
  1188  	// plane is stable.
  1189  	if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() {
  1190  		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
  1191  		return currentVersion
  1192  	}
  1193  
  1194  	// Control plane and machine pools are stable.
  1195  	// Ready to pick up the topology version.
  1196  	s.UpgradeTracker.MachinePools.MarkUpgrading(currentMPState.Object.Name)
  1197  	return desiredVersion
  1198  }
  1199  
  1200  // isMachinePoolDeferred returns true if the upgrade for the mpTopology is deferred.
  1201  // This is the case when either:
  1202  //   - the mpTopology has the ClusterTopologyDeferUpgradeAnnotation annotation.
  1203  //   - the mpTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
  1204  //   - another mp topology which is before mpTopology in the workers.machinePools list has the
  1205  //     ClusterTopologyHoldUpgradeSequenceAnnotation annotation.
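        //
        // For example (illustrative names only), given workers.machinePools listed in the order [mp-a, mp-b, mp-c]
        // where only mp-a has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation, the upgrade of mp-a is
        // deferred by its own annotation, and the upgrades of mp-b and mp-c are deferred because mp-a precedes them
        // in the list. Only the presence of the annotation key matters, e.g.:
        //
        //	mpTopology.Metadata.Annotations = map[string]string{
        //		clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation: "",
        //	}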
  1206  func isMachinePoolDeferred(clusterTopology *clusterv1.Topology, mpTopology clusterv1.MachinePoolTopology) bool {
  1207  	// If mpTopology has the ClusterTopologyDeferUpgradeAnnotation annotation => mp is deferred.
  1208  	if _, ok := mpTopology.Metadata.Annotations[clusterv1.ClusterTopologyDeferUpgradeAnnotation]; ok {
  1209  		return true
  1210  	}
  1211  
  1212  	// If mpTopology has the ClusterTopologyHoldUpgradeSequenceAnnotation annotation => mp is deferred.
  1213  	if _, ok := mpTopology.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
  1214  		return true
  1215  	}
  1216  
  1217  	for _, mp := range clusterTopology.Workers.MachinePools {
  1218  		// If another mp topology with the ClusterTopologyHoldUpgradeSequenceAnnotation annotation
  1219  		// is found before the mpTopology => mp is deferred.
  1220  		if _, ok := mp.Metadata.Annotations[clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation]; ok {
  1221  			return true
  1222  		}
  1223  
  1224  		// If mpTopology is found before an mp topology with the ClusterTopologyHoldUpgradeSequenceAnnotation
  1225  		// annotation => mp is not deferred.
  1226  		if mp.Name == mpTopology.Name {
  1227  			return false
  1228  		}
  1229  	}
  1230  
  1231  	// This case should be impossible as mpTopology should have been found in workers.machinePools.
  1232  	return false
  1233  }
  1234  
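        // templateToInput holds the parameters used by templateToObject and templateToTemplate to derive a
        // cluster-specific object or template from a ClusterClass template.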
  1235  type templateToInput struct {
  1236  	template              *unstructured.Unstructured
  1237  	templateClonedFromRef *corev1.ObjectReference
  1238  	cluster               *clusterv1.Cluster
  1239  	nameGenerator         topologynames.NameGenerator
  1240  	currentObjectRef      *corev1.ObjectReference
  1241  	labels                map[string]string
  1242  	annotations           map[string]string
  1243  	// ownerRef is an optional OwnerReference to attach to the cloned object.
  1244  	ownerRef *metav1.OwnerReference
  1245  }
  1246  
  1247  // templateToObject generates an object from a template, taking care
  1248  // of adding required labels (cluster, topology), annotations (clonedFrom)
  1249  // and assigning a meaningful name (or reusing current reference name).
  1250  func templateToObject(in templateToInput) (*unstructured.Unstructured, error) {
  1251  	// NOTE: The cluster label is added at creation time so this object could be read by the ClusterTopology
  1252  	// controller immediately after creation, even before other controllers are going to add the label (if missing).
  1253  	labels := map[string]string{}
  1254  	for k, v := range in.labels {
  1255  		labels[k] = v
  1256  	}
  1257  	labels[clusterv1.ClusterNameLabel] = in.cluster.Name
  1258  	labels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1259  
  1260  	// Generate the object from the template.
  1261  	// NOTE: Controller OwnerReferences can't be set at this stage; they are added when the object is actually
  1262  	// created. The optional ownerRef from the input is only set to ensure garbage collection in case of errors.
  1263  	object, err := external.GenerateTemplate(&external.GenerateTemplateInput{
  1264  		Template:    in.template,
  1265  		TemplateRef: in.templateClonedFromRef,
  1266  		Namespace:   in.cluster.Namespace,
  1267  		Labels:      labels,
  1268  		Annotations: in.annotations,
  1269  		ClusterName: in.cluster.Name,
  1270  		OwnerRef:    in.ownerRef,
  1271  	})
  1272  	if err != nil {
  1273  		return nil, err
  1274  	}
  1275  
  1276  	// Ensure the generated objects have a meaningful name.
  1277  	// NOTE: In case there is already a ref to this object in the Cluster, re-use the same name
  1278  	// in order to simplify comparison at later stages of the reconcile process.
  1279  	name, err := in.nameGenerator.GenerateName()
  1280  	if err != nil {
  1281  		return nil, errors.Wrapf(err, "failed to generate name for %s", object.GetKind())
  1282  	}
  1283  	object.SetName(name)
  1284  	if in.currentObjectRef != nil && in.currentObjectRef.Name != "" {
  1285  		object.SetName(in.currentObjectRef.Name)
  1286  	}
  1287  
  1288  	return object, nil
  1289  }
  1290  
  1291  // templateToTemplate generates a template from an existing template, taking care
  1292  // of adding required labels (cluster, topology), annotations (clonedFrom)
  1293  // and assigning a meaningful name (or reusing current reference name).
  1294  // NOTE: We are creating a copy of the ClusterClass template for each cluster so
  1295  // it is possible to add cluster specific information without affecting the original object.
  1296  func templateToTemplate(in templateToInput) (*unstructured.Unstructured, error) {
  1297  	template := &unstructured.Unstructured{}
  1298  	in.template.DeepCopyInto(template)
  1299  
  1300  	// Remove all the info automatically assigned by the API server, as it is not relevant
  1301  	// for the copy of the template.
  1302  	template.SetResourceVersion("")
  1303  	template.SetFinalizers(nil)
  1304  	template.SetUID("")
  1305  	template.SetSelfLink("")
  1306  
  1307  	// Enforce the topology labels into the provided label set.
  1308  	// NOTE: The cluster label is added at creation time so this object could be read by the ClusterTopology
  1309  	// controller immediately after creation, even before other controllers are going to add the label (if missing).
  1310  	labels := template.GetLabels()
  1311  	if labels == nil {
  1312  		labels = map[string]string{}
  1313  	}
  1314  	for k, v := range in.labels {
  1315  		labels[k] = v
  1316  	}
  1317  	labels[clusterv1.ClusterNameLabel] = in.cluster.Name
  1318  	labels[clusterv1.ClusterTopologyOwnedLabel] = ""
  1319  	template.SetLabels(labels)
  1320  
  1321  	// Enforce the cloned-from annotations and remove the kubectl last-applied-configuration annotation
  1322  	// because we don't want to propagate it to the cloned template objects.
  1323  	annotations := template.GetAnnotations()
  1324  	if annotations == nil {
  1325  		annotations = map[string]string{}
  1326  	}
  1327  	for k, v := range in.annotations {
  1328  		annotations[k] = v
  1329  	}
  1330  	annotations[clusterv1.TemplateClonedFromNameAnnotation] = in.templateClonedFromRef.Name
  1331  	annotations[clusterv1.TemplateClonedFromGroupKindAnnotation] = in.templateClonedFromRef.GroupVersionKind().GroupKind().String()
  1332  	delete(annotations, corev1.LastAppliedConfigAnnotation)
  1333  	template.SetAnnotations(annotations)
  1334  
  1335  	// Set the owner reference.
  1336  	if in.ownerRef != nil {
  1337  		template.SetOwnerReferences([]metav1.OwnerReference{*in.ownerRef})
  1338  	}
  1339  
  1340  	// Ensure the generated template gets a meaningful name.
  1341  	// NOTE: In case there is already an object ref to this template, it is required to re-use the same name
  1342  	// in order to simplify comparison at later stages of the reconcile process.
  1343  	name, err := in.nameGenerator.GenerateName()
  1344  	if err != nil {
  1345  		return nil, errors.Wrapf(err, "failed to generate name for %s", template.GetKind())
  1346  	}
  1347  	template.SetName(name)
  1348  	if in.currentObjectRef != nil && in.currentObjectRef.Name != "" {
  1349  		template.SetName(in.currentObjectRef.Name)
  1350  	}
  1351  
  1352  	return template, nil
  1353  }
  1354  
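        // computeMachineHealthCheck computes a MachineHealthCheck for the given healthCheckTarget, using the target's
        // name and namespace, the given selector and the MachineHealthCheckClass from the ClusterClass; the result is
        // defaulted with the same logic used by the MachineHealthCheck webhook.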
  1355  func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Object, selector *metav1.LabelSelector, cluster *clusterv1.Cluster, check *clusterv1.MachineHealthCheckClass) *clusterv1.MachineHealthCheck {
  1356  	// Create a MachineHealthCheck with the spec given in the ClusterClass.
  1357  	mhc := &clusterv1.MachineHealthCheck{
  1358  		TypeMeta: metav1.TypeMeta{
  1359  			APIVersion: clusterv1.GroupVersion.String(),
  1360  			Kind:       "MachineHealthCheck",
  1361  		},
  1362  		ObjectMeta: metav1.ObjectMeta{
  1363  			Name:      healthCheckTarget.GetName(),
  1364  			Namespace: healthCheckTarget.GetNamespace(),
  1365  			Labels: map[string]string{
  1366  				clusterv1.ClusterTopologyOwnedLabel: "",
  1367  			},
  1368  			// Note: we are adding an ownerRef to Cluster so the MHC will be automatically garbage collected
  1369  			// in case deletion is triggered before an object reconcile happens.
  1370  			OwnerReferences: []metav1.OwnerReference{
  1371  				*ownerrefs.OwnerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
  1372  			},
  1373  		},
  1374  		Spec: clusterv1.MachineHealthCheckSpec{
  1375  			ClusterName:         cluster.Name,
  1376  			Selector:            *selector,
  1377  			UnhealthyConditions: check.UnhealthyConditions,
  1378  			MaxUnhealthy:        check.MaxUnhealthy,
  1379  			UnhealthyRange:      check.UnhealthyRange,
  1380  			NodeStartupTimeout:  check.NodeStartupTimeout,
  1381  			RemediationTemplate: check.RemediationTemplate,
  1382  		},
  1383  	}
  1384  
  1385  	// Default all fields in the MachineHealthCheck using the same function called in the webhook. This ensures the desired
  1386  	// state of the object won't be different from the current state due to webhook Defaulting.
  1387  	if err := (&webhooks.MachineHealthCheck{}).Default(ctx, mhc); err != nil {
  1388  		panic(err)
  1389  	}
  1390  
  1391  	return mhc
  1392  }
  1393  
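        // getOwnerReferenceFrom returns the owner reference on obj that matches the owner's Kind and Name,
        // or nil if no such owner reference exists.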
  1394  func getOwnerReferenceFrom(obj, owner client.Object) *metav1.OwnerReference {
  1395  	for _, o := range obj.GetOwnerReferences() {
  1396  		if o.Kind == owner.GetObjectKind().GroupVersionKind().Kind && o.Name == owner.GetName() {
  1397  			return &o
  1398  		}
  1399  	}
  1400  	return nil
  1401  }