sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/topology/cluster/conditions.go (about)

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"fmt"
    21  	"strings"
    22  
    23  	"github.com/pkg/errors"
    24  
    25  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    26  	"sigs.k8s.io/cluster-api/exp/topology/scope"
    27  	"sigs.k8s.io/cluster-api/internal/contract"
    28  	"sigs.k8s.io/cluster-api/util/conditions"
    29  )
    30  
// reconcileConditions updates the conditions on the Cluster object.
// The TopologyReconciled condition is currently the only condition managed by
// the topology controller; reconcileErr is the error (if any) returned by the
// main topology reconcile loop and is surfaced in the condition message.
func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
	return r.reconcileTopologyReconciledCondition(s, cluster, reconcileErr)
}
    34  
// reconcileTopologyReconciledCondition sets the TopologyReconciled condition on the cluster.
// The TopologyReconciled condition is considered true if spec of all the objects associated with the
// cluster are in sync with the topology defined in the cluster.
// The condition is false under the following conditions:
// - The cluster is being deleted.
// - An error occurred during the reconcile process of the cluster topology.
// - The ClusterClass has not been successfully reconciled with its current spec.
// - A lifecycle hook is blocking (part of) the reconciliation.
// - The cluster upgrade has not yet propagated to all the components of the cluster.
//   - For a managed topology cluster the version upgrade is propagated one component at a time.
//     In such a case, since some of the component's spec would be adrift from the topology the
//     topology cannot be considered fully reconciled.
//
// Note: the checks below are ordered by precedence; only the first matching
// case determines the condition's reason and message.
func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
	// Mark TopologyReconciled as false due to cluster deletion.
	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
		conditions.Set(
			cluster,
			conditions.FalseCondition(
				clusterv1.TopologyReconciledCondition,
				clusterv1.DeletedReason,
				clusterv1.ConditionSeverityInfo,
				"",
			),
		)
		return nil
	}

	// If an error occurred during reconciliation set the TopologyReconciled condition to false.
	// Add the error message from the reconcile function to the message of the condition.
	if reconcileErr != nil {
		conditions.Set(
			cluster,
			conditions.FalseCondition(
				clusterv1.TopologyReconciledCondition,
				clusterv1.TopologyReconcileFailedReason,
				clusterv1.ConditionSeverityError,
				// TODO: Add a protection for messages continuously changing leading to Cluster object changes/reconcile.
				reconcileErr.Error(),
			),
		)
		return nil
	}

	// If the ClusterClass `metadata.Generation` doesn't match the `status.ObservedGeneration` requeue as the ClusterClass
	// is not up to date.
	// Note: the nil checks guard against scopes where the blueprint has not been read yet.
	if s.Blueprint != nil && s.Blueprint.ClusterClass != nil &&
		s.Blueprint.ClusterClass.GetGeneration() != s.Blueprint.ClusterClass.Status.ObservedGeneration {
		conditions.Set(
			cluster,
			conditions.FalseCondition(
				clusterv1.TopologyReconciledCondition,
				clusterv1.TopologyReconciledClusterClassNotReconciledReason,
				clusterv1.ConditionSeverityInfo,
				"ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if"+
					".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused",
			),
		)
		return nil
	}

	// If any of the lifecycle hooks are blocking any part of the reconciliation then topology
	// is not considered as fully reconciled.
	// A non-zero aggregate RetryAfter means at least one hook asked for a retry, i.e. is blocking.
	if s.HookResponseTracker.AggregateRetryAfter() != 0 {
		conditions.Set(
			cluster,
			conditions.FalseCondition(
				clusterv1.TopologyReconciledCondition,
				clusterv1.TopologyReconciledHookBlockingReason,
				clusterv1.ConditionSeverityInfo,
				// TODO: Add a protection for messages continuously changing leading to Cluster object changes/reconcile.
				s.HookResponseTracker.AggregateMessage(),
			),
		)
		return nil
	}

	// The topology is not considered as fully reconciled if one of the following is true:
	// * either the Control Plane or any of the MachineDeployments/MachinePools are still pending to pick up the new version
	//  (generally happens when upgrading the cluster)
	// * when there are MachineDeployments/MachinePools for which the upgrade has been deferred
	// * when new MachineDeployments/MachinePools are pending to be created
	//  (generally happens when upgrading the cluster)
	if s.UpgradeTracker.ControlPlane.IsPendingUpgrade ||
		s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() ||
		s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() ||
		s.UpgradeTracker.MachineDeployments.DeferredUpgrade() ||
		s.UpgradeTracker.MachinePools.IsAnyPendingCreate() ||
		s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() ||
		s.UpgradeTracker.MachinePools.DeferredUpgrade() {
		msgBuilder := &strings.Builder{}
		var reason string

		// TODO(ykakarap): Evaluate potential improvements to building the condition. Multiple causes can trigger the
		// condition to be false at the same time (Example: ControlPlane.IsPendingUpgrade and MachineDeployments.IsAnyPendingCreate can
		// occur at the same time). Find better wording and `Reason` for the condition so that the condition can be rich
		// with all the relevant information.
		// First switch: pick the single highest-precedence cause (control plane first,
		// then MachineDeployments, then MachinePools) and set reason + base message.
		switch {
		case s.UpgradeTracker.ControlPlane.IsPendingUpgrade:
			fmt.Fprintf(msgBuilder, "Control plane rollout and upgrade to version %s on hold.", s.Blueprint.Topology.Version)
			reason = clusterv1.TopologyReconciledControlPlaneUpgradePendingReason
		case s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade():
			fmt.Fprintf(msgBuilder, "MachineDeployment(s) %s rollout and upgrade to version %s on hold.",
				computeNameList(s.UpgradeTracker.MachineDeployments.PendingUpgradeNames()),
				s.Blueprint.Topology.Version,
			)
			reason = clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason
		case s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate():
			fmt.Fprintf(msgBuilder, "MachineDeployment(s) for Topologies %s creation on hold.",
				computeNameList(s.UpgradeTracker.MachineDeployments.PendingCreateTopologyNames()),
			)
			reason = clusterv1.TopologyReconciledMachineDeploymentsCreatePendingReason
		case s.UpgradeTracker.MachineDeployments.DeferredUpgrade():
			fmt.Fprintf(msgBuilder, "MachineDeployment(s) %s rollout and upgrade to version %s deferred.",
				computeNameList(s.UpgradeTracker.MachineDeployments.DeferredUpgradeNames()),
				s.Blueprint.Topology.Version,
			)
			reason = clusterv1.TopologyReconciledMachineDeploymentsUpgradeDeferredReason
		case s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade():
			fmt.Fprintf(msgBuilder, "MachinePool(s) %s rollout and upgrade to version %s on hold.",
				computeNameList(s.UpgradeTracker.MachinePools.PendingUpgradeNames()),
				s.Blueprint.Topology.Version,
			)
			reason = clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason
		case s.UpgradeTracker.MachinePools.IsAnyPendingCreate():
			fmt.Fprintf(msgBuilder, "MachinePool(s) for Topologies %s creation on hold.",
				computeNameList(s.UpgradeTracker.MachinePools.PendingCreateTopologyNames()),
			)
			reason = clusterv1.TopologyReconciledMachinePoolsCreatePendingReason
		case s.UpgradeTracker.MachinePools.DeferredUpgrade():
			fmt.Fprintf(msgBuilder, "MachinePool(s) %s rollout and upgrade to version %s deferred.",
				computeNameList(s.UpgradeTracker.MachinePools.DeferredUpgradeNames()),
				s.Blueprint.Topology.Version,
			)
			reason = clusterv1.TopologyReconciledMachinePoolsUpgradeDeferredReason
		}

		// Second switch: append a single sentence describing what the hold is
		// currently waiting on (provisioning/upgrading/scaling work in flight).
		switch {
		case s.UpgradeTracker.ControlPlane.IsProvisioning:
			msgBuilder.WriteString(" Control plane is completing initial provisioning")

		case s.UpgradeTracker.ControlPlane.IsUpgrading:
			// NOTE(review): assumes s.Current.ControlPlane.Object is non-nil whenever
			// IsUpgrading is set — confirm against the upgrade tracker's invariants.
			cpVersion, err := contract.ControlPlane().Version().Get(s.Current.ControlPlane.Object)
			if err != nil {
				return errors.Wrap(err, "failed to get control plane spec version")
			}
			fmt.Fprintf(msgBuilder, " Control plane is upgrading to version %s", *cpVersion)

		case s.UpgradeTracker.ControlPlane.IsScaling:
			msgBuilder.WriteString(" Control plane is reconciling desired replicas")

		case len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0:
			fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are upgrading",
				computeNameList(s.UpgradeTracker.MachineDeployments.UpgradingNames()),
			)

		case len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0:
			fmt.Fprintf(msgBuilder, " MachinePool(s) %s are upgrading",
				computeNameList(s.UpgradeTracker.MachinePools.UpgradingNames()),
			)
		}

		conditions.Set(
			cluster,
			conditions.FalseCondition(
				clusterv1.TopologyReconciledCondition,
				reason,
				clusterv1.ConditionSeverityInfo,
				msgBuilder.String(),
			),
		)
		return nil
	}

	// If there are no errors while reconciling and if the topology is not holding out changes
	// we can consider that spec of all the objects is reconciled to match the topology. Set the
	// TopologyReconciled condition to true.
	conditions.Set(
		cluster,
		conditions.TrueCondition(clusterv1.TopologyReconciledCondition),
	)

	return nil
}
   216  
   217  // computeNameList computes list of names from the given list to be shown in conditions.
   218  // It shortens the list to at most 5 names and adds an ellipsis at the end if the list
   219  // has more than 5 elements.
   220  func computeNameList(list []string) any {
   221  	if len(list) > 5 {
   222  		list = append(list[:5], "...")
   223  	}
   224  
   225  	return strings.Join(list, ", ")
   226  }