sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/machinedeployment/machinedeployment_rolling.go

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package machinedeployment

import (
	"context"
	"sort"

	"github.com/pkg/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil"
)
// rolloutRolling implements the logic for rolling a new MachineSet.
func (r *Reconciler) rolloutRolling(ctx context.Context, md *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) error {
	newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, md, msList, true)
	if err != nil {
		return err
	}

	// newMS can be nil when a MachineSet is already associated with this deployment and the
	// only changes are to annotations or MinReadySeconds. In other words, newMS is nil when
	// there are changes, but no replacement of existing Machines is needed.
	if newMS == nil {
		return nil
	}

	allMSs := append(oldMSs, newMS)

	// Scale up, if we can.
	if err := r.reconcileNewMachineSet(ctx, allMSs, newMS, md); err != nil {
		return err
	}

	if err := r.syncDeploymentStatus(allMSs, newMS, md); err != nil {
		return err
	}

	// Scale down, if we can.
	if err := r.reconcileOldMachineSets(ctx, allMSs, oldMSs, newMS, md); err != nil {
		return err
	}

	if err := r.syncDeploymentStatus(allMSs, newMS, md); err != nil {
		return err
	}

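	// Once the rollout is complete (all replicas are updated and available, and the observed
	// generation is current), MachineSets from older revisions that exceed the deployment's
	// revision history limit can be pruned.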
	if mdutil.DeploymentComplete(md, &md.Status) {
		if err := r.cleanupDeployment(ctx, oldMSs, md); err != nil {
			return err
		}
	}

	return nil
}

func (r *Reconciler) reconcileNewMachineSet(ctx context.Context, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) error {
	if deployment.Spec.Replicas == nil {
		return errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected", client.ObjectKeyFromObject(deployment))
	}

	if newMS.Spec.Replicas == nil {
		return errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(newMS))
	}

	if *(newMS.Spec.Replicas) == *(deployment.Spec.Replicas) {
		// Scaling not required.
		return nil
	}

	if *(newMS.Spec.Replicas) > *(deployment.Spec.Replicas) {
		// Scale down.
		return r.scaleMachineSet(ctx, newMS, *(deployment.Spec.Replicas), deployment)
	}

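	// Scale up. NewMSNewReplicas computes how many replicas the new MachineSet can be
	// scaled up to, honouring maxSurge: the total machine count across all MachineSets
	// must not exceed spec.replicas + maxSurge.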
	newReplicasCount, err := mdutil.NewMSNewReplicas(deployment, allMSs, *newMS.Spec.Replicas)
	if err != nil {
		return err
	}
	return r.scaleMachineSet(ctx, newMS, newReplicasCount, deployment)
}

func (r *Reconciler) reconcileOldMachineSets(ctx context.Context, allMSs []*clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) error {
	log := ctrl.LoggerFrom(ctx)

	if deployment.Spec.Replicas == nil {
		return errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected",
			client.ObjectKeyFromObject(deployment))
	}

	if newMS.Spec.Replicas == nil {
		return errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected",
			client.ObjectKeyFromObject(newMS))
	}

	oldMachinesCount := mdutil.GetReplicaCountForMachineSets(oldMSs)
	if oldMachinesCount == 0 {
		// Can't scale down further.
		return nil
	}

	allMachinesCount := mdutil.GetReplicaCountForMachineSets(allMSs)
	log.V(4).Info("New MachineSet has available machines",
		"machineset", client.ObjectKeyFromObject(newMS).String(), "available-replicas", newMS.Status.AvailableReplicas)
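	// MaxUnavailable resolves the strategy's maxUnavailable field (an absolute number or a
	// percentage of spec.replicas, with percentages rounded down) and caps it at spec.replicas.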
	maxUnavailable := mdutil.MaxUnavailable(*deployment)

	// Check if we can scale down. We can scale down in the following 2 cases:
	// * Some old MachineSets have unhealthy replicas; we can safely scale down those unhealthy replicas, since that won't
	//   further increase unavailability.
	// * The new MachineSet has scaled up and its replicas have become ready; then we can scale down old MachineSets in a
	//   further step.
	//
	// maxScaledDown := allMachinesCount - minAvailable - newMachineSetMachinesUnavailable
	// Take into account not only maxUnavailable and any surge machines that have been created, but also unavailable machines
	// from the newMS, so that unavailable machines from the newMS don't make us scale down old MachineSets in a further
	// step (which would increase unavailability).
	//
	// Concrete example:
	//
	// * 10 replicas
	// * 2 maxUnavailable (absolute number, not percent)
	// * 3 maxSurge (absolute number, not percent)
	//
	// case 1:
	// * Deployment is updated, newMS is created with 3 replicas, oldMS is scaled down to 8, and newMS is scaled up to 5.
	// * The new MachineSet machines crashloop and never become available.
	// * allMachinesCount is 13. minAvailable is 8. newMSMachinesUnavailable is 5.
	// * A node fails and causes one of the oldMS machines to become unavailable. However, 13 - 8 - 5 = 0, so the oldMS won't be scaled down.
	// * The user notices the crashloop and does kubectl rollout undo to roll back.
	// * newMSMachinesUnavailable is 1, since we rolled back to the good MachineSet, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping machines will be scaled down.
	// * The total number of machines will then be 9, and the newMS can be scaled up to 10.
	//
	// case 2:
	// Same example, but pushing a new machine template instead of rolling back (aka "roll over"):
	// * The new MachineSet created must start with 0 replicas because allMachinesCount is already at 13.
	// * However, newMSMachinesUnavailable would also be 0, so the 2 old MachineSets could be scaled down by 5 (13 - 8 - 0),
	//   which would then allow the new MachineSet to be scaled up by 5.
	minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
	newMSUnavailableMachineCount := *(newMS.Spec.Replicas) - newMS.Status.AvailableReplicas
	maxScaledDown := allMachinesCount - minAvailable - newMSUnavailableMachineCount
	if maxScaledDown <= 0 {
		return nil
	}

	// Clean up unhealthy replicas first; otherwise unhealthy replicas will block the deployment
	// and cause a timeout. See https://github.com/kubernetes/kubernetes/issues/16737
	oldMSs, cleanupCount, err := r.cleanupUnhealthyReplicas(ctx, oldMSs, deployment, maxScaledDown)
	if err != nil {
		return err
	}

	log.V(4).Info("Cleaned up unhealthy replicas from old MachineSets", "count", cleanupCount)

	// Scale down old MachineSets; maxUnavailable is checked to ensure we can scale down.
	allMSs = oldMSs
	allMSs = append(allMSs, newMS)
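	// allMSs is rebuilt from the oldMSs returned by cleanupUnhealthyReplicas, so the
	// scale-down below sees the already-decremented replica counts.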
	scaledDownCount, err := r.scaleDownOldMachineSetsForRollingUpdate(ctx, allMSs, oldMSs, deployment)
	if err != nil {
		return err
	}

	log.V(4).Info("Scaled down old MachineSets of MachineDeployment", "count", scaledDownCount)
	return nil
}

// cleanupUnhealthyReplicas will scale down old MachineSets with unhealthy replicas, so that all unhealthy replicas will be deleted.
func (r *Reconciler) cleanupUnhealthyReplicas(ctx context.Context, oldMSs []*clusterv1.MachineSet, deployment *clusterv1.MachineDeployment, maxCleanupCount int32) ([]*clusterv1.MachineSet, int32, error) {
	log := ctrl.LoggerFrom(ctx)

	sort.Sort(mdutil.MachineSetsByCreationTimestamp(oldMSs))
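	// Sorted oldest first, so unhealthy replicas are cleaned up from the oldest MachineSets before newer ones.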

	// Scale down all old MachineSets with any unhealthy replicas. MachineSets will honour Spec.DeletePolicy
	// when deleting Machines. Machines with a deletion timestamp, with a failure message, or without a nodeRef
	// are preferred for all strategies.
	// This results in a best effort to remove Machines backing unhealthy nodes.
	totalScaledDown := int32(0)

	for _, targetMS := range oldMSs {
		if targetMS.Spec.Replicas == nil {
			return nil, 0, errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(targetMS))
		}

		if totalScaledDown >= maxCleanupCount {
			break
		}

		oldMSReplicas := *(targetMS.Spec.Replicas)
		if oldMSReplicas == 0 {
			// Cannot scale down this MachineSet.
			continue
		}

		oldMSAvailableReplicas := targetMS.Status.AvailableReplicas
		log.V(4).Info("Found available Machines in old MachineSet",
			"count", oldMSAvailableReplicas, "target-machineset", client.ObjectKeyFromObject(targetMS).String())
		if oldMSReplicas == oldMSAvailableReplicas {
			// No unhealthy replicas found, no scaling required.
			continue
		}

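		// Scale down by at most the number of unhealthy replicas, capped by the remaining
		// cleanup budget, so this phase never requests more deletions than there are
		// unhealthy replicas.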
		remainingCleanupCount := maxCleanupCount - totalScaledDown
		unhealthyCount := oldMSReplicas - oldMSAvailableReplicas
		scaledDownCount := min(remainingCleanupCount, unhealthyCount)
		newReplicasCount := oldMSReplicas - scaledDownCount

		if newReplicasCount > oldMSReplicas {
			return nil, 0, errors.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %v: %d -> %d",
				client.ObjectKeyFromObject(targetMS), oldMSReplicas, newReplicasCount)
		}

		if err := r.scaleMachineSet(ctx, targetMS, newReplicasCount, deployment); err != nil {
			return nil, totalScaledDown, err
		}

		totalScaledDown += scaledDownCount
	}

	return oldMSs, totalScaledDown, nil
}

// scaleDownOldMachineSetsForRollingUpdate scales down old MachineSets when the deployment strategy is "RollingUpdate".
// It checks maxUnavailable to ensure availability is maintained.
func (r *Reconciler) scaleDownOldMachineSetsForRollingUpdate(ctx context.Context, allMSs []*clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, deployment *clusterv1.MachineDeployment) (int32, error) {
	log := ctrl.LoggerFrom(ctx)

	if deployment.Spec.Replicas == nil {
		return 0, errors.Errorf("spec.replicas for MachineDeployment %v is nil, this is unexpected", client.ObjectKeyFromObject(deployment))
	}

	maxUnavailable := mdutil.MaxUnavailable(*deployment)
	minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
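	// minAvailable is the floor the rolling update must maintain; at most
	// availableMachineCount - minAvailable Machines may be removed in this pass.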

	// Find the number of available machines.
	availableMachineCount := mdutil.GetAvailableReplicaCountForMachineSets(allMSs)

	// Check if we can scale down.
	if availableMachineCount <= minAvailable {
		// Cannot scale down.
		return 0, nil
	}

	log.V(4).Info("Found available machines in deployment, scaling down old MSes", "count", availableMachineCount)

	sort.Sort(mdutil.MachineSetsByCreationTimestamp(oldMSs))

	totalScaledDown := int32(0)
	totalScaleDownCount := availableMachineCount - minAvailable
	for _, targetMS := range oldMSs {
		if targetMS.Spec.Replicas == nil {
			return 0, errors.Errorf("spec.replicas for MachineSet %v is nil, this is unexpected", client.ObjectKeyFromObject(targetMS))
		}

		if totalScaledDown >= totalScaleDownCount {
			// No further scaling required.
			break
		}

		if *(targetMS.Spec.Replicas) == 0 {
			// Cannot scale down this MachineSet.
			continue
		}

		// Scale down.
		scaleDownCount := min(*(targetMS.Spec.Replicas), totalScaleDownCount-totalScaledDown)
		newReplicasCount := *(targetMS.Spec.Replicas) - scaleDownCount
		if newReplicasCount > *(targetMS.Spec.Replicas) {
			return totalScaledDown, errors.Errorf("when scaling down old MachineSet, got invalid request to scale down %v: %d -> %d",
				client.ObjectKeyFromObject(targetMS), *(targetMS.Spec.Replicas), newReplicasCount)
		}

		if err := r.scaleMachineSet(ctx, targetMS, newReplicasCount, deployment); err != nil {
			return totalScaledDown, err
		}

		totalScaledDown += scaleDownCount
	}

	return totalScaledDown, nil
}