k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/deployment/deployment_controller.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package deployment contains all the logic for handling Kubernetes Deployments.
// It implements a set of strategies (rolling, recreate) for deploying an application,
// the means to roll back to previous versions, proportional scaling for mitigating
// risk, cleanup policy, and other useful features of Deployments.
package deployment

import (
	"context"
	"fmt"
	"reflect"
	"time"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/types"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	appslisters "k8s.io/client-go/listers/apps/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/deployment/util"
)

const (
	// maxRetries is the number of times a deployment will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
	// a deployment is going to be requeued:
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries = 15
)
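
// The delay table above can be reproduced with a short, self-contained sketch.
// This is illustrative only: it assumes the per-item exponential behavior of
// workqueue.DefaultTypedControllerRateLimiter (5ms base delay, doubling on each
// retry) and ignores the overall bucket rate limiter that is also in play:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//	)
//
//	func main() {
//		base := 5 * time.Millisecond
//		for retry := 0; retry < 15; retry++ { // 15 == maxRetries
//			fmt.Println(base * time.Duration(1<<retry)) // 5ms, 10ms, ..., ~82s
//		}
//	}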

// controllerKind contains the schema.GroupVersionKind for this controller type.
var controllerKind = apps.SchemeGroupVersion.WithKind("Deployment")

// DeploymentController is responsible for synchronizing Deployment objects stored
// in the system with actual running replica sets and pods.
type DeploymentController struct {
	// rsControl is used for adopting/releasing replica sets.
	rsControl controller.RSControlInterface
	client    clientset.Interface

	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// To allow injection of syncDeployment for testing.
	syncHandler func(ctx context.Context, dKey string) error
	// used for unit testing
	enqueueDeployment func(deployment *apps.Deployment)

	// dLister can list/get deployments from the shared informer's store
	dLister appslisters.DeploymentLister
	// rsLister can list/get replica sets from the shared informer's store
	rsLister appslisters.ReplicaSetLister
	// podLister can list/get pods from the shared informer's store
	podLister corelisters.PodLister

	// dListerSynced returns true if the Deployment store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	dListerSynced cache.InformerSynced
	// rsListerSynced returns true if the ReplicaSet store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	rsListerSynced cache.InformerSynced
	// podListerSynced returns true if the pod store has been synced at least once.
	// Added as a member to the struct to allow injection for testing.
	podListerSynced cache.InformerSynced

	// Deployments that need to be synced
	queue workqueue.TypedRateLimitingInterface[string]
}
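
// The syncHandler and enqueueDeployment fields above exist so package-internal
// tests can substitute fakes. A minimal sketch of such an override (hypothetical
// test code, not part of this file):
//
//	dc, _ := NewDeploymentController(ctx, dInformer, rsInformer, podInformer, client)
//	var seen []string
//	dc.syncHandler = func(ctx context.Context, key string) error {
//		seen = append(seen, key) // record the key instead of reconciling
//		return nil
//	}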

// NewDeploymentController creates a new DeploymentController.
func NewDeploymentController(ctx context.Context, dInformer appsinformers.DeploymentInformer, rsInformer appsinformers.ReplicaSetInformer, podInformer coreinformers.PodInformer, client clientset.Interface) (*DeploymentController, error) {
	eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
	logger := klog.FromContext(ctx)
	dc := &DeploymentController{
		client:           client,
		eventBroadcaster: eventBroadcaster,
		eventRecorder:    eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "deployment-controller"}),
		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
			workqueue.DefaultTypedControllerRateLimiter[string](),
			workqueue.TypedRateLimitingQueueConfig[string]{
				Name: "deployment",
			},
		),
	}
	dc.rsControl = controller.RealRSControl{
		KubeClient: client,
		Recorder:   dc.eventRecorder,
	}

	dInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			dc.addDeployment(logger, obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			dc.updateDeployment(logger, oldObj, newObj)
		},
		// This will enter the sync loop and no-op, because the deployment has been deleted from the store.
		DeleteFunc: func(obj interface{}) {
			dc.deleteDeployment(logger, obj)
		},
	})
	rsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			dc.addReplicaSet(logger, obj)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			dc.updateReplicaSet(logger, oldObj, newObj)
		},
		DeleteFunc: func(obj interface{}) {
			dc.deleteReplicaSet(logger, obj)
		},
	})
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		DeleteFunc: func(obj interface{}) {
			dc.deletePod(logger, obj)
		},
	})

	dc.syncHandler = dc.syncDeployment
	dc.enqueueDeployment = dc.enqueue

	dc.dLister = dInformer.Lister()
	dc.rsLister = rsInformer.Lister()
	dc.podLister = podInformer.Lister()
	dc.dListerSynced = dInformer.Informer().HasSynced
	dc.rsListerSynced = rsInformer.Informer().HasSynced
	dc.podListerSynced = podInformer.Informer().HasSynced
	return dc, nil
}
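
// A typical caller wires the controller to a shared informer factory and then
// starts both. A minimal sketch, assuming a configured *rest.Config in cfg
// (the surrounding wiring and names are hypothetical, not part of this package):
//
//	import (
//		"context"
//
//		"k8s.io/client-go/informers"
//		"k8s.io/client-go/kubernetes"
//		"k8s.io/client-go/rest"
//	)
//
//	func startDeploymentController(ctx context.Context, cfg *rest.Config) error {
//		client := kubernetes.NewForConfigOrDie(cfg)
//		factory := informers.NewSharedInformerFactory(client, 0) // no periodic resync
//		dc, err := NewDeploymentController(ctx,
//			factory.Apps().V1().Deployments(),
//			factory.Apps().V1().ReplicaSets(),
//			factory.Core().V1().Pods(),
//			client)
//		if err != nil {
//			return err
//		}
//		factory.Start(ctx.Done()) // run the informers registered above
//		dc.Run(ctx, 5)            // block, processing with 5 workers, until ctx is done
//		return nil
//	}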

// Run begins watching and syncing.
func (dc *DeploymentController) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	dc.eventBroadcaster.StartStructuredLogging(3)
	dc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.client.CoreV1().Events("")})
	defer dc.eventBroadcaster.Shutdown()

	defer dc.queue.ShutDown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting controller", "controller", "deployment")
	defer logger.Info("Shutting down controller", "controller", "deployment")

	if !cache.WaitForNamedCacheSync("deployment", ctx.Done(), dc.dListerSynced, dc.rsListerSynced, dc.podListerSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, dc.worker, time.Second)
	}

	<-ctx.Done()
}

func (dc *DeploymentController) addDeployment(logger klog.Logger, obj interface{}) {
	d := obj.(*apps.Deployment)
	logger.V(4).Info("Adding deployment", "deployment", klog.KObj(d))
	dc.enqueueDeployment(d)
}

func (dc *DeploymentController) updateDeployment(logger klog.Logger, old, cur interface{}) {
	oldD := old.(*apps.Deployment)
	curD := cur.(*apps.Deployment)
	logger.V(4).Info("Updating deployment", "deployment", klog.KObj(oldD))
	dc.enqueueDeployment(curD)
}

func (dc *DeploymentController) deleteDeployment(logger klog.Logger, obj interface{}) {
	d, ok := obj.(*apps.Deployment)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
			return
		}
		d, ok = tombstone.Obj.(*apps.Deployment)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Deployment %#v", obj))
			return
		}
	}
	logger.V(4).Info("Deleting deployment", "deployment", klog.KObj(d))
	dc.enqueueDeployment(d)
}

// addReplicaSet enqueues the deployment that manages a ReplicaSet when the ReplicaSet is created.
func (dc *DeploymentController) addReplicaSet(logger klog.Logger, obj interface{}) {
	rs := obj.(*apps.ReplicaSet)

	if rs.DeletionTimestamp != nil {
		// On a restart of the controller manager, it's possible for an object to
		// show up in a state that is already pending deletion.
		dc.deleteReplicaSet(logger, rs)
		return
	}
	// If it has a ControllerRef, that's all that matters.
	if controllerRef := metav1.GetControllerOf(rs); controllerRef != nil {
		d := dc.resolveControllerRef(rs.Namespace, controllerRef)
		if d == nil {
			return
		}
		logger.V(4).Info("ReplicaSet added", "replicaSet", klog.KObj(rs))
		dc.enqueueDeployment(d)
		return
	}

	// Otherwise, it's an orphan. Get a list of all matching Deployments and sync
	// them to see if anyone wants to adopt it.
	ds := dc.getDeploymentsForReplicaSet(logger, rs)
	if len(ds) == 0 {
		return
	}
	logger.V(4).Info("Orphan ReplicaSet added", "replicaSet", klog.KObj(rs))
	for _, d := range ds {
		dc.enqueueDeployment(d)
	}
}

// getDeploymentsForReplicaSet returns a list of Deployments that potentially
// match a ReplicaSet.
func (dc *DeploymentController) getDeploymentsForReplicaSet(logger klog.Logger, rs *apps.ReplicaSet) []*apps.Deployment {
	deployments, err := util.GetDeploymentsForReplicaSet(dc.dLister, rs)
	if err != nil || len(deployments) == 0 {
		return nil
	}
	// Because all ReplicaSets belonging to a deployment should have a unique label key,
	// there should never be more than one deployment returned by the above method.
	// If that happens we should probably dynamically repair the situation by ultimately
	// trying to clean up one of the controllers; for now we just log the conflict and
	// return all of the matches.
	if len(deployments) > 1 {
		// ControllerRef will ensure we don't do anything crazy, but more than one
		// item in this list nevertheless constitutes user error.
		logger.V(4).Info("user error! more than one deployment is selecting replica set",
			"replicaSet", klog.KObj(rs), "labels", rs.Labels, "deployment", klog.KObj(deployments[0]))
	}
	return deployments
}

// updateReplicaSet figures out what deployment(s) manage a ReplicaSet when the
// ReplicaSet is updated and wakes them up. If anything about the ReplicaSet has
// changed, we need to awaken both the old and new deployments. old and cur must
// be *apps.ReplicaSet types.
func (dc *DeploymentController) updateReplicaSet(logger klog.Logger, old, cur interface{}) {
	curRS := cur.(*apps.ReplicaSet)
	oldRS := old.(*apps.ReplicaSet)
	if curRS.ResourceVersion == oldRS.ResourceVersion {
		// Periodic resync will send update events for all known replica sets.
		// Two different versions of the same replica set will always have different RVs.
		return
	}

	curControllerRef := metav1.GetControllerOf(curRS)
	oldControllerRef := metav1.GetControllerOf(oldRS)
	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
	if controllerRefChanged && oldControllerRef != nil {
		// The ControllerRef was changed. Sync the old controller, if any.
		if d := dc.resolveControllerRef(oldRS.Namespace, oldControllerRef); d != nil {
			dc.enqueueDeployment(d)
		}
	}
	// If it has a ControllerRef, that's all that matters.
	if curControllerRef != nil {
		d := dc.resolveControllerRef(curRS.Namespace, curControllerRef)
		if d == nil {
			return
		}
		logger.V(4).Info("ReplicaSet updated", "replicaSet", klog.KObj(curRS))
		dc.enqueueDeployment(d)
		return
	}

	// Otherwise, it's an orphan. If anything changed, sync matching controllers
	// to see if anyone wants to adopt it now.
	labelChanged := !reflect.DeepEqual(curRS.Labels, oldRS.Labels)
	if labelChanged || controllerRefChanged {
		ds := dc.getDeploymentsForReplicaSet(logger, curRS)
		if len(ds) == 0 {
			return
		}
		logger.V(4).Info("Orphan ReplicaSet updated", "replicaSet", klog.KObj(curRS))
		for _, d := range ds {
			dc.enqueueDeployment(d)
		}
	}
}

// deleteReplicaSet enqueues the deployment that manages a ReplicaSet when
// the ReplicaSet is deleted. obj could be an *apps.ReplicaSet, or
// a DeletedFinalStateUnknown marker item.
func (dc *DeploymentController) deleteReplicaSet(logger klog.Logger, obj interface{}) {
	rs, ok := obj.(*apps.ReplicaSet)

	// When a delete is dropped, the relist will notice a replica set in the store
	// not in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the ReplicaSet
	// changed labels, the new deployment will not be woken up until the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
			return
		}
		rs, ok = tombstone.Obj.(*apps.ReplicaSet)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a ReplicaSet %#v", obj))
			return
		}
	}

	controllerRef := metav1.GetControllerOf(rs)
	if controllerRef == nil {
		// No controller should care about orphans being deleted.
		return
	}
	d := dc.resolveControllerRef(rs.Namespace, controllerRef)
	if d == nil {
		return
	}
	logger.V(4).Info("ReplicaSet deleted", "replicaSet", klog.KObj(rs))
	dc.enqueueDeployment(d)
}

// deletePod will enqueue a Recreate Deployment once all of its pods have stopped running.
func (dc *DeploymentController) deletePod(logger klog.Logger, obj interface{}) {
	pod, ok := obj.(*v1.Pod)

	// When a delete is dropped, the relist will notice a pod in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the Pod
	// changed labels, the new deployment will not be woken up until the periodic resync.
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
			return
		}
		pod, ok = tombstone.Obj.(*v1.Pod)
		if !ok {
			utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %#v", obj))
			return
		}
	}
	d := dc.getDeploymentForPod(logger, pod)
	if d == nil {
		return
	}
	logger.V(4).Info("Pod deleted", "pod", klog.KObj(pod))
	if d.Spec.Strategy.Type == apps.RecreateDeploymentStrategyType {
		// Sync if this Deployment now has no more Pods.
		rsList, err := util.ListReplicaSets(d, util.RsListFromClient(dc.client.AppsV1()))
		if err != nil {
			return
		}
		podMap, err := dc.getPodMapForDeployment(d, rsList)
		if err != nil {
			return
		}
		numPods := 0
		for _, podList := range podMap {
			numPods += len(podList)
		}
		if numPods == 0 {
			dc.enqueueDeployment(d)
		}
	}
}

func (dc *DeploymentController) enqueue(deployment *apps.Deployment) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", deployment, err))
		return
	}

	dc.queue.Add(key)
}

func (dc *DeploymentController) enqueueRateLimited(deployment *apps.Deployment) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", deployment, err))
		return
	}

	dc.queue.AddRateLimited(key)
}

// enqueueAfter will enqueue a deployment after the provided amount of time.
func (dc *DeploymentController) enqueueAfter(deployment *apps.Deployment, after time.Duration) {
	key, err := controller.KeyFunc(deployment)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", deployment, err))
		return
	}

	dc.queue.AddAfter(key, after)
}

// getDeploymentForPod returns the deployment managing the given Pod.
func (dc *DeploymentController) getDeploymentForPod(logger klog.Logger, pod *v1.Pod) *apps.Deployment {
	// Find the owning replica set
	var rs *apps.ReplicaSet
	var err error
	controllerRef := metav1.GetControllerOf(pod)
	if controllerRef == nil {
		// No controller owns this Pod.
		return nil
	}
	if controllerRef.Kind != apps.SchemeGroupVersion.WithKind("ReplicaSet").Kind {
		// Not a pod owned by a replica set.
		return nil
	}
	rs, err = dc.rsLister.ReplicaSets(pod.Namespace).Get(controllerRef.Name)
	if err != nil || rs.UID != controllerRef.UID {
		logger.V(4).Info("Cannot get replicaset for pod", "ownerReference", controllerRef.Name, "pod", klog.KObj(pod), "err", err)
		return nil
	}

	// Now find the Deployment that owns that ReplicaSet.
	controllerRef = metav1.GetControllerOf(rs)
	if controllerRef == nil {
		return nil
	}
	return dc.resolveControllerRef(rs.Namespace, controllerRef)
}

// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (dc *DeploymentController) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *apps.Deployment {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	if controllerRef.Kind != controllerKind.Kind {
		return nil
	}
	d, err := dc.dLister.Deployments(namespace).Get(controllerRef.Name)
	if err != nil {
		return nil
	}
	if d.UID != controllerRef.UID {
		// The controller we found with this Name is not the same one that the
		// ControllerRef points to.
		return nil
	}
	return d
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (dc *DeploymentController) worker(ctx context.Context) {
	for dc.processNextWorkItem(ctx) {
	}
}

func (dc *DeploymentController) processNextWorkItem(ctx context.Context) bool {
	key, quit := dc.queue.Get()
	if quit {
		return false
	}
	defer dc.queue.Done(key)

	err := dc.syncHandler(ctx, key)
	dc.handleErr(ctx, err, key)

	return true
}

func (dc *DeploymentController) handleErr(ctx context.Context, err error, key string) {
	logger := klog.FromContext(ctx)
	if err == nil || errors.HasStatusCause(err, v1.NamespaceTerminatingCause) {
		dc.queue.Forget(key)
		return
	}
	ns, name, keyErr := cache.SplitMetaNamespaceKey(key)
	if keyErr != nil {
		logger.Error(err, "Failed to split meta namespace cache key", "cacheKey", key)
	}

	if dc.queue.NumRequeues(key) < maxRetries {
		logger.V(2).Info("Error syncing deployment", "deployment", klog.KRef(ns, name), "err", err)
		dc.queue.AddRateLimited(key)
		return
	}

	utilruntime.HandleError(err)
	logger.V(2).Info("Dropping deployment out of the queue", "deployment", klog.KRef(ns, name), "err", err)
	dc.queue.Forget(key)
}

// getReplicaSetsForDeployment uses ControllerRefManager to reconcile
// ControllerRef by adopting and orphaning.
// It returns the list of ReplicaSets that this Deployment should manage.
func (dc *DeploymentController) getReplicaSetsForDeployment(ctx context.Context, d *apps.Deployment) ([]*apps.ReplicaSet, error) {
	// List all ReplicaSets to find those we own but that no longer match our
	// selector. They will be orphaned by ClaimReplicaSets().
	rsList, err := dc.rsLister.ReplicaSets(d.Namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	deploymentSelector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return nil, fmt.Errorf("deployment %s/%s has invalid label selector: %v", d.Namespace, d.Name, err)
	}
	// If any adoptions are attempted, we should first recheck for deletion with
	// an uncached quorum read sometime after listing ReplicaSets (see #42639).
	canAdoptFunc := controller.RecheckDeletionTimestamp(func(ctx context.Context) (metav1.Object, error) {
		fresh, err := dc.client.AppsV1().Deployments(d.Namespace).Get(ctx, d.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		if fresh.UID != d.UID {
			return nil, fmt.Errorf("original Deployment %v/%v is gone: got uid %v, wanted %v", d.Namespace, d.Name, fresh.UID, d.UID)
		}
		return fresh, nil
	})
	cm := controller.NewReplicaSetControllerRefManager(dc.rsControl, d, deploymentSelector, controllerKind, canAdoptFunc)
	return cm.ClaimReplicaSets(ctx, rsList)
}

// getPodMapForDeployment returns the Pods managed by a Deployment.
//
// It returns a map from ReplicaSet UID to a list of Pods controlled by that RS,
// according to the Pod's ControllerRef.
// NOTE: The pod pointers returned by this method point to the pod objects in the
// cache and thus shouldn't be modified in any way.
func (dc *DeploymentController) getPodMapForDeployment(d *apps.Deployment, rsList []*apps.ReplicaSet) (map[types.UID][]*v1.Pod, error) {
	// Get all Pods that potentially belong to this Deployment.
	selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector)
	if err != nil {
		return nil, err
	}
	pods, err := dc.podLister.Pods(d.Namespace).List(selector)
	if err != nil {
		return nil, err
	}
	// Group Pods by their controller (if it's in rsList).
	podMap := make(map[types.UID][]*v1.Pod, len(rsList))
	for _, rs := range rsList {
		podMap[rs.UID] = []*v1.Pod{}
	}
	for _, pod := range pods {
		// Do not ignore inactive Pods because Recreate Deployments need to verify that no
		// Pods from older versions are running before spinning up new Pods.
		controllerRef := metav1.GetControllerOf(pod)
		if controllerRef == nil {
			continue
		}
		// Only append if we care about this UID.
		if _, ok := podMap[controllerRef.UID]; ok {
			podMap[controllerRef.UID] = append(podMap[controllerRef.UID], pod)
		}
	}
	return podMap, nil
}

// syncDeployment will sync the deployment with the given key.
// This function is not meant to be invoked concurrently with the same key.
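// Keys have the standard cache format "<namespace>/<name>"; for example, a
// Deployment named "web" in namespace "prod" is synced under the key "prod/web".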
func (dc *DeploymentController) syncDeployment(ctx context.Context, key string) error {
	logger := klog.FromContext(ctx)
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		logger.Error(err, "Failed to split meta namespace cache key", "cacheKey", key)
		return err
	}

	startTime := time.Now()
	logger.V(4).Info("Started syncing deployment", "deployment", klog.KRef(namespace, name), "startTime", startTime)
	defer func() {
		logger.V(4).Info("Finished syncing deployment", "deployment", klog.KRef(namespace, name), "duration", time.Since(startTime))
	}()

	deployment, err := dc.dLister.Deployments(namespace).Get(name)
	if errors.IsNotFound(err) {
		logger.V(2).Info("Deployment has been deleted", "deployment", klog.KRef(namespace, name))
		return nil
	}
	if err != nil {
		return err
	}

	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	d := deployment.DeepCopy()

	everything := metav1.LabelSelector{}
	if reflect.DeepEqual(d.Spec.Selector, &everything) {
		dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
		if d.Status.ObservedGeneration < d.Generation {
			d.Status.ObservedGeneration = d.Generation
			if _, err := dc.client.AppsV1().Deployments(d.Namespace).UpdateStatus(ctx, d, metav1.UpdateOptions{}); err != nil {
				return err
			}
		}
		return nil
	}

	// List ReplicaSets owned by this Deployment, while reconciling ControllerRef
	// through adoption/orphaning.
	rsList, err := dc.getReplicaSetsForDeployment(ctx, d)
	if err != nil {
		return err
	}
	// List all Pods owned by this Deployment, grouped by their ReplicaSet.
	// Current uses of the podMap are:
	//
	// * check if a Pod is labeled correctly with the pod-template-hash label.
	// * check that no old Pods are running in the middle of Recreate Deployments.
	podMap, err := dc.getPodMapForDeployment(d, rsList)
	if err != nil {
		return err
	}

	if d.DeletionTimestamp != nil {
		return dc.syncStatusOnly(ctx, d, rsList)
	}

	// Update deployment conditions with an Unknown condition when pausing/resuming
	// a deployment. In this way, we can be sure that we won't time out when a user
	// resumes a Deployment that has progressDeadlineSeconds set.
	if err = dc.checkPausedConditions(ctx, d); err != nil {
		return err
	}

	if d.Spec.Paused {
		return dc.sync(ctx, d, rsList)
	}

	// Rollback is not re-entrant in case the underlying replica sets are updated with a new
	// revision, so we should not proceed to update replica sets until the deployment has
	// cleaned up its rollback spec in subsequent enqueues.
	if getRollbackTo(d) != nil {
		return dc.rollback(ctx, d, rsList)
	}

	scalingEvent, err := dc.isScalingEvent(ctx, d, rsList)
	if err != nil {
		return err
	}
	if scalingEvent {
		return dc.sync(ctx, d, rsList)
	}

	switch d.Spec.Strategy.Type {
	case apps.RecreateDeploymentStrategyType:
		return dc.rolloutRecreate(ctx, d, rsList, podMap)
	case apps.RollingUpdateDeploymentStrategyType:
		return dc.rolloutRolling(ctx, d, rsList)
	}
	return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
}