k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/endpointslice/endpointslice_controller.go

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package endpointslice
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"time"
    23  
    24  	"golang.org/x/time/rate"
    25  
    26  	v1 "k8s.io/api/core/v1"
    27  	discovery "k8s.io/api/discovery/v1"
    28  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    29  	"k8s.io/apimachinery/pkg/labels"
    30  	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    31  	"k8s.io/apimachinery/pkg/util/wait"
    32  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    33  	coreinformers "k8s.io/client-go/informers/core/v1"
    34  	discoveryinformers "k8s.io/client-go/informers/discovery/v1"
    35  	clientset "k8s.io/client-go/kubernetes"
    36  	"k8s.io/client-go/kubernetes/scheme"
    37  	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    38  	corelisters "k8s.io/client-go/listers/core/v1"
    39  	discoverylisters "k8s.io/client-go/listers/discovery/v1"
    40  	"k8s.io/client-go/tools/cache"
    41  	"k8s.io/client-go/tools/record"
    42  	"k8s.io/client-go/util/workqueue"
    43  	endpointslicerec "k8s.io/endpointslice"
    44  	endpointslicemetrics "k8s.io/endpointslice/metrics"
    45  	"k8s.io/endpointslice/topologycache"
    46  	endpointsliceutil "k8s.io/endpointslice/util"
    47  	"k8s.io/klog/v2"
    48  	"k8s.io/kubernetes/pkg/controller"
    49  	endpointslicepkg "k8s.io/kubernetes/pkg/controller/util/endpointslice"
    50  	"k8s.io/kubernetes/pkg/features"
    51  )
    52  
    53  const (
    54  	// maxRetries is the number of times a service will be retried before it is
    55  	// dropped out of the queue. Any sync error, such as a failure to create or
     56  	// update an EndpointSlice, could trigger a retry. With the current
     57  	// rate-limiter in use (1s*2^(numRetries-1)), the following numbers represent
    58  	// the sequence of delays between successive queuings of a service.
    59  	//
    60  	// 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s, 512s, 1000s (max)
    61  	maxRetries = 15
    62  
    63  	// endpointSliceChangeMinSyncDelay indicates the minimum delay before
    64  	// queuing a syncService call after an EndpointSlice changes. If
    65  	// endpointUpdatesBatchPeriod is greater than this value, it will be used
    66  	// instead. This helps batch processing of changes to multiple
    67  	// EndpointSlices.
    68  	endpointSliceChangeMinSyncDelay = 1 * time.Second
    69  
    70  	// defaultSyncBackOff is the default backoff period for syncService calls.
    71  	defaultSyncBackOff = 1 * time.Second
    72  	// maxSyncBackOff is the max backoff period for syncService calls.
    73  	maxSyncBackOff = 1000 * time.Second
    74  
     75  	// controllerName is a unique value used with LabelManagedBy to indicate
    76  	// the component managing an EndpointSlice.
    77  	controllerName = "endpointslice-controller.k8s.io"
    78  )
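
         // Illustrative sketch, not part of the original file: one way to derive the
         // retry delay sequence listed above, assuming the per-item exponential failure
         // rate limiter doubles defaultSyncBackOff on each retry and caps the result
         // at maxSyncBackOff. The function name is hypothetical.
         func exampleRetryDelays() []time.Duration {
         	delays := make([]time.Duration, 0, maxRetries)
         	delay := defaultSyncBackOff
         	for i := 0; i < maxRetries; i++ {
         		delays = append(delays, delay)
         		// Double the delay for the next retry, capped at maxSyncBackOff.
         		delay *= 2
         		if delay > maxSyncBackOff {
         			delay = maxSyncBackOff
         		}
         	}
         	// Yields 1s, 2s, 4s, ..., 512s, then 1000s for the remaining retries.
         	return delays
         }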
    79  
    80  // NewController creates and initializes a new Controller
    81  func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
    82  	serviceInformer coreinformers.ServiceInformer,
    83  	nodeInformer coreinformers.NodeInformer,
    84  	endpointSliceInformer discoveryinformers.EndpointSliceInformer,
    85  	maxEndpointsPerSlice int32,
    86  	client clientset.Interface,
    87  	endpointUpdatesBatchPeriod time.Duration,
    88  ) *Controller {
    89  	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
    90  	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
    91  
    92  	endpointslicemetrics.RegisterMetrics()
    93  
    94  	c := &Controller{
    95  		client: client,
    96  		// This is similar to the DefaultControllerRateLimiter, just with a
    97  		// significantly higher default backoff (1s vs 5ms). This controller
    98  		// processes events that can require significant EndpointSlice changes,
    99  		// such as an update to a Service or Deployment. A more significant
    100  		// rate-limiter backoff here helps ensure that the Controller does not
   101  		// overwhelm the API Server.
   102  		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
   103  			workqueue.NewTypedMaxOfRateLimiter(
   104  				workqueue.NewTypedItemExponentialFailureRateLimiter[string](defaultSyncBackOff, maxSyncBackOff),
    105  				// 10 qps, 100 bucket size. This is only for retry speed and it's
   106  				// only the overall factor (not per item).
   107  				&workqueue.TypedBucketRateLimiter[string]{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
   108  			),
   109  			workqueue.TypedRateLimitingQueueConfig[string]{
   110  				Name: "endpoint_slice",
   111  			},
   112  		),
   113  		workerLoopPeriod: time.Second,
   114  	}
   115  
   116  	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   117  		AddFunc: c.onServiceUpdate,
   118  		UpdateFunc: func(old, cur interface{}) {
   119  			c.onServiceUpdate(cur)
   120  		},
   121  		DeleteFunc: c.onServiceDelete,
   122  	})
   123  	c.serviceLister = serviceInformer.Lister()
   124  	c.servicesSynced = serviceInformer.Informer().HasSynced
   125  
   126  	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   127  		AddFunc:    c.addPod,
   128  		UpdateFunc: c.updatePod,
   129  		DeleteFunc: c.deletePod,
   130  	})
   131  	c.podLister = podInformer.Lister()
   132  	c.podsSynced = podInformer.Informer().HasSynced
   133  
   134  	c.nodeLister = nodeInformer.Lister()
   135  	c.nodesSynced = nodeInformer.Informer().HasSynced
   136  
   137  	logger := klog.FromContext(ctx)
   138  	endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   139  		AddFunc: c.onEndpointSliceAdd,
   140  		UpdateFunc: func(oldObj, newObj interface{}) {
   141  			c.onEndpointSliceUpdate(logger, oldObj, newObj)
   142  		},
   143  		DeleteFunc: c.onEndpointSliceDelete,
   144  	})
   145  
   146  	c.endpointSliceLister = endpointSliceInformer.Lister()
   147  	c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
   148  	c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()
   149  
   150  	c.maxEndpointsPerSlice = maxEndpointsPerSlice
   151  
   152  	c.triggerTimeTracker = endpointsliceutil.NewTriggerTimeTracker()
   153  
   154  	c.eventBroadcaster = broadcaster
   155  	c.eventRecorder = recorder
   156  
   157  	c.endpointUpdatesBatchPeriod = endpointUpdatesBatchPeriod
   158  
   159  	if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
   160  		nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   161  			AddFunc: func(obj interface{}) {
   162  				c.addNode(logger, obj)
   163  			},
   164  			UpdateFunc: func(oldObj, newObj interface{}) {
   165  				c.updateNode(logger, oldObj, newObj)
   166  			},
   167  			DeleteFunc: func(obj interface{}) {
   168  				c.deleteNode(logger, obj)
   169  			},
   170  		})
   171  
   172  		c.topologyCache = topologycache.NewTopologyCache()
   173  	}
   174  
   175  	c.reconciler = endpointslicerec.NewReconciler(
   176  		c.client,
   177  		c.nodeLister,
   178  		c.maxEndpointsPerSlice,
   179  		c.endpointSliceTracker,
   180  		c.topologyCache,
   181  		c.eventRecorder,
   182  		controllerName,
   183  		endpointslicerec.WithTrafficDistributionEnabled(utilfeature.DefaultFeatureGate.Enabled(features.ServiceTrafficDistribution)),
   184  	)
   185  
   186  	return c
   187  }
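
         // Illustrative usage sketch, not part of the original file: one plausible way a
         // caller (such as a controller manager) might wire this controller to a shared
         // informer factory from k8s.io/client-go/informers. The resync period, worker
         // count, and maxEndpointsPerSlice value below are assumptions for the example.
         //
         //	factory := informers.NewSharedInformerFactory(client, 0)
         //	c := NewController(ctx,
         //		factory.Core().V1().Pods(),
         //		factory.Core().V1().Services(),
         //		factory.Core().V1().Nodes(),
         //		factory.Discovery().V1().EndpointSlices(),
         //		100,         // maxEndpointsPerSlice (assumed)
         //		client,      // clientset.Interface
         //		time.Second, // endpointUpdatesBatchPeriod (assumed)
         //	)
         //	factory.Start(ctx.Done())
         //	c.Run(ctx, 5) // blocks until ctx is cancelled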
   188  
   189  // Controller manages selector-based service endpoint slices
   190  type Controller struct {
   191  	client           clientset.Interface
   192  	eventBroadcaster record.EventBroadcaster
   193  	eventRecorder    record.EventRecorder
   194  
   195  	// serviceLister is able to list/get services and is populated by the
   196  	// shared informer passed to NewController
   197  	serviceLister corelisters.ServiceLister
   198  	// servicesSynced returns true if the service shared informer has been synced at least once.
   199  	// Added as a member to the struct to allow injection for testing.
   200  	servicesSynced cache.InformerSynced
   201  
   202  	// podLister is able to list/get pods and is populated by the
   203  	// shared informer passed to NewController
   204  	podLister corelisters.PodLister
   205  	// podsSynced returns true if the pod shared informer has been synced at least once.
   206  	// Added as a member to the struct to allow injection for testing.
   207  	podsSynced cache.InformerSynced
   208  
   209  	// endpointSliceLister is able to list/get endpoint slices and is populated by the
   210  	// shared informer passed to NewController
   211  	endpointSliceLister discoverylisters.EndpointSliceLister
   212  	// endpointSlicesSynced returns true if the endpoint slice shared informer has been synced at least once.
   213  	// Added as a member to the struct to allow injection for testing.
   214  	endpointSlicesSynced cache.InformerSynced
   215  	// endpointSliceTracker tracks the list of EndpointSlices and associated
   216  	// resource versions expected for each Service. It can help determine if a
   217  	// cached EndpointSlice is out of date.
   218  	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
   219  
   220  	// nodeLister is able to list/get nodes and is populated by the
   221  	// shared informer passed to NewController
   222  	nodeLister corelisters.NodeLister
   223  	// nodesSynced returns true if the node shared informer has been synced at least once.
   224  	// Added as a member to the struct to allow injection for testing.
   225  	nodesSynced cache.InformerSynced
   226  
    227  	// reconciler is a utility used to reconcile EndpointSlice changes.
   228  	reconciler *endpointslicerec.Reconciler
   229  
    230  	// triggerTimeTracker is a utility used to compute and export the
   231  	// EndpointsLastChangeTriggerTime annotation.
   232  	triggerTimeTracker *endpointsliceutil.TriggerTimeTracker
   233  
   234  	// Services that need to be updated. A channel is inappropriate here,
   235  	// because it allows services with lots of pods to be serviced much
   236  	// more often than services with few pods; it also would cause a
   237  	// service that's inserted multiple times to be processed more than
   238  	// necessary.
   239  	queue workqueue.TypedRateLimitingInterface[string]
   240  
   241  	// maxEndpointsPerSlice references the maximum number of endpoints that
   242  	// should be added to an EndpointSlice
   243  	maxEndpointsPerSlice int32
   244  
   245  	// workerLoopPeriod is the time between worker runs. The workers
    246  	// process the queue of service and pod changes.
   247  	workerLoopPeriod time.Duration
   248  
   249  	// endpointUpdatesBatchPeriod is an artificial delay added to all service syncs triggered by pod changes.
    250  	// This can be used to reduce the overall number of endpoint slice updates.
   251  	endpointUpdatesBatchPeriod time.Duration
   252  
   253  	// topologyCache tracks the distribution of Nodes and endpoints across zones
   254  	// to enable TopologyAwareHints.
   255  	topologyCache *topologycache.TopologyCache
   256  }
   257  
    258  // Run will not return until the context is cancelled.
   259  func (c *Controller) Run(ctx context.Context, workers int) {
   260  	defer utilruntime.HandleCrash()
   261  
   262  	// Start events processing pipeline.
   263  	c.eventBroadcaster.StartLogging(klog.Infof)
   264  	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
   265  	defer c.eventBroadcaster.Shutdown()
   266  
   267  	defer c.queue.ShutDown()
   268  
   269  	logger := klog.FromContext(ctx)
   270  	logger.Info("Starting endpoint slice controller")
   271  	defer logger.Info("Shutting down endpoint slice controller")
   272  
   273  	if !cache.WaitForNamedCacheSync("endpoint_slice", ctx.Done(), c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
   274  		return
   275  	}
   276  
   277  	logger.V(2).Info("Starting worker threads", "total", workers)
   278  	for i := 0; i < workers; i++ {
   279  		go wait.Until(func() { c.worker(logger) }, c.workerLoopPeriod, ctx.Done())
   280  	}
   281  
   282  	<-ctx.Done()
   283  }
   284  
   285  // worker runs a worker thread that just dequeues items, processes them, and
   286  // marks them done. You may run as many of these in parallel as you wish; the
   287  // workqueue guarantees that they will not end up processing the same service
    288  // at the same time.
   289  func (c *Controller) worker(logger klog.Logger) {
   290  	for c.processNextWorkItem(logger) {
   291  	}
   292  }
   293  
   294  func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
   295  	cKey, quit := c.queue.Get()
   296  	if quit {
   297  		return false
   298  	}
   299  	defer c.queue.Done(cKey)
   300  
   301  	err := c.syncService(logger, cKey)
   302  	c.handleErr(logger, err, cKey)
   303  
   304  	return true
   305  }
   306  
   307  func (c *Controller) handleErr(logger klog.Logger, err error, key string) {
   308  	trackSync(err)
   309  
   310  	if err == nil {
   311  		c.queue.Forget(key)
   312  		return
   313  	}
   314  
   315  	if c.queue.NumRequeues(key) < maxRetries {
   316  		logger.Info("Error syncing endpoint slices for service, retrying", "key", key, "err", err)
   317  		c.queue.AddRateLimited(key)
   318  		return
   319  	}
   320  
   321  	logger.Info("Retry budget exceeded, dropping service out of the queue", "key", key, "err", err)
   322  	c.queue.Forget(key)
   323  	utilruntime.HandleError(err)
   324  }
   325  
   326  func (c *Controller) syncService(logger klog.Logger, key string) error {
   327  	startTime := time.Now()
   328  	defer func() {
   329  		logger.V(4).Info("Finished syncing service endpoint slices", "key", key, "elapsedTime", time.Since(startTime))
   330  	}()
   331  
   332  	namespace, name, err := cache.SplitMetaNamespaceKey(key)
   333  	if err != nil {
   334  		return err
   335  	}
   336  
   337  	service, err := c.serviceLister.Services(namespace).Get(name)
   338  	if err != nil {
   339  		if !apierrors.IsNotFound(err) {
   340  			return err
   341  		}
   342  
   343  		c.triggerTimeTracker.DeleteService(namespace, name)
   344  		c.reconciler.DeleteService(namespace, name)
   345  		c.endpointSliceTracker.DeleteService(namespace, name)
   346  		// The service has been deleted, return nil so that it won't be retried.
   347  		return nil
   348  	}
   349  
   350  	if service.Spec.Type == v1.ServiceTypeExternalName {
   351  		// services with Type ExternalName receive no endpoints from this controller;
   352  		// Ref: https://issues.k8s.io/105986
   353  		return nil
   354  	}
   355  
   356  	if service.Spec.Selector == nil {
   357  		// services without a selector receive no endpoint slices from this controller;
   358  		// these services will receive endpoint slices that are created out-of-band via the REST API.
   359  		return nil
   360  	}
   361  
   362  	logger.V(5).Info("About to update endpoint slices for service", "key", key)
   363  
   364  	podLabelSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
   365  	pods, err := c.podLister.Pods(service.Namespace).List(podLabelSelector)
   366  	if err != nil {
    367  		// Since we're reading from a local informer cache, this error should
    368  		// be practically impossible.
   369  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListPods",
   370  			"Error listing Pods for Service %s/%s: %v", service.Namespace, service.Name, err)
   371  		return err
   372  	}
   373  
   374  	esLabelSelector := labels.Set(map[string]string{
   375  		discovery.LabelServiceName: service.Name,
   376  		discovery.LabelManagedBy:   c.reconciler.GetControllerName(),
   377  	}).AsSelectorPreValidated()
   378  	endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector)
   379  
   380  	if err != nil {
    381  		// Since we're reading from a local informer cache, this error should
    382  		// be practically impossible.
   383  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices",
   384  			"Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
   385  		return err
   386  	}
   387  
   388  	// Drop EndpointSlices that have been marked for deletion to prevent the controller from getting stuck.
   389  	endpointSlices = dropEndpointSlicesPendingDeletion(endpointSlices)
   390  
   391  	if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
   392  		return endpointslicepkg.NewStaleInformerCache("EndpointSlice informer cache is out of date")
   393  	}
   394  
   395  	// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
   396  	// state of the trigger time tracker gets updated even if the sync turns out
   397  	// to be no-op and we don't update the EndpointSlice objects.
   398  	lastChangeTriggerTime := c.triggerTimeTracker.
   399  		ComputeEndpointLastChangeTriggerTime(namespace, service, pods)
   400  
   401  	err = c.reconciler.Reconcile(logger, service, pods, endpointSlices, lastChangeTriggerTime)
   402  	if err != nil {
   403  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToUpdateEndpointSlices",
   404  			"Error updating Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
   405  		return err
   406  	}
   407  
   408  	return nil
   409  }
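
         // Illustrative note, not part of the original file: the EndpointSlice label
         // selector built in syncService selects only slices that both belong to the
         // Service and are managed by this controller. For a hypothetical Service named
         // "web", the equivalent selector string would be:
         //
         //	labels.Set(map[string]string{
         //		discovery.LabelServiceName: "web",
         //		discovery.LabelManagedBy:   controllerName,
         //	}).String()
         //	// "endpointslice.kubernetes.io/managed-by=endpointslice-controller.k8s.io,kubernetes.io/service-name=web"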
   410  
    411  // onServiceUpdate queues the Service for processing.
   412  func (c *Controller) onServiceUpdate(obj interface{}) {
   413  	key, err := controller.KeyFunc(obj)
   414  	if err != nil {
   415  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
   416  		return
   417  	}
   418  
   419  	c.queue.Add(key)
   420  }
   421  
    422  // onServiceDelete queues the Service for processing.
   423  func (c *Controller) onServiceDelete(obj interface{}) {
   424  	key, err := controller.KeyFunc(obj)
   425  	if err != nil {
   426  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
   427  		return
   428  	}
   429  
   430  	c.queue.Add(key)
   431  }
   432  
    433  // onEndpointSliceAdd queues a sync for the relevant Service if the
   434  // EndpointSlice resource version does not match the expected version in the
   435  // endpointSliceTracker.
   436  func (c *Controller) onEndpointSliceAdd(obj interface{}) {
   437  	endpointSlice := obj.(*discovery.EndpointSlice)
   438  	if endpointSlice == nil {
   439  		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
   440  		return
   441  	}
   442  	if c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
   443  		c.queueServiceForEndpointSlice(endpointSlice)
   444  	}
   445  }
   446  
    447  // onEndpointSliceUpdate queues a sync for the relevant Service if
   448  // the EndpointSlice resource version does not match the expected version in the
   449  // endpointSliceTracker or the managed-by value of the EndpointSlice has changed
   450  // from or to this controller.
   451  func (c *Controller) onEndpointSliceUpdate(logger klog.Logger, prevObj, obj interface{}) {
   452  	prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
   453  	endpointSlice := obj.(*discovery.EndpointSlice)
   454  	if endpointSlice == nil || prevEndpointSlice == nil {
   455  		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
   456  		return
   457  	}
   458  	// EndpointSlice generation does not change when labels change. Although the
   459  	// controller will never change LabelServiceName, users might. This check
   460  	// ensures that we handle changes to this label.
   461  	svcName := endpointSlice.Labels[discovery.LabelServiceName]
   462  	prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
   463  	if svcName != prevSvcName {
   464  		logger.Info("label changed", "label", discovery.LabelServiceName, "oldService", prevSvcName, "newService", svcName, "endpointslice", klog.KObj(endpointSlice))
   465  		c.queueServiceForEndpointSlice(endpointSlice)
   466  		c.queueServiceForEndpointSlice(prevEndpointSlice)
   467  		return
   468  	}
   469  	if c.reconciler.ManagedByChanged(prevEndpointSlice, endpointSlice) || (c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
   470  		c.queueServiceForEndpointSlice(endpointSlice)
   471  	}
   472  }
   473  
    474  // onEndpointSliceDelete queues a sync for the relevant Service if the
   475  // EndpointSlice resource version does not match the expected version in the
   476  // endpointSliceTracker.
   477  func (c *Controller) onEndpointSliceDelete(obj interface{}) {
   478  	endpointSlice := getEndpointSliceFromDeleteAction(obj)
   479  	if endpointSlice != nil && c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
   480  		// This returns false if we didn't expect the EndpointSlice to be
   481  		// deleted. If that is the case, we queue the Service for another sync.
   482  		if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
   483  			c.queueServiceForEndpointSlice(endpointSlice)
   484  		}
   485  	}
   486  }
   487  
   488  // queueServiceForEndpointSlice attempts to queue the corresponding Service for
   489  // the provided EndpointSlice.
   490  func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
   491  	key, err := endpointslicerec.ServiceControllerKey(endpointSlice)
   492  	if err != nil {
   493  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for EndpointSlice %+v: %v", endpointSlice, err))
   494  		return
   495  	}
   496  
   497  	// queue after the max of endpointSliceChangeMinSyncDelay and
   498  	// endpointUpdatesBatchPeriod.
   499  	delay := endpointSliceChangeMinSyncDelay
   500  	if c.endpointUpdatesBatchPeriod > delay {
   501  		delay = c.endpointUpdatesBatchPeriod
   502  	}
   503  	c.queue.AddAfter(key, delay)
   504  }
   505  
   506  func (c *Controller) addPod(obj interface{}) {
   507  	pod := obj.(*v1.Pod)
   508  	services, err := endpointsliceutil.GetPodServiceMemberships(c.serviceLister, pod)
   509  	if err != nil {
   510  		utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
   511  		return
   512  	}
   513  	for key := range services {
   514  		c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
   515  	}
   516  }
   517  
   518  func (c *Controller) updatePod(old, cur interface{}) {
   519  	services := endpointsliceutil.GetServicesToUpdateOnPodChange(c.serviceLister, old, cur)
   520  	for key := range services {
   521  		c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
   522  	}
   523  }
   524  
    525  // When a pod is deleted, enqueue the services the pod used to be a member of.
    526  // obj could be a *v1.Pod, or a DeletionFinalStateUnknown marker item.
   527  func (c *Controller) deletePod(obj interface{}) {
   528  	pod := endpointsliceutil.GetPodFromDeleteAction(obj)
   529  	if pod != nil {
   530  		c.addPod(pod)
   531  	}
   532  }
   533  
   534  func (c *Controller) addNode(logger klog.Logger, obj interface{}) {
   535  	c.checkNodeTopologyDistribution(logger)
   536  }
   537  
   538  func (c *Controller) updateNode(logger klog.Logger, old, cur interface{}) {
   539  	oldNode := old.(*v1.Node)
   540  	curNode := cur.(*v1.Node)
   541  
   542  	// LabelTopologyZone may be added by cloud provider asynchronously after the Node is created.
   543  	// The topology cache should be updated in this case.
   544  	if isNodeReady(oldNode) != isNodeReady(curNode) ||
   545  		oldNode.Labels[v1.LabelTopologyZone] != curNode.Labels[v1.LabelTopologyZone] {
   546  		c.checkNodeTopologyDistribution(logger)
   547  	}
   548  }
   549  
   550  func (c *Controller) deleteNode(logger klog.Logger, obj interface{}) {
   551  	c.checkNodeTopologyDistribution(logger)
   552  }
   553  
   554  // checkNodeTopologyDistribution updates Nodes in the topology cache and then
   555  // queues any Services that are past the threshold.
   556  func (c *Controller) checkNodeTopologyDistribution(logger klog.Logger) {
   557  	if c.topologyCache == nil {
   558  		return
   559  	}
   560  	nodes, err := c.nodeLister.List(labels.Everything())
   561  	if err != nil {
   562  		logger.Error(err, "Error listing Nodes")
   563  		return
   564  	}
   565  	c.topologyCache.SetNodes(logger, nodes)
   566  	serviceKeys := c.topologyCache.GetOverloadedServices()
   567  	for _, serviceKey := range serviceKeys {
   568  		logger.V(2).Info("Queuing Service after Node change due to overloading", "key", serviceKey)
   569  		c.queue.Add(serviceKey)
   570  	}
   571  }
   572  
   573  // trackSync increments the EndpointSliceSyncs metric with the result of a sync.
   574  func trackSync(err error) {
   575  	metricLabel := "success"
   576  	if err != nil {
   577  		if endpointslicepkg.IsStaleInformerCacheErr(err) {
   578  			metricLabel = "stale"
   579  		} else {
   580  			metricLabel = "error"
   581  		}
   582  	}
   583  	endpointslicemetrics.EndpointSliceSyncs.WithLabelValues(metricLabel).Inc()
   584  }
   585  
   586  func dropEndpointSlicesPendingDeletion(endpointSlices []*discovery.EndpointSlice) []*discovery.EndpointSlice {
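         	// The filtering is done in place, reusing the backing array of the input
         	// slice; any EndpointSlice that already has a deletion timestamp is dropped.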
   587  	n := 0
   588  	for _, endpointSlice := range endpointSlices {
   589  		if endpointSlice.DeletionTimestamp == nil {
   590  			endpointSlices[n] = endpointSlice
   591  			n++
   592  		}
   593  	}
   594  	return endpointSlices[:n]
   595  }
   596  
   597  // getEndpointSliceFromDeleteAction parses an EndpointSlice from a delete action.
   598  func getEndpointSliceFromDeleteAction(obj interface{}) *discovery.EndpointSlice {
   599  	if endpointSlice, ok := obj.(*discovery.EndpointSlice); ok {
    600  		// The object is already an EndpointSlice, so it can be
    601  		// returned directly.
   602  		return endpointSlice
   603  	}
    604  	// If we reached here it means the EndpointSlice was deleted but its final state is unrecorded.
   605  	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
   606  	if !ok {
   607  		utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
   608  		return nil
   609  	}
   610  	endpointSlice, ok := tombstone.Obj.(*discovery.EndpointSlice)
   611  	if !ok {
    612  		utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not an EndpointSlice: %#v", obj))
   613  		return nil
   614  	}
   615  	return endpointSlice
   616  }
   617  
   618  // isNodeReady returns true if a node is ready; false otherwise.
   619  func isNodeReady(node *v1.Node) bool {
   620  	for _, c := range node.Status.Conditions {
   621  		if c.Type == v1.NodeReady {
   622  			return c.Status == v1.ConditionTrue
   623  		}
   624  	}
   625  	return false
   626  }