k8s.io/kubernetes@v1.29.3/pkg/controller/endpointslice/endpointslice_controller.go

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package endpointslice
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"time"
    23  
    24  	"golang.org/x/time/rate"
    25  
    26  	v1 "k8s.io/api/core/v1"
    27  	discovery "k8s.io/api/discovery/v1"
    28  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    29  	"k8s.io/apimachinery/pkg/labels"
    30  	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    31  	"k8s.io/apimachinery/pkg/util/wait"
    32  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    33  	coreinformers "k8s.io/client-go/informers/core/v1"
    34  	discoveryinformers "k8s.io/client-go/informers/discovery/v1"
    35  	clientset "k8s.io/client-go/kubernetes"
    36  	"k8s.io/client-go/kubernetes/scheme"
    37  	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    38  	corelisters "k8s.io/client-go/listers/core/v1"
    39  	discoverylisters "k8s.io/client-go/listers/discovery/v1"
    40  	"k8s.io/client-go/tools/cache"
    41  	"k8s.io/client-go/tools/record"
    42  	"k8s.io/client-go/util/workqueue"
    43  	endpointslicerec "k8s.io/endpointslice"
    44  	endpointslicemetrics "k8s.io/endpointslice/metrics"
    45  	"k8s.io/endpointslice/topologycache"
    46  	endpointsliceutil "k8s.io/endpointslice/util"
    47  	"k8s.io/klog/v2"
    48  	"k8s.io/kubernetes/pkg/controller"
    49  	endpointslicepkg "k8s.io/kubernetes/pkg/controller/util/endpointslice"
    50  	"k8s.io/kubernetes/pkg/features"
    51  )
    52  
    53  const (
    54  	// maxRetries is the number of times a service will be retried before it is
    55  	// dropped out of the queue. Any sync error, such as a failure to create or
     56  	// update an EndpointSlice, could trigger a retry. With the current
    57  	// rate-limiter in use (1s*2^(numRetries-1)) the following numbers represent
    58  	// the sequence of delays between successive queuings of a service.
    59  	//
    60  	// 1s, 2s, 4s, 8s, 16s, 32s, 64s, 128s, 256s, 512s, 1000s (max)
    61  	maxRetries = 15
    62  
    63  	// endpointSliceChangeMinSyncDelay indicates the minimum delay before
    64  	// queuing a syncService call after an EndpointSlice changes. If
    65  	// endpointUpdatesBatchPeriod is greater than this value, it will be used
    66  	// instead. This helps batch processing of changes to multiple
    67  	// EndpointSlices.
    68  	endpointSliceChangeMinSyncDelay = 1 * time.Second
    69  
    70  	// defaultSyncBackOff is the default backoff period for syncService calls.
    71  	defaultSyncBackOff = 1 * time.Second
    72  	// maxSyncBackOff is the max backoff period for syncService calls.
    73  	maxSyncBackOff = 1000 * time.Second
    74  
     75  	// controllerName is a unique value used with LabelManagedBy to indicate
    76  	// the component managing an EndpointSlice.
    77  	controllerName = "endpointslice-controller.k8s.io"
    78  )
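// The retry delays listed above come straight from the per-item exponential
// failure rate limiter that NewController plugs into the work queue. A minimal
// standalone sketch (not part of this file) that reproduces the sequence with
// the same client-go helper and the constants above:
//
//	package main
//
//	import (
//		"fmt"
//		"time"
//
//		"k8s.io/client-go/util/workqueue"
//	)
//
//	func main() {
//		// Same base and cap as defaultSyncBackOff and maxSyncBackOff.
//		limiter := workqueue.NewItemExponentialFailureRateLimiter(time.Second, 1000*time.Second)
//		for i := 0; i < 15; i++ { // maxRetries
//			// Each call records one more failure for this key and returns the
//			// next delay: 1s, 2s, 4s, ... capped at 1000s.
//			fmt.Println(limiter.When("default/my-service"))
//		}
//	}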
    79  
    80  // NewController creates and initializes a new Controller
    81  func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
    82  	serviceInformer coreinformers.ServiceInformer,
    83  	nodeInformer coreinformers.NodeInformer,
    84  	endpointSliceInformer discoveryinformers.EndpointSliceInformer,
    85  	maxEndpointsPerSlice int32,
    86  	client clientset.Interface,
    87  	endpointUpdatesBatchPeriod time.Duration,
    88  ) *Controller {
    89  	broadcaster := record.NewBroadcaster()
    90  	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
    91  
    92  	endpointslicemetrics.RegisterMetrics()
    93  
    94  	c := &Controller{
    95  		client: client,
    96  		// This is similar to the DefaultControllerRateLimiter, just with a
    97  		// significantly higher default backoff (1s vs 5ms). This controller
    98  		// processes events that can require significant EndpointSlice changes,
    99  		// such as an update to a Service or Deployment. A more significant
   100  		// rate limit back off here helps ensure that the Controller does not
   101  		// overwhelm the API Server.
   102  		queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter(
   103  			workqueue.NewItemExponentialFailureRateLimiter(defaultSyncBackOff, maxSyncBackOff),
    104  			// 10 qps, 100 bucket size. This is only for retry speed and it's
   105  			// only the overall factor (not per item).
   106  			&workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
   107  		), "endpoint_slice"),
   108  		workerLoopPeriod: time.Second,
   109  	}
   110  
   111  	serviceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   112  		AddFunc: c.onServiceUpdate,
   113  		UpdateFunc: func(old, cur interface{}) {
   114  			c.onServiceUpdate(cur)
   115  		},
   116  		DeleteFunc: c.onServiceDelete,
   117  	})
   118  	c.serviceLister = serviceInformer.Lister()
   119  	c.servicesSynced = serviceInformer.Informer().HasSynced
   120  
   121  	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   122  		AddFunc:    c.addPod,
   123  		UpdateFunc: c.updatePod,
   124  		DeleteFunc: c.deletePod,
   125  	})
   126  	c.podLister = podInformer.Lister()
   127  	c.podsSynced = podInformer.Informer().HasSynced
   128  
   129  	c.nodeLister = nodeInformer.Lister()
   130  	c.nodesSynced = nodeInformer.Informer().HasSynced
   131  
   132  	logger := klog.FromContext(ctx)
   133  	endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   134  		AddFunc: c.onEndpointSliceAdd,
   135  		UpdateFunc: func(oldObj, newObj interface{}) {
   136  			c.onEndpointSliceUpdate(logger, oldObj, newObj)
   137  		},
   138  		DeleteFunc: c.onEndpointSliceDelete,
   139  	})
   140  
   141  	c.endpointSliceLister = endpointSliceInformer.Lister()
   142  	c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
   143  	c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()
   144  
   145  	c.maxEndpointsPerSlice = maxEndpointsPerSlice
   146  
   147  	c.triggerTimeTracker = endpointsliceutil.NewTriggerTimeTracker()
   148  
   149  	c.eventBroadcaster = broadcaster
   150  	c.eventRecorder = recorder
   151  
   152  	c.endpointUpdatesBatchPeriod = endpointUpdatesBatchPeriod
   153  
   154  	if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
   155  		nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
   156  			AddFunc: func(obj interface{}) {
   157  				c.addNode(logger, obj)
   158  			},
   159  			UpdateFunc: func(oldObj, newObj interface{}) {
   160  				c.updateNode(logger, oldObj, newObj)
   161  			},
   162  			DeleteFunc: func(obj interface{}) {
   163  				c.deleteNode(logger, obj)
   164  			},
   165  		})
   166  
   167  		c.topologyCache = topologycache.NewTopologyCache()
   168  	}
   169  
   170  	c.reconciler = endpointslicerec.NewReconciler(
   171  		c.client,
   172  		c.nodeLister,
   173  		c.maxEndpointsPerSlice,
   174  		c.endpointSliceTracker,
   175  		c.topologyCache,
   176  		c.eventRecorder,
   177  		controllerName,
   178  	)
   179  
   180  	return c
   181  }
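// A minimal sketch of how a caller could wire this controller up with a shared
// informer factory and run it. This mirrors what kube-controller-manager does,
// but the literal values and the `client` variable here are illustrative
// assumptions, not upstream code (assumes "k8s.io/client-go/informers" and a
// kubernetes.Interface in `client`):
//
//	factory := informers.NewSharedInformerFactory(client, 12*time.Hour)
//	c := NewController(ctx,
//		factory.Core().V1().Pods(),
//		factory.Core().V1().Services(),
//		factory.Core().V1().Nodes(),
//		factory.Discovery().V1().EndpointSlices(),
//		100,         // maxEndpointsPerSlice
//		client,      // a kubernetes.Interface built elsewhere
//		time.Second, // endpointUpdatesBatchPeriod
//	)
//	factory.Start(ctx.Done())
//	c.Run(ctx, 5) // blocks until ctx is cancelled
//
// Note that factory.Start only starts informers that have already been
// requested, which NewController does via the .Informer() calls above, so
// Start must come after NewController.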
   182  
   183  // Controller manages selector-based service endpoint slices
   184  type Controller struct {
   185  	client           clientset.Interface
   186  	eventBroadcaster record.EventBroadcaster
   187  	eventRecorder    record.EventRecorder
   188  
   189  	// serviceLister is able to list/get services and is populated by the
   190  	// shared informer passed to NewController
   191  	serviceLister corelisters.ServiceLister
   192  	// servicesSynced returns true if the service shared informer has been synced at least once.
   193  	// Added as a member to the struct to allow injection for testing.
   194  	servicesSynced cache.InformerSynced
   195  
   196  	// podLister is able to list/get pods and is populated by the
   197  	// shared informer passed to NewController
   198  	podLister corelisters.PodLister
   199  	// podsSynced returns true if the pod shared informer has been synced at least once.
   200  	// Added as a member to the struct to allow injection for testing.
   201  	podsSynced cache.InformerSynced
   202  
   203  	// endpointSliceLister is able to list/get endpoint slices and is populated by the
   204  	// shared informer passed to NewController
   205  	endpointSliceLister discoverylisters.EndpointSliceLister
   206  	// endpointSlicesSynced returns true if the endpoint slice shared informer has been synced at least once.
   207  	// Added as a member to the struct to allow injection for testing.
   208  	endpointSlicesSynced cache.InformerSynced
   209  	// endpointSliceTracker tracks the list of EndpointSlices and associated
   210  	// resource versions expected for each Service. It can help determine if a
   211  	// cached EndpointSlice is out of date.
   212  	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
   213  
   214  	// nodeLister is able to list/get nodes and is populated by the
   215  	// shared informer passed to NewController
   216  	nodeLister corelisters.NodeLister
   217  	// nodesSynced returns true if the node shared informer has been synced at least once.
   218  	// Added as a member to the struct to allow injection for testing.
   219  	nodesSynced cache.InformerSynced
   220  
    221  	// reconciler is a utility used to reconcile EndpointSlice changes.
   222  	reconciler *endpointslicerec.Reconciler
   223  
    224  	// triggerTimeTracker is a utility used to compute and export the
   225  	// EndpointsLastChangeTriggerTime annotation.
   226  	triggerTimeTracker *endpointsliceutil.TriggerTimeTracker
   227  
   228  	// Services that need to be updated. A channel is inappropriate here,
   229  	// because it allows services with lots of pods to be serviced much
   230  	// more often than services with few pods; it also would cause a
   231  	// service that's inserted multiple times to be processed more than
   232  	// necessary.
   233  	queue workqueue.RateLimitingInterface
   234  
    235  	// maxEndpointsPerSlice is the maximum number of endpoints that
    236  	// should be added to an EndpointSlice.
   237  	maxEndpointsPerSlice int32
   238  
   239  	// workerLoopPeriod is the time between worker runs. The workers
   240  	// process the queue of service and pod changes
   241  	workerLoopPeriod time.Duration
   242  
   243  	// endpointUpdatesBatchPeriod is an artificial delay added to all service syncs triggered by pod changes.
    244  	// This can be used to reduce the overall number of endpoint slice updates.
   245  	endpointUpdatesBatchPeriod time.Duration
   246  
   247  	// topologyCache tracks the distribution of Nodes and endpoints across zones
   248  	// to enable TopologyAwareHints.
   249  	topologyCache *topologycache.TopologyCache
   250  }
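// The queue comment above relies on two workqueue properties: identical keys
// are coalesced while queued, and a key re-added during processing is simply
// processed once more after Done. A tiny illustrative sketch, using only the
// workqueue and fmt imports already present in this file (the keys are made up):
//
//	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example")
//	q.Add("default/web")
//	q.Add("default/web") // duplicate of a queued key: coalesced
//	q.Add("default/api")
//	fmt.Println(q.Len()) // 2, not 3
//	key, _ := q.Get()    // key is now marked as being processed
//	q.Add(key)           // raced with processing: re-queued once Done is called
//	q.Done(key)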
   251  
   252  // Run will not return until stopCh is closed.
   253  func (c *Controller) Run(ctx context.Context, workers int) {
   254  	defer utilruntime.HandleCrash()
   255  
   256  	// Start events processing pipeline.
   257  	c.eventBroadcaster.StartLogging(klog.Infof)
   258  	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
   259  	defer c.eventBroadcaster.Shutdown()
   260  
   261  	defer c.queue.ShutDown()
   262  
   263  	logger := klog.FromContext(ctx)
   264  	logger.Info("Starting endpoint slice controller")
   265  	defer logger.Info("Shutting down endpoint slice controller")
   266  
   267  	if !cache.WaitForNamedCacheSync("endpoint_slice", ctx.Done(), c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
   268  		return
   269  	}
   270  
   271  	logger.V(2).Info("Starting worker threads", "total", workers)
   272  	for i := 0; i < workers; i++ {
   273  		go wait.Until(func() { c.worker(logger) }, c.workerLoopPeriod, ctx.Done())
   274  	}
   275  
   276  	<-ctx.Done()
   277  }
   278  
   279  // worker runs a worker thread that just dequeues items, processes them, and
   280  // marks them done. You may run as many of these in parallel as you wish; the
   281  // workqueue guarantees that they will not end up processing the same service
   282  // at the same time
   283  func (c *Controller) worker(logger klog.Logger) {
   284  	for c.processNextWorkItem(logger) {
   285  	}
   286  }
   287  
   288  func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
   289  	cKey, quit := c.queue.Get()
   290  	if quit {
   291  		return false
   292  	}
   293  	defer c.queue.Done(cKey)
   294  
   295  	err := c.syncService(logger, cKey.(string))
   296  	c.handleErr(logger, err, cKey)
   297  
   298  	return true
   299  }
   300  
   301  func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
   302  	trackSync(err)
   303  
   304  	if err == nil {
   305  		c.queue.Forget(key)
   306  		return
   307  	}
   308  
   309  	if c.queue.NumRequeues(key) < maxRetries {
   310  		logger.Info("Error syncing endpoint slices for service, retrying", "key", key, "err", err)
   311  		c.queue.AddRateLimited(key)
   312  		return
   313  	}
   314  
   315  	logger.Info("Retry budget exceeded, dropping service out of the queue", "key", key, "err", err)
   316  	c.queue.Forget(key)
   317  	utilruntime.HandleError(err)
   318  }
   319  
   320  func (c *Controller) syncService(logger klog.Logger, key string) error {
   321  	startTime := time.Now()
   322  	defer func() {
   323  		logger.V(4).Info("Finished syncing service endpoint slices", "key", key, "elapsedTime", time.Since(startTime))
   324  	}()
   325  
   326  	namespace, name, err := cache.SplitMetaNamespaceKey(key)
   327  	if err != nil {
   328  		return err
   329  	}
   330  
   331  	service, err := c.serviceLister.Services(namespace).Get(name)
   332  	if err != nil {
   333  		if !apierrors.IsNotFound(err) {
   334  			return err
   335  		}
   336  
   337  		c.triggerTimeTracker.DeleteService(namespace, name)
   338  		c.reconciler.DeleteService(namespace, name)
   339  		c.endpointSliceTracker.DeleteService(namespace, name)
   340  		// The service has been deleted, return nil so that it won't be retried.
   341  		return nil
   342  	}
   343  
   344  	if service.Spec.Type == v1.ServiceTypeExternalName {
   345  		// services with Type ExternalName receive no endpoints from this controller;
   346  		// Ref: https://issues.k8s.io/105986
   347  		return nil
   348  	}
   349  
   350  	if service.Spec.Selector == nil {
   351  		// services without a selector receive no endpoint slices from this controller;
   352  		// these services will receive endpoint slices that are created out-of-band via the REST API.
   353  		return nil
   354  	}
   355  
   356  	logger.V(5).Info("About to update endpoint slices for service", "key", key)
   357  
   358  	podLabelSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
   359  	pods, err := c.podLister.Pods(service.Namespace).List(podLabelSelector)
   360  	if err != nil {
    361  		// Since we're reading from a local informer cache, this error should
    362  		// be practically impossible.
   363  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListPods",
   364  			"Error listing Pods for Service %s/%s: %v", service.Namespace, service.Name, err)
   365  		return err
   366  	}
   367  
   368  	esLabelSelector := labels.Set(map[string]string{
   369  		discovery.LabelServiceName: service.Name,
   370  		discovery.LabelManagedBy:   c.reconciler.GetControllerName(),
   371  	}).AsSelectorPreValidated()
   372  	endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector)
   373  
   374  	if err != nil {
    375  		// Since we're reading from a local informer cache, this error should
    376  		// be practically impossible.
   377  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices",
   378  			"Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
   379  		return err
   380  	}
   381  
   382  	// Drop EndpointSlices that have been marked for deletion to prevent the controller from getting stuck.
   383  	endpointSlices = dropEndpointSlicesPendingDeletion(endpointSlices)
   384  
   385  	if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
   386  		return endpointslicepkg.NewStaleInformerCache("EndpointSlice informer cache is out of date")
   387  	}
   388  
   389  	// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
   390  	// state of the trigger time tracker gets updated even if the sync turns out
   391  	// to be no-op and we don't update the EndpointSlice objects.
   392  	lastChangeTriggerTime := c.triggerTimeTracker.
   393  		ComputeEndpointLastChangeTriggerTime(namespace, service, pods)
   394  
   395  	err = c.reconciler.Reconcile(logger, service, pods, endpointSlices, lastChangeTriggerTime)
   396  	if err != nil {
   397  		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToUpdateEndpointSlices",
   398  			"Error updating Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
   399  		return err
   400  	}
   401  
   402  	return nil
   403  }
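// syncService finds the slices it owns purely by the two labels above. A
// minimal sketch of the equivalent ad-hoc query with a clientset, useful when
// debugging what this controller currently manages for a Service; the `client`
// variable, the metav1 import ("k8s.io/apimachinery/pkg/apis/meta/v1"), and the
// "default"/"web" names are assumptions for illustration only:
//
//	selector := labels.Set(map[string]string{
//		discovery.LabelServiceName: "web",
//		discovery.LabelManagedBy:   controllerName, // "endpointslice-controller.k8s.io"
//	}).AsSelector().String()
//	slices, err := client.DiscoveryV1().EndpointSlices("default").List(ctx,
//		metav1.ListOptions{LabelSelector: selector})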
   404  
    405  // onServiceUpdate queues the Service for processing.
   406  func (c *Controller) onServiceUpdate(obj interface{}) {
   407  	key, err := controller.KeyFunc(obj)
   408  	if err != nil {
   409  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
   410  		return
   411  	}
   412  
   413  	c.queue.Add(key)
   414  }
   415  
    416  // onServiceDelete queues the Service for processing.
   417  func (c *Controller) onServiceDelete(obj interface{}) {
   418  	key, err := controller.KeyFunc(obj)
   419  	if err != nil {
   420  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
   421  		return
   422  	}
   423  
   424  	c.queue.Add(key)
   425  }
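// In both handlers the queued key is the usual informer cache key,
// "<namespace>/<name>"; for live objects controller.KeyFunc produces the same
// key as cache.MetaNamespaceKeyFunc, and syncService splits it apart again. A
// tiny round-trip sketch (the Service literal is made up; assumes metav1,
// "k8s.io/apimachinery/pkg/apis/meta/v1"):
//
//	svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web"}}
//	key, _ := cache.MetaNamespaceKeyFunc(svc)       // "default/web"
//	ns, name, _ := cache.SplitMetaNamespaceKey(key)
//	fmt.Println(ns, name)                           // default web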
   426  
    427  // onEndpointSliceAdd queues a sync for the relevant Service if the
   428  // EndpointSlice resource version does not match the expected version in the
   429  // endpointSliceTracker.
   430  func (c *Controller) onEndpointSliceAdd(obj interface{}) {
   431  	endpointSlice := obj.(*discovery.EndpointSlice)
   432  	if endpointSlice == nil {
   433  		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceAdd()"))
   434  		return
   435  	}
   436  	if c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice) {
   437  		c.queueServiceForEndpointSlice(endpointSlice)
   438  	}
   439  }
   440  
    441  // onEndpointSliceUpdate queues a sync for the relevant Service if
   442  // the EndpointSlice resource version does not match the expected version in the
   443  // endpointSliceTracker or the managed-by value of the EndpointSlice has changed
   444  // from or to this controller.
   445  func (c *Controller) onEndpointSliceUpdate(logger klog.Logger, prevObj, obj interface{}) {
   446  	prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
   447  	endpointSlice := obj.(*discovery.EndpointSlice)
   448  	if endpointSlice == nil || prevEndpointSlice == nil {
   449  		utilruntime.HandleError(fmt.Errorf("Invalid EndpointSlice provided to onEndpointSliceUpdate()"))
   450  		return
   451  	}
   452  	// EndpointSlice generation does not change when labels change. Although the
   453  	// controller will never change LabelServiceName, users might. This check
   454  	// ensures that we handle changes to this label.
   455  	svcName := endpointSlice.Labels[discovery.LabelServiceName]
   456  	prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
   457  	if svcName != prevSvcName {
   458  		logger.Info("label changed", "label", discovery.LabelServiceName, "oldService", prevSvcName, "newService", svcName, "endpointslice", klog.KObj(endpointSlice))
   459  		c.queueServiceForEndpointSlice(endpointSlice)
   460  		c.queueServiceForEndpointSlice(prevEndpointSlice)
   461  		return
   462  	}
   463  	if c.reconciler.ManagedByChanged(prevEndpointSlice, endpointSlice) || (c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.ShouldSync(endpointSlice)) {
   464  		c.queueServiceForEndpointSlice(endpointSlice)
   465  	}
   466  }
   467  
    468  // onEndpointSliceDelete queues a sync for the relevant Service if the
    469  // endpointSliceTracker still expected the EndpointSlice to exist, i.e. the
    470  // deletion was not initiated by this controller.
   471  func (c *Controller) onEndpointSliceDelete(obj interface{}) {
   472  	endpointSlice := getEndpointSliceFromDeleteAction(obj)
   473  	if endpointSlice != nil && c.reconciler.ManagedByController(endpointSlice) && c.endpointSliceTracker.Has(endpointSlice) {
   474  		// This returns false if we didn't expect the EndpointSlice to be
   475  		// deleted. If that is the case, we queue the Service for another sync.
   476  		if !c.endpointSliceTracker.HandleDeletion(endpointSlice) {
   477  			c.queueServiceForEndpointSlice(endpointSlice)
   478  		}
   479  	}
   480  }
   481  
   482  // queueServiceForEndpointSlice attempts to queue the corresponding Service for
   483  // the provided EndpointSlice.
   484  func (c *Controller) queueServiceForEndpointSlice(endpointSlice *discovery.EndpointSlice) {
   485  	key, err := endpointslicerec.ServiceControllerKey(endpointSlice)
   486  	if err != nil {
   487  		utilruntime.HandleError(fmt.Errorf("Couldn't get key for EndpointSlice %+v: %v", endpointSlice, err))
   488  		return
   489  	}
   490  
   491  	// queue after the max of endpointSliceChangeMinSyncDelay and
   492  	// endpointUpdatesBatchPeriod.
   493  	delay := endpointSliceChangeMinSyncDelay
   494  	if c.endpointUpdatesBatchPeriod > delay {
   495  		delay = c.endpointUpdatesBatchPeriod
   496  	}
   497  	c.queue.AddAfter(key, delay)
   498  }
   499  
   500  func (c *Controller) addPod(obj interface{}) {
   501  	pod := obj.(*v1.Pod)
   502  	services, err := endpointsliceutil.GetPodServiceMemberships(c.serviceLister, pod)
   503  	if err != nil {
   504  		utilruntime.HandleError(fmt.Errorf("Unable to get pod %s/%s's service memberships: %v", pod.Namespace, pod.Name, err))
   505  		return
   506  	}
   507  	for key := range services {
   508  		c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
   509  	}
   510  }
   511  
   512  func (c *Controller) updatePod(old, cur interface{}) {
   513  	services := endpointsliceutil.GetServicesToUpdateOnPodChange(c.serviceLister, old, cur)
   514  	for key := range services {
   515  		c.queue.AddAfter(key, c.endpointUpdatesBatchPeriod)
   516  	}
   517  }
   518  
    519  // When a pod is deleted, enqueue the services the pod used to be a member of.
   520  // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
   521  func (c *Controller) deletePod(obj interface{}) {
   522  	pod := endpointsliceutil.GetPodFromDeleteAction(obj)
   523  	if pod != nil {
   524  		c.addPod(pod)
   525  	}
   526  }
   527  
   528  func (c *Controller) addNode(logger klog.Logger, obj interface{}) {
   529  	c.checkNodeTopologyDistribution(logger)
   530  }
   531  
   532  func (c *Controller) updateNode(logger klog.Logger, old, cur interface{}) {
   533  	oldNode := old.(*v1.Node)
   534  	curNode := cur.(*v1.Node)
   535  
    536  	// LabelTopologyZone may be added by the cloud provider asynchronously after the Node is created.
   537  	// The topology cache should be updated in this case.
   538  	if isNodeReady(oldNode) != isNodeReady(curNode) ||
   539  		oldNode.Labels[v1.LabelTopologyZone] != curNode.Labels[v1.LabelTopologyZone] {
   540  		c.checkNodeTopologyDistribution(logger)
   541  	}
   542  }
   543  
   544  func (c *Controller) deleteNode(logger klog.Logger, obj interface{}) {
   545  	c.checkNodeTopologyDistribution(logger)
   546  }
   547  
   548  // checkNodeTopologyDistribution updates Nodes in the topology cache and then
   549  // queues any Services that are past the threshold.
   550  func (c *Controller) checkNodeTopologyDistribution(logger klog.Logger) {
   551  	if c.topologyCache == nil {
   552  		return
   553  	}
   554  	nodes, err := c.nodeLister.List(labels.Everything())
   555  	if err != nil {
   556  		logger.Error(err, "Error listing Nodes")
   557  		return
   558  	}
   559  	c.topologyCache.SetNodes(logger, nodes)
   560  	serviceKeys := c.topologyCache.GetOverloadedServices()
   561  	for _, serviceKey := range serviceKeys {
   562  		logger.V(2).Info("Queuing Service after Node change due to overloading", "key", serviceKey)
   563  		c.queue.Add(serviceKey)
   564  	}
   565  }
   566  
   567  // trackSync increments the EndpointSliceSyncs metric with the result of a sync.
   568  func trackSync(err error) {
   569  	metricLabel := "success"
   570  	if err != nil {
   571  		if endpointslicepkg.IsStaleInformerCacheErr(err) {
   572  			metricLabel = "stale"
   573  		} else {
   574  			metricLabel = "error"
   575  		}
   576  	}
   577  	endpointslicemetrics.EndpointSliceSyncs.WithLabelValues(metricLabel).Inc()
   578  }
   579  
   580  func dropEndpointSlicesPendingDeletion(endpointSlices []*discovery.EndpointSlice) []*discovery.EndpointSlice {
   581  	n := 0
   582  	for _, endpointSlice := range endpointSlices {
   583  		if endpointSlice.DeletionTimestamp == nil {
   584  			endpointSlices[n] = endpointSlice
   585  			n++
   586  		}
   587  	}
   588  	return endpointSlices[:n]
   589  }
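// A small usage sketch for the helper above: the filtering happens in place
// (the input's backing array is reused), and only slices without a deletion
// timestamp survive. The names are made up and metav1
// ("k8s.io/apimachinery/pkg/apis/meta/v1") is an assumed extra import:
//
//	now := metav1.Now()
//	slices := []*discovery.EndpointSlice{
//		{ObjectMeta: metav1.ObjectMeta{Name: "web-abc"}},
//		{ObjectMeta: metav1.ObjectMeta{Name: "web-def", DeletionTimestamp: &now}},
//	}
//	kept := dropEndpointSlicesPendingDeletion(slices)
//	// kept contains only "web-abc"; the slice that is being deleted is left
//	// alone so the reconciler does not fight the pending deletion.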
   590  
   591  // getEndpointSliceFromDeleteAction parses an EndpointSlice from a delete action.
   592  func getEndpointSliceFromDeleteAction(obj interface{}) *discovery.EndpointSlice {
   593  	if endpointSlice, ok := obj.(*discovery.EndpointSlice); ok {
    594  		// The informer handed us an EndpointSlice directly, so no tombstone
    595  		// handling is needed.
   596  		return endpointSlice
   597  	}
    598  	// If we reached here it means the EndpointSlice was deleted but its final state is unrecorded.
   599  	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
   600  	if !ok {
   601  		utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
   602  		return nil
   603  	}
   604  	endpointSlice, ok := tombstone.Obj.(*discovery.EndpointSlice)
   605  	if !ok {
    606  		utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not an EndpointSlice: %#v", obj))
   607  		return nil
   608  	}
   609  	return endpointSlice
   610  }
   611  
   612  // isNodeReady returns true if a node is ready; false otherwise.
   613  func isNodeReady(node *v1.Node) bool {
   614  	for _, c := range node.Status.Conditions {
   615  		if c.Type == v1.NodeReady {
   616  			return c.Status == v1.ConditionTrue
   617  		}
   618  	}
   619  	return false
   620  }
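// A worked example for isNodeReady (the Node literal is made up): a Node whose
// NodeReady condition is True is ready, while a Node with no NodeReady
// condition at all reports false, which updateNode treats as a readiness
// change worth re-checking the topology distribution for.
//
//	node := &v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{
//		{Type: v1.NodeDiskPressure, Status: v1.ConditionFalse},
//		{Type: v1.NodeReady, Status: v1.ConditionTrue},
//	}}}
//	fmt.Println(isNodeReady(node)) // true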