k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/controller/podautoscaler/horizontal.go

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package podautoscaler
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"sync"
    25  	"time"
    26  
    27  	autoscalingv1 "k8s.io/api/autoscaling/v1"
    28  	autoscalingv2 "k8s.io/api/autoscaling/v2"
    29  	v1 "k8s.io/api/core/v1"
    30  	apiequality "k8s.io/apimachinery/pkg/api/equality"
    31  	k8serrors "k8s.io/apimachinery/pkg/api/errors"
    32  	apimeta "k8s.io/apimachinery/pkg/api/meta"
    33  	"k8s.io/apimachinery/pkg/api/resource"
    34  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    35  	"k8s.io/apimachinery/pkg/labels"
    36  	"k8s.io/apimachinery/pkg/runtime"
    37  	"k8s.io/apimachinery/pkg/runtime/schema"
    38  	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    39  	"k8s.io/apimachinery/pkg/util/wait"
    40  	autoscalinginformers "k8s.io/client-go/informers/autoscaling/v2"
    41  	coreinformers "k8s.io/client-go/informers/core/v1"
    42  	"k8s.io/client-go/kubernetes/scheme"
    43  	autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v2"
    44  	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
    45  	autoscalinglisters "k8s.io/client-go/listers/autoscaling/v2"
    46  	corelisters "k8s.io/client-go/listers/core/v1"
    47  	scaleclient "k8s.io/client-go/scale"
    48  	"k8s.io/client-go/tools/cache"
    49  	"k8s.io/client-go/tools/record"
    50  	"k8s.io/client-go/util/workqueue"
    51  	"k8s.io/klog/v2"
    52  	"k8s.io/kubernetes/pkg/controller"
    53  	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    54  	"k8s.io/kubernetes/pkg/controller/podautoscaler/monitor"
    55  	"k8s.io/kubernetes/pkg/controller/util/selectors"
    56  )
    57  
    58  var (
    59  	scaleUpLimitFactor  = 2.0
    60  	scaleUpLimitMinimum = 4.0
    61  )
    62  
    63  var (
    64  	// errSpec is used to determine if the error comes from the spec of the HPA object in reconcileAutoscaler.
    65  	// All such errors should have this error as a root error so that the upstream function can distinguish spec errors from internal errors.
    66  	// e.g., fmt.Errorf("invalid spec%w", errSpec)
    67  	errSpec error = errors.New("")
    68  )
    69  
    70  type timestampedRecommendation struct {
    71  	recommendation int32
    72  	timestamp      time.Time
    73  }
    74  
    75  type timestampedScaleEvent struct {
    76  	replicaChange int32 // absolute value, non-negative
    77  	timestamp     time.Time
    78  	outdated      bool
    79  }
    80  
    81  // HorizontalController is responsible for synchronizing HPA objects stored
    82  // in the system with the actual deployments/replication controllers they
    83  // control.
    84  type HorizontalController struct {
    85  	scaleNamespacer scaleclient.ScalesGetter
    86  	hpaNamespacer   autoscalingclient.HorizontalPodAutoscalersGetter
    87  	mapper          apimeta.RESTMapper
    88  
    89  	replicaCalc   *ReplicaCalculator
    90  	eventRecorder record.EventRecorder
    91  
    92  	downscaleStabilisationWindow time.Duration
    93  
    94  	monitor monitor.Monitor
    95  
    96  	// hpaLister is able to list/get HPAs from the shared cache from the informer passed in to
    97  	// NewHorizontalController.
    98  	hpaLister       autoscalinglisters.HorizontalPodAutoscalerLister
    99  	hpaListerSynced cache.InformerSynced
   100  
   101  	// podLister is able to list/get Pods from the shared cache from the informer passed in to
   102  	// NewHorizontalController.
   103  	podLister       corelisters.PodLister
   104  	podListerSynced cache.InformerSynced
   105  
   106  	// Controllers that need to be synced
   107  	queue workqueue.TypedRateLimitingInterface[string]
   108  
   109  	// Latest unstabilized recommendations for each autoscaler.
   110  	recommendations     map[string][]timestampedRecommendation
   111  	recommendationsLock sync.Mutex
   112  
   113  	// Latest autoscaler events
   114  	scaleUpEvents       map[string][]timestampedScaleEvent
   115  	scaleUpEventsLock   sync.RWMutex
   116  	scaleDownEvents     map[string][]timestampedScaleEvent
   117  	scaleDownEventsLock sync.RWMutex
   118  
   119  	// Storage of HPAs and their selectors.
   120  	hpaSelectors    *selectors.BiMultimap
   121  	hpaSelectorsMux sync.Mutex
   122  }
   123  
   124  // NewHorizontalController creates a new HorizontalController.
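        // A rough usage sketch (illustrative only; the client, mapper and informer variables below
        // are assumed to be built elsewhere, e.g. by a controller manager, and the numeric arguments
        // are placeholders rather than recommended values):
        //
        //	ctrl := podautoscaler.NewHorizontalController(
        //		ctx,
        //		kubeClient.CoreV1(),        // event sink
        //		scaleClient,                // scale subresource client
        //		kubeClient.AutoscalingV2(), // HPA client
        //		restMapper,
        //		metricsClient,
        //		informerFactory.Autoscaling().V2().HorizontalPodAutoscalers(),
        //		informerFactory.Core().V1().Pods(),
        //		15*time.Second,  // resyncPeriod
        //		5*time.Minute,   // downscaleStabilisationWindow
        //		0.1,             // tolerance
        //		5*time.Minute,   // cpuInitializationPeriod
        //		30*time.Second,  // delayOfInitialReadinessStatus
        //	)
        //	go ctrl.Run(ctx, 5)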
   125  func NewHorizontalController(
   126  	ctx context.Context,
   127  	evtNamespacer v1core.EventsGetter,
   128  	scaleNamespacer scaleclient.ScalesGetter,
   129  	hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
   130  	mapper apimeta.RESTMapper,
   131  	metricsClient metricsclient.MetricsClient,
   132  	hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
   133  	podInformer coreinformers.PodInformer,
   134  	resyncPeriod time.Duration,
   135  	downscaleStabilisationWindow time.Duration,
   136  	tolerance float64,
   137  	cpuInitializationPeriod,
   138  	delayOfInitialReadinessStatus time.Duration,
   139  ) *HorizontalController {
   140  	broadcaster := record.NewBroadcaster(record.WithContext(ctx))
   141  	broadcaster.StartStructuredLogging(3)
   142  	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
   143  	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "horizontal-pod-autoscaler"})
   144  
   145  	hpaController := &HorizontalController{
   146  		eventRecorder:                recorder,
   147  		scaleNamespacer:              scaleNamespacer,
   148  		hpaNamespacer:                hpaNamespacer,
   149  		downscaleStabilisationWindow: downscaleStabilisationWindow,
   150  		monitor:                      monitor.New(),
   151  		queue: workqueue.NewTypedRateLimitingQueueWithConfig(
   152  			NewDefaultHPARateLimiter(resyncPeriod),
   153  			workqueue.TypedRateLimitingQueueConfig[string]{
   154  				Name: "horizontalpodautoscaler",
   155  			},
   156  		),
   157  		mapper:              mapper,
   158  		recommendations:     map[string][]timestampedRecommendation{},
   159  		recommendationsLock: sync.Mutex{},
   160  		scaleUpEvents:       map[string][]timestampedScaleEvent{},
   161  		scaleUpEventsLock:   sync.RWMutex{},
   162  		scaleDownEvents:     map[string][]timestampedScaleEvent{},
   163  		scaleDownEventsLock: sync.RWMutex{},
   164  		hpaSelectors:        selectors.NewBiMultimap(),
   165  	}
   166  
   167  	hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
   168  		cache.ResourceEventHandlerFuncs{
   169  			AddFunc:    hpaController.enqueueHPA,
   170  			UpdateFunc: hpaController.updateHPA,
   171  			DeleteFunc: hpaController.deleteHPA,
   172  		},
   173  		resyncPeriod,
   174  	)
   175  	hpaController.hpaLister = hpaInformer.Lister()
   176  	hpaController.hpaListerSynced = hpaInformer.Informer().HasSynced
   177  
   178  	hpaController.podLister = podInformer.Lister()
   179  	hpaController.podListerSynced = podInformer.Informer().HasSynced
   180  
   181  	replicaCalc := NewReplicaCalculator(
   182  		metricsClient,
   183  		hpaController.podLister,
   184  		tolerance,
   185  		cpuInitializationPeriod,
   186  		delayOfInitialReadinessStatus,
   187  	)
   188  	hpaController.replicaCalc = replicaCalc
   189  
   190  	monitor.Register()
   191  
   192  	return hpaController
   193  }
   194  
   195  // Run begins watching and syncing.
   196  func (a *HorizontalController) Run(ctx context.Context, workers int) {
   197  	defer utilruntime.HandleCrash()
   198  	defer a.queue.ShutDown()
   199  
   200  	logger := klog.FromContext(ctx)
   201  	logger.Info("Starting HPA controller")
   202  	defer logger.Info("Shutting down HPA controller")
   203  
   204  	if !cache.WaitForNamedCacheSync("HPA", ctx.Done(), a.hpaListerSynced, a.podListerSynced) {
   205  		return
   206  	}
   207  
   208  	for i := 0; i < workers; i++ {
   209  		go wait.UntilWithContext(ctx, a.worker, time.Second)
   210  	}
   211  
   212  	<-ctx.Done()
   213  }
   214  
   215  // obj could be a *autoscalingv2.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
   216  func (a *HorizontalController) updateHPA(old, cur interface{}) {
   217  	a.enqueueHPA(cur)
   218  }
   219  
   220  // obj could be a *autoscalingv2.HorizontalPodAutoscaler, or a DeletionFinalStateUnknown marker item.
   221  func (a *HorizontalController) enqueueHPA(obj interface{}) {
   222  	key, err := controller.KeyFunc(obj)
   223  	if err != nil {
   224  		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
   225  		return
   226  	}
   227  
   228  	// Requests are always added to the queue with a resyncPeriod delay. If there's already a
   229  	// request for the HPA in the queue then the new request is dropped. Requests spend the resync
   230  	// interval in the queue, so HPAs are processed every resync interval.
   231  	a.queue.AddRateLimited(key)
   232  
   233  	// Register the HPA in the hpaSelectors map if it's not present yet, attaching the Nothing
   234  	// selector, which selects no objects. The actual selector is updated
   235  	// when it becomes available during autoscaler reconciliation.
   236  	a.hpaSelectorsMux.Lock()
   237  	defer a.hpaSelectorsMux.Unlock()
   238  	if hpaKey := selectors.Parse(key); !a.hpaSelectors.SelectorExists(hpaKey) {
   239  		a.hpaSelectors.PutSelector(hpaKey, labels.Nothing())
   240  	}
   241  }
   242  
   243  func (a *HorizontalController) deleteHPA(obj interface{}) {
   244  	key, err := controller.KeyFunc(obj)
   245  	if err != nil {
   246  		utilruntime.HandleError(fmt.Errorf("couldn't get key for object %+v: %v", obj, err))
   247  		return
   248  	}
   249  
   250  	// TODO: could we leak if we fail to get the key?
   251  	a.queue.Forget(key)
   252  
   253  	// Remove HPA and attached selector.
   254  	a.hpaSelectorsMux.Lock()
   255  	defer a.hpaSelectorsMux.Unlock()
   256  	a.hpaSelectors.DeleteSelector(selectors.Parse(key))
   257  }
   258  
   259  func (a *HorizontalController) worker(ctx context.Context) {
   260  	for a.processNextWorkItem(ctx) {
   261  	}
   262  	logger := klog.FromContext(ctx)
   263  	logger.Info("Horizontal Pod Autoscaler controller worker shutting down")
   264  }
   265  
   266  func (a *HorizontalController) processNextWorkItem(ctx context.Context) bool {
   267  	key, quit := a.queue.Get()
   268  	if quit {
   269  		return false
   270  	}
   271  	defer a.queue.Done(key)
   272  
   273  	deleted, err := a.reconcileKey(ctx, key)
   274  	if err != nil {
   275  		utilruntime.HandleError(err)
   276  	}
   277  	// Re-add the HPA that was just processed to the queue with a resyncPeriod delay.
   278  	// Requests are always added to the queue with a resyncPeriod delay. If there's already a request
   279  	// for the HPA in the queue then the new request is dropped. Requests spend resyncPeriod
   280  	// in the queue, so HPAs are processed every resyncPeriod.
   281  	// The request is added here just in case the last resync didn't insert a request into the queue. This
   282  	// happens quite often because there is a race condition between adding a request after resyncPeriod
   283  	// and removing it from the queue. A request can be added by resync before the previous request is
   284  	// removed from the queue. If we didn't add a request here, one request would be dropped
   285  	// and the HPA would only be processed after 2 x resyncPeriod.
   286  	if !deleted {
   287  		a.queue.AddRateLimited(key)
   288  	}
   289  
   290  	return true
   291  }
   292  
   293  // computeReplicasForMetrics computes the desired number of replicas for the metric specifications listed in the HPA,
   294  // returning the maximum of the computed replica counts, a description of the associated metric, and the statuses of
   295  // all metrics computed.
   296  // It may return both valid metricDesiredReplicas and an error,
   297  // when some metrics still work and the HPA should perform scaling based on them.
   298  // If the HPA cannot do anything due to errors, it returns -1 in metricDesiredReplicas as a failure signal.
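        // For example, with two metric specs where a CPU metric proposes 3 replicas and a pods
        // metric proposes 7, the higher proposal wins: the function returns 7 together with the
        // pods metric's description and timestamp.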
   299  func (a *HorizontalController) computeReplicasForMetrics(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
   300  	metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {
   301  
   302  	selector, err := a.validateAndParseSelector(hpa, scale.Status.Selector)
   303  	if err != nil {
   304  		return -1, "", nil, time.Time{}, err
   305  	}
   306  
   307  	specReplicas := scale.Spec.Replicas
   308  	statusReplicas := scale.Status.Replicas
   309  	statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))
   310  
   311  	invalidMetricsCount := 0
   312  	var invalidMetricError error
   313  	var invalidMetricCondition autoscalingv2.HorizontalPodAutoscalerCondition
   314  
   315  	for i, metricSpec := range metricSpecs {
   316  		replicaCountProposal, metricNameProposal, timestampProposal, condition, err := a.computeReplicasForMetric(ctx, hpa, metricSpec, specReplicas, statusReplicas, selector, &statuses[i])
   317  
   318  		if err != nil {
   319  			if invalidMetricsCount <= 0 {
   320  				invalidMetricCondition = condition
   321  				invalidMetricError = err
   322  			}
   323  			invalidMetricsCount++
   324  			continue
   325  		}
   326  		if replicas == 0 || replicaCountProposal > replicas {
   327  			timestamp = timestampProposal
   328  			replicas = replicaCountProposal
   329  			metric = metricNameProposal
   330  		}
   331  	}
   332  
   333  	if invalidMetricError != nil {
   334  		invalidMetricError = fmt.Errorf("invalid metrics (%v invalid out of %v), first error is: %v", invalidMetricsCount, len(metricSpecs), invalidMetricError)
   335  	}
   336  
   337  	// If all metrics are invalid or some are invalid and we would scale down,
   338  	// return an error and set the condition of the hpa based on the first invalid metric.
   339  	// Otherwise, set the condition as scaling active, as we're going to scale.
   340  	if invalidMetricsCount >= len(metricSpecs) || (invalidMetricsCount > 0 && replicas < specReplicas) {
   341  		setCondition(hpa, invalidMetricCondition.Type, invalidMetricCondition.Status, invalidMetricCondition.Reason, invalidMetricCondition.Message)
   342  		return -1, "", statuses, time.Time{}, invalidMetricError
   343  	}
   344  	setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to successfully calculate a replica count from %s", metric)
   345  
   346  	return replicas, metric, statuses, timestamp, invalidMetricError
   347  }
   348  
   349  // hpasControllingPodsUnderSelector returns a list of keys of all HPAs that control a given list of pods.
   350  func (a *HorizontalController) hpasControllingPodsUnderSelector(pods []*v1.Pod) []selectors.Key {
   351  	a.hpaSelectorsMux.Lock()
   352  	defer a.hpaSelectorsMux.Unlock()
   353  
   354  	hpas := map[selectors.Key]struct{}{}
   355  	for _, p := range pods {
   356  		podKey := selectors.Key{Name: p.Name, Namespace: p.Namespace}
   357  		a.hpaSelectors.Put(podKey, p.Labels)
   358  
   359  		selectingHpas, ok := a.hpaSelectors.ReverseSelect(podKey)
   360  		if !ok {
   361  			continue
   362  		}
   363  		for _, hpa := range selectingHpas {
   364  			hpas[hpa] = struct{}{}
   365  		}
   366  	}
   367  	// Clean up all added pods.
   368  	a.hpaSelectors.KeepOnly([]selectors.Key{})
   369  
   370  	hpaList := []selectors.Key{}
   371  	for hpa := range hpas {
   372  		hpaList = append(hpaList, hpa)
   373  	}
   374  	return hpaList
   375  }
   376  
   377  // validateAndParseSelector verifies that:
   378  // - selector is not empty;
   379  // - selector format is valid;
   380  // - all pods matched by the current selector are controlled by only one HPA.
   381  // It returns the parsed selector if the checks succeed, or an error if any check fails.
   382  // In case of an error, the ScalingActive condition is set to false with the corresponding reason.
   383  func (a *HorizontalController) validateAndParseSelector(hpa *autoscalingv2.HorizontalPodAutoscaler, selector string) (labels.Selector, error) {
   384  	if selector == "" {
   385  		errMsg := "selector is required"
   386  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
   387  		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
   388  		return nil, fmt.Errorf(errMsg)
   389  	}
   390  
   391  	parsedSelector, err := labels.Parse(selector)
   392  	if err != nil {
   393  		errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
   394  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
   395  		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
   396  		return nil, fmt.Errorf(errMsg)
   397  	}
   398  
   399  	hpaKey := selectors.Key{Name: hpa.Name, Namespace: hpa.Namespace}
   400  	a.hpaSelectorsMux.Lock()
   401  	if a.hpaSelectors.SelectorExists(hpaKey) {
   402  		// Update HPA selector only if the HPA was registered in enqueueHPA.
   403  		a.hpaSelectors.PutSelector(hpaKey, parsedSelector)
   404  	}
   405  	a.hpaSelectorsMux.Unlock()
   406  
   407  	pods, err := a.podLister.Pods(hpa.Namespace).List(parsedSelector)
   408  	if err != nil {
   409  		return nil, err
   410  	}
   411  
   412  	selectingHpas := a.hpasControllingPodsUnderSelector(pods)
   413  	if len(selectingHpas) > 1 {
   414  		errMsg := fmt.Sprintf("pods by selector %v are controlled by multiple HPAs: %v", selector, selectingHpas)
   415  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "AmbiguousSelector", errMsg)
   416  		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "AmbiguousSelector", errMsg)
   417  		return nil, fmt.Errorf(errMsg)
   418  	}
   419  
   420  	return parsedSelector, nil
   421  }
   422  
   423  // computeReplicasForMetric computes the desired number of replicas for a specific HPA and metric specification,
   424  // returning the metric status and a proposed condition to be set on the HPA object.
   425  func (a *HorizontalController) computeReplicasForMetric(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler, spec autoscalingv2.MetricSpec,
   426  	specReplicas, statusReplicas int32, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, metricNameProposal string,
   427  	timestampProposal time.Time, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   428  	// actionLabel is used to report which action this metric computation has proposed.
   429  	start := time.Now()
   430  	defer func() {
   431  		actionLabel := monitor.ActionLabelNone
   432  		switch {
   433  		case replicaCountProposal > hpa.Status.CurrentReplicas:
   434  			actionLabel = monitor.ActionLabelScaleUp
   435  		case replicaCountProposal < hpa.Status.CurrentReplicas:
   436  			actionLabel = monitor.ActionLabelScaleDown
   437  		}
   438  
   439  		errorLabel := monitor.ErrorLabelNone
   440  		if err != nil {
   441  			// In case of error, set "internal" as default.
   442  			errorLabel = monitor.ErrorLabelInternal
   443  			actionLabel = monitor.ActionLabelNone
   444  		}
   445  		if errors.Is(err, errSpec) {
   446  			errorLabel = monitor.ErrorLabelSpec
   447  		}
   448  
   449  		a.monitor.ObserveMetricComputationResult(actionLabel, errorLabel, time.Since(start), spec.Type)
   450  	}()
   451  
   452  	switch spec.Type {
   453  	case autoscalingv2.ObjectMetricSourceType:
   454  		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Object.Metric.Selector)
   455  		if err != nil {
   456  			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
   457  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
   458  		}
   459  		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForObjectMetric(specReplicas, statusReplicas, spec, hpa, selector, status, metricSelector)
   460  		if err != nil {
   461  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get object metric value: %v", err)
   462  		}
   463  	case autoscalingv2.PodsMetricSourceType:
   464  		metricSelector, err := metav1.LabelSelectorAsSelector(spec.Pods.Metric.Selector)
   465  		if err != nil {
   466  			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
   467  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
   468  		}
   469  		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForPodsMetric(specReplicas, spec, hpa, selector, status, metricSelector)
   470  		if err != nil {
   471  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get pods metric value: %v", err)
   472  		}
   473  	case autoscalingv2.ResourceMetricSourceType:
   474  		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForResourceMetric(ctx, specReplicas, spec, hpa, selector, status)
   475  		if err != nil {
   476  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s resource metric value: %v", spec.Resource.Name, err)
   477  		}
   478  	case autoscalingv2.ContainerResourceMetricSourceType:
   479  		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForContainerResourceMetric(ctx, specReplicas, spec, hpa, selector, status)
   480  		if err != nil {
   481  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s container metric value: %v", spec.ContainerResource.Container, err)
   482  		}
   483  	case autoscalingv2.ExternalMetricSourceType:
   484  		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
   485  		if err != nil {
   486  			return 0, "", time.Time{}, condition, fmt.Errorf("failed to get %s external metric value: %v", spec.External.Metric.Name, err)
   487  		}
   488  	default:
   489  		// It shouldn't reach here, as an invalid metric source type is filtered out in the API server's validation.
   490  		err = fmt.Errorf("unknown metric source type %q%w", string(spec.Type), errSpec)
   491  		condition := a.getUnableComputeReplicaCountCondition(hpa, "InvalidMetricSourceType", err)
   492  		return 0, "", time.Time{}, condition, err
   493  	}
   494  	return replicaCountProposal, metricNameProposal, timestampProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   495  }
   496  
   497  func (a *HorizontalController) reconcileKey(ctx context.Context, key string) (deleted bool, err error) {
   498  	namespace, name, err := cache.SplitMetaNamespaceKey(key)
   499  	if err != nil {
   500  		return true, err
   501  	}
   502  
   503  	logger := klog.FromContext(ctx)
   504  
   505  	hpa, err := a.hpaLister.HorizontalPodAutoscalers(namespace).Get(name)
   506  	if k8serrors.IsNotFound(err) {
   507  		logger.Info("Horizontal Pod Autoscaler has been deleted", "HPA", klog.KRef(namespace, name))
   508  
   509  		a.recommendationsLock.Lock()
   510  		delete(a.recommendations, key)
   511  		a.recommendationsLock.Unlock()
   512  
   513  		a.scaleUpEventsLock.Lock()
   514  		delete(a.scaleUpEvents, key)
   515  		a.scaleUpEventsLock.Unlock()
   516  
   517  		a.scaleDownEventsLock.Lock()
   518  		delete(a.scaleDownEvents, key)
   519  		a.scaleDownEventsLock.Unlock()
   520  
   521  		return true, nil
   522  	}
   523  	if err != nil {
   524  		return false, err
   525  	}
   526  
   527  	return false, a.reconcileAutoscaler(ctx, hpa, key)
   528  }
   529  
   530  // computeStatusForObjectMetric computes the desired number of replicas for the specified metric of type ObjectMetricSourceType.
   531  func (a *HorizontalController) computeStatusForObjectMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicas int32, timestamp time.Time, metricName string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   532  	if metricSpec.Object.Target.Type == autoscalingv2.ValueMetricType && metricSpec.Object.Target.Value != nil {
   533  		replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectMetricReplicas(specReplicas, metricSpec.Object.Target.Value.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, selector, metricSelector)
   534  		if err != nil {
   535  			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
   536  			return 0, timestampProposal, "", condition, err
   537  		}
   538  		*status = autoscalingv2.MetricStatus{
   539  			Type: autoscalingv2.ObjectMetricSourceType,
   540  			Object: &autoscalingv2.ObjectMetricStatus{
   541  				DescribedObject: metricSpec.Object.DescribedObject,
   542  				Metric: autoscalingv2.MetricIdentifier{
   543  					Name:     metricSpec.Object.Metric.Name,
   544  					Selector: metricSpec.Object.Metric.Selector,
   545  				},
   546  				Current: autoscalingv2.MetricValueStatus{
   547  					Value: resource.NewMilliQuantity(usageProposal, resource.DecimalSI),
   548  				},
   549  			},
   550  		}
   551  		return replicaCountProposal, timestampProposal, fmt.Sprintf("%s metric %s", metricSpec.Object.DescribedObject.Kind, metricSpec.Object.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   552  	} else if metricSpec.Object.Target.Type == autoscalingv2.AverageValueMetricType && metricSpec.Object.Target.AverageValue != nil {
   553  		replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetObjectPerPodMetricReplicas(statusReplicas, metricSpec.Object.Target.AverageValue.MilliValue(), metricSpec.Object.Metric.Name, hpa.Namespace, &metricSpec.Object.DescribedObject, metricSelector)
   554  		if err != nil {
   555  			condition := a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
   556  			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s object metric: %v", metricSpec.Object.Metric.Name, err)
   557  		}
   558  		*status = autoscalingv2.MetricStatus{
   559  			Type: autoscalingv2.ObjectMetricSourceType,
   560  			Object: &autoscalingv2.ObjectMetricStatus{
   561  				Metric: autoscalingv2.MetricIdentifier{
   562  					Name:     metricSpec.Object.Metric.Name,
   563  					Selector: metricSpec.Object.Metric.Selector,
   564  				},
   565  				Current: autoscalingv2.MetricValueStatus{
   566  					AverageValue: resource.NewMilliQuantity(usageProposal, resource.DecimalSI),
   567  				},
   568  			},
   569  		}
   570  		return replicaCountProposal, timestampProposal, fmt.Sprintf("object metric %s(%+v)", metricSpec.Object.Metric.Name, metricSpec.Object.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   571  	}
   572  	errMsg := "invalid object metric source: neither a value target nor an average value target was set"
   573  	err = fmt.Errorf(errMsg)
   574  	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetObjectMetric", err)
   575  	return 0, time.Time{}, "", condition, err
   576  }
   577  
   578  // computeStatusForPodsMetric computes the desired number of replicas for the specified metric of type PodsMetricSourceType.
   579  func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus, metricSelector labels.Selector) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   580  	replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.Target.AverageValue.MilliValue(), metricSpec.Pods.Metric.Name, hpa.Namespace, selector, metricSelector)
   581  	if err != nil {
   582  		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetPodsMetric", err)
   583  		return 0, timestampProposal, "", condition, err
   584  	}
   585  	*status = autoscalingv2.MetricStatus{
   586  		Type: autoscalingv2.PodsMetricSourceType,
   587  		Pods: &autoscalingv2.PodsMetricStatus{
   588  			Metric: autoscalingv2.MetricIdentifier{
   589  				Name:     metricSpec.Pods.Metric.Name,
   590  				Selector: metricSpec.Pods.Metric.Selector,
   591  			},
   592  			Current: autoscalingv2.MetricValueStatus{
   593  				AverageValue: resource.NewMilliQuantity(usageProposal, resource.DecimalSI),
   594  			},
   595  		},
   596  	}
   597  
   598  	return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   599  }
   600  
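        // computeStatusForResourceMetricGeneric computes the desired replica count for a resource or
        // container-resource metric target, handling both average-value and average-utilization targets.
        // It is shared by the Resource and ContainerResource metric code paths.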
   601  func (a *HorizontalController) computeStatusForResourceMetricGeneric(ctx context.Context, currentReplicas int32, target autoscalingv2.MetricTarget,
   602  	resourceName v1.ResourceName, namespace string, container string, selector labels.Selector, sourceType autoscalingv2.MetricSourceType) (replicaCountProposal int32,
   603  	metricStatus *autoscalingv2.MetricValueStatus, timestampProposal time.Time, metricNameProposal string,
   604  	condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   605  	if target.AverageValue != nil {
   606  		var rawProposal int64
   607  		replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(ctx, currentReplicas, target.AverageValue.MilliValue(), resourceName, namespace, selector, container)
   608  		if err != nil {
   609  			return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s usage: %v", resourceName, err)
   610  		}
   611  		metricNameProposal = fmt.Sprintf("%s resource", resourceName.String())
   612  		status := autoscalingv2.MetricValueStatus{
   613  			AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
   614  		}
   615  		return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   616  	}
   617  
   618  	if target.AverageUtilization == nil {
   619  		errMsg := "invalid resource metric source: neither an average utilization target nor an average value (usage) target was set"
   620  		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
   621  	}
   622  
   623  	targetUtilization := *target.AverageUtilization
   624  	replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(ctx, currentReplicas, targetUtilization, resourceName, namespace, selector, container)
   625  	if err != nil {
   626  		return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
   627  	}
   628  
   629  	metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", resourceName)
   630  	if sourceType == autoscalingv2.ContainerResourceMetricSourceType {
   631  		metricNameProposal = fmt.Sprintf("%s container resource utilization (percentage of request)", resourceName)
   632  	}
   633  	status := autoscalingv2.MetricValueStatus{
   634  		AverageUtilization: &percentageProposal,
   635  		AverageValue:       resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
   636  	}
   637  	return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   638  }
   639  
   640  // computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
   641  func (a *HorizontalController) computeStatusForResourceMetric(ctx context.Context, currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
   642  	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
   643  	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   644  	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.Resource.Target, metricSpec.Resource.Name, hpa.Namespace, "", selector, autoscalingv2.ResourceMetricSourceType)
   645  	if err != nil {
   646  		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
   647  		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
   648  	}
   649  	*status = autoscalingv2.MetricStatus{
   650  		Type: autoscalingv2.ResourceMetricSourceType,
   651  		Resource: &autoscalingv2.ResourceMetricStatus{
   652  			Name:    metricSpec.Resource.Name,
   653  			Current: *metricValueStatus,
   654  		},
   655  	}
   656  	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
   657  }
   658  
   659  // computeStatusForContainerResourceMetric computes the desired number of replicas for the specified metric of type ContainerResourceMetricSourceType.
   660  func (a *HorizontalController) computeStatusForContainerResourceMetric(ctx context.Context, currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
   661  	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
   662  	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   663  	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(ctx, currentReplicas, metricSpec.ContainerResource.Target, metricSpec.ContainerResource.Name, hpa.Namespace, metricSpec.ContainerResource.Container, selector, autoscalingv2.ContainerResourceMetricSourceType)
   664  	if err != nil {
   665  		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetContainerResourceMetric", err)
   666  		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
   667  	}
   668  	*status = autoscalingv2.MetricStatus{
   669  		Type: autoscalingv2.ContainerResourceMetricSourceType,
   670  		ContainerResource: &autoscalingv2.ContainerResourceMetricStatus{
   671  			Name:      metricSpec.ContainerResource.Name,
   672  			Container: metricSpec.ContainerResource.Container,
   673  			Current:   *metricValueStatus,
   674  		},
   675  	}
   676  	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
   677  }
   678  
   679  // computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
   680  func (a *HorizontalController) computeStatusForExternalMetric(specReplicas, statusReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
   681  	if metricSpec.External.Target.AverageValue != nil {
   682  		replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalPerPodMetricReplicas(statusReplicas, metricSpec.External.Target.AverageValue.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector)
   683  		if err != nil {
   684  			condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
   685  			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s external metric: %v", metricSpec.External.Metric.Name, err)
   686  		}
   687  		*status = autoscalingv2.MetricStatus{
   688  			Type: autoscalingv2.ExternalMetricSourceType,
   689  			External: &autoscalingv2.ExternalMetricStatus{
   690  				Metric: autoscalingv2.MetricIdentifier{
   691  					Name:     metricSpec.External.Metric.Name,
   692  					Selector: metricSpec.External.Metric.Selector,
   693  				},
   694  				Current: autoscalingv2.MetricValueStatus{
   695  					AverageValue: resource.NewMilliQuantity(usageProposal, resource.DecimalSI),
   696  				},
   697  			},
   698  		}
   699  		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   700  	}
   701  	if metricSpec.External.Target.Value != nil {
   702  		replicaCountProposal, usageProposal, timestampProposal, err := a.replicaCalc.GetExternalMetricReplicas(specReplicas, metricSpec.External.Target.Value.MilliValue(), metricSpec.External.Metric.Name, hpa.Namespace, metricSpec.External.Metric.Selector, selector)
   703  		if err != nil {
   704  			condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
   705  			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get external metric %s: %v", metricSpec.External.Metric.Name, err)
   706  		}
   707  		*status = autoscalingv2.MetricStatus{
   708  			Type: autoscalingv2.ExternalMetricSourceType,
   709  			External: &autoscalingv2.ExternalMetricStatus{
   710  				Metric: autoscalingv2.MetricIdentifier{
   711  					Name:     metricSpec.External.Metric.Name,
   712  					Selector: metricSpec.External.Metric.Selector,
   713  				},
   714  				Current: autoscalingv2.MetricValueStatus{
   715  					Value: resource.NewMilliQuantity(usageProposal, resource.DecimalSI),
   716  				},
   717  			},
   718  		}
   719  		return replicaCountProposal, timestampProposal, fmt.Sprintf("external metric %s(%+v)", metricSpec.External.Metric.Name, metricSpec.External.Metric.Selector), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
   720  	}
   721  	errMsg := "invalid external metric source: neither a value target nor an average value target was set"
   722  	err = fmt.Errorf(errMsg)
   723  	condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetExternalMetric", err)
   724  	return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
   725  }
   726  
   727  func (a *HorizontalController) recordInitialRecommendation(currentReplicas int32, key string) {
   728  	a.recommendationsLock.Lock()
   729  	defer a.recommendationsLock.Unlock()
   730  	if a.recommendations[key] == nil {
   731  		a.recommendations[key] = []timestampedRecommendation{{currentReplicas, time.Now()}}
   732  	}
   733  }
   734  
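        // reconcileAutoscaler fetches the scale subresource of the HPA's target, computes the desired
        // replica count from the configured metrics, applies stabilization and behavior constraints,
        // updates the scale if needed, and records the outcome in the HPA status.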
   735  func (a *HorizontalController) reconcileAutoscaler(ctx context.Context, hpaShared *autoscalingv2.HorizontalPodAutoscaler, key string) (retErr error) {
   736  	// actionLabel is used to report which actions this reconciliation has taken.
   737  	actionLabel := monitor.ActionLabelNone
   738  	start := time.Now()
   739  	defer func() {
   740  		errorLabel := monitor.ErrorLabelNone
   741  		if retErr != nil {
   742  			// In case of error, set "internal" as default.
   743  			errorLabel = monitor.ErrorLabelInternal
   744  		}
   745  		if errors.Is(retErr, errSpec) {
   746  			errorLabel = monitor.ErrorLabelSpec
   747  		}
   748  
   749  		a.monitor.ObserveReconciliationResult(actionLabel, errorLabel, time.Since(start))
   750  	}()
   751  
   752  	// make a copy so that we never mutate the shared informer cache (conversion can mutate the object)
   753  	hpa := hpaShared.DeepCopy()
   754  	hpaStatusOriginal := hpa.Status.DeepCopy()
   755  
   756  	reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
   757  
   758  	targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
   759  	if err != nil {
   760  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
   761  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
   762  		if err := a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa); err != nil {
   763  			utilruntime.HandleError(err)
   764  		}
   765  		return fmt.Errorf("invalid API version in scale target reference: %v%w", err, errSpec)
   766  	}
   767  
   768  	targetGK := schema.GroupKind{
   769  		Group: targetGV.Group,
   770  		Kind:  hpa.Spec.ScaleTargetRef.Kind,
   771  	}
   772  
   773  	mappings, err := a.mapper.RESTMappings(targetGK)
   774  	if err != nil {
   775  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
   776  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
   777  		if err := a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa); err != nil {
   778  			utilruntime.HandleError(err)
   779  		}
   780  		return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
   781  	}
   782  
   783  	scale, targetGR, err := a.scaleForResourceMappings(ctx, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
   784  	if err != nil {
   785  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
   786  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
   787  		if err := a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa); err != nil {
   788  			utilruntime.HandleError(err)
   789  		}
   790  		return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
   791  	}
   792  	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
   793  	currentReplicas := scale.Spec.Replicas
   794  	a.recordInitialRecommendation(currentReplicas, key)
   795  
   796  	var (
   797  		metricStatuses        []autoscalingv2.MetricStatus
   798  		metricDesiredReplicas int32
   799  		metricName            string
   800  	)
   801  
   802  	desiredReplicas := int32(0)
   803  	rescaleReason := ""
   804  
   805  	var minReplicas int32
   806  
   807  	if hpa.Spec.MinReplicas != nil {
   808  		minReplicas = *hpa.Spec.MinReplicas
   809  	} else {
   810  		// Default value
   811  		minReplicas = 1
   812  	}
   813  
   814  	rescale := true
   815  	logger := klog.FromContext(ctx)
   816  
   817  	if currentReplicas == 0 && minReplicas != 0 {
   818  		// Autoscaling is disabled for this resource
   819  		desiredReplicas = 0
   820  		rescale = false
   821  		setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "ScalingDisabled", "scaling is disabled since the replica count of the target is zero")
   822  	} else if currentReplicas > hpa.Spec.MaxReplicas {
   823  		rescaleReason = "Current number of replicas above Spec.MaxReplicas"
   824  		desiredReplicas = hpa.Spec.MaxReplicas
   825  	} else if currentReplicas < minReplicas {
   826  		rescaleReason = "Current number of replicas below Spec.MinReplicas"
   827  		desiredReplicas = minReplicas
   828  	} else {
   829  		var metricTimestamp time.Time
   830  		metricDesiredReplicas, metricName, metricStatuses, metricTimestamp, err = a.computeReplicasForMetrics(ctx, hpa, scale, hpa.Spec.Metrics)
   831  		// computeReplicasForMetrics may return both non-zero metricDesiredReplicas and an error.
   832  		// That means some metrics still work and HPA should perform scaling based on them.
   833  		if err != nil && metricDesiredReplicas == -1 {
   834  			a.setCurrentReplicasAndMetricsInStatus(hpa, currentReplicas, metricStatuses)
   835  			if err := a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa); err != nil {
   836  				utilruntime.HandleError(err)
   837  			}
   838  			a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedComputeMetricsReplicas", err.Error())
   839  			return fmt.Errorf("failed to compute desired number of replicas based on listed metrics for %s: %v", reference, err)
   840  		}
   841  		if err != nil {
   842  			// We proceed to scaling, but return this error from reconcileAutoscaler() finally.
   843  			retErr = err
   844  		}
   845  
   846  		logger.V(4).Info("Proposing desired replicas",
   847  			"desiredReplicas", metricDesiredReplicas,
   848  			"metric", metricName,
   849  			"timestamp", metricTimestamp,
   850  			"scaleTarget", reference)
   851  
   852  		rescaleMetric := ""
   853  		if metricDesiredReplicas > desiredReplicas {
   854  			desiredReplicas = metricDesiredReplicas
   855  			rescaleMetric = metricName
   856  		}
   857  		if desiredReplicas > currentReplicas {
   858  			rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
   859  		}
   860  		if desiredReplicas < currentReplicas {
   861  			rescaleReason = "All metrics below target"
   862  		}
   863  		if hpa.Spec.Behavior == nil {
   864  			desiredReplicas = a.normalizeDesiredReplicas(hpa, key, currentReplicas, desiredReplicas, minReplicas)
   865  		} else {
   866  			desiredReplicas = a.normalizeDesiredReplicasWithBehaviors(hpa, key, currentReplicas, desiredReplicas, minReplicas)
   867  		}
   868  		rescale = desiredReplicas != currentReplicas
   869  	}
   870  
   871  	if rescale {
   872  		scale.Spec.Replicas = desiredReplicas
   873  		_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(ctx, targetGR, scale, metav1.UpdateOptions{})
   874  		if err != nil {
   875  			a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
   876  			setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
   877  			a.setCurrentReplicasAndMetricsInStatus(hpa, currentReplicas, metricStatuses)
   878  			if err := a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa); err != nil {
   879  				utilruntime.HandleError(err)
   880  			}
   881  			return fmt.Errorf("failed to rescale %s: %v", reference, err)
   882  		}
   883  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
   884  		a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
   885  		a.storeScaleEvent(hpa.Spec.Behavior, key, currentReplicas, desiredReplicas)
   886  		logger.Info("Successfully rescaled",
   887  			"HPA", klog.KObj(hpa),
   888  			"currentReplicas", currentReplicas,
   889  			"desiredReplicas", desiredReplicas,
   890  			"reason", rescaleReason)
   891  
   892  		if desiredReplicas > currentReplicas {
   893  			actionLabel = monitor.ActionLabelScaleUp
   894  		} else {
   895  			actionLabel = monitor.ActionLabelScaleDown
   896  		}
   897  	} else {
   898  		logger.V(4).Info("Decided not to scale",
   899  			"scaleTarget", reference,
   900  			"desiredReplicas", desiredReplicas,
   901  			"lastScaleTime", hpa.Status.LastScaleTime)
   902  		desiredReplicas = currentReplicas
   903  	}
   904  
   905  	a.setStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
   906  
   907  	err = a.updateStatusIfNeeded(ctx, hpaStatusOriginal, hpa)
   908  	if err != nil {
   909  		// we can overwrite retErr in this case because it's an internal error.
   910  		return err
   911  	}
   912  
   913  	return retErr
   914  }
   915  
   916  // stabilizeRecommendation:
   917  // - replaces old recommendation with the newest recommendation,
   918  // - returns max of recommendations that are not older than downscaleStabilisationWindow.
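        // For example, with a 5-minute stabilization window, a stored recommendation of 10 from two
        // minutes ago and a new proposal of 4, this returns 10: recent higher recommendations win, so
        // a brief dip in metrics does not trigger an immediate scale down.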
   919  func (a *HorizontalController) stabilizeRecommendation(key string, prenormalizedDesiredReplicas int32) int32 {
   920  	maxRecommendation := prenormalizedDesiredReplicas
   921  	foundOldSample := false
   922  	oldSampleIndex := 0
   923  	cutoff := time.Now().Add(-a.downscaleStabilisationWindow)
   924  
   925  	a.recommendationsLock.Lock()
   926  	defer a.recommendationsLock.Unlock()
   927  	for i, rec := range a.recommendations[key] {
   928  		if rec.timestamp.Before(cutoff) {
   929  			foundOldSample = true
   930  			oldSampleIndex = i
   931  		} else if rec.recommendation > maxRecommendation {
   932  			maxRecommendation = rec.recommendation
   933  		}
   934  	}
   935  	if foundOldSample {
   936  		a.recommendations[key][oldSampleIndex] = timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()}
   937  	} else {
   938  		a.recommendations[key] = append(a.recommendations[key], timestampedRecommendation{prenormalizedDesiredReplicas, time.Now()})
   939  	}
   940  	return maxRecommendation
   941  }
   942  
   943  // normalizeDesiredReplicas takes the desired replicas value computed from the metrics and normalizes it based on the
   944  // appropriate conditions (i.e. <= maxReplicas, >= minReplicas, etc.).
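        // For example (relying on convertDesiredReplicasWithRules, which is defined later in this file
        // and not shown in this excerpt): if the stabilized recommendation is 12 but Spec.MaxReplicas
        // is 10, the result is clamped to 10 and the ScalingLimited condition is set to true.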
   945  func (a *HorizontalController) normalizeDesiredReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas int32, prenormalizedDesiredReplicas int32, minReplicas int32) int32 {
   946  	stabilizedRecommendation := a.stabilizeRecommendation(key, prenormalizedDesiredReplicas)
   947  	if stabilizedRecommendation != prenormalizedDesiredReplicas {
   948  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ScaleDownStabilized", "recent recommendations were higher than current one, applying the highest recent recommendation")
   949  	} else {
   950  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
   951  	}
   952  
   953  	desiredReplicas, condition, reason := convertDesiredReplicasWithRules(currentReplicas, stabilizedRecommendation, minReplicas, hpa.Spec.MaxReplicas)
   954  
   955  	if desiredReplicas == stabilizedRecommendation {
   956  		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, condition, reason)
   957  	} else {
   958  		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, condition, reason)
   959  	}
   960  
   961  	return desiredReplicas
   962  }
   963  
   964  // NormalizationArg is used to pass all needed information between functions as one structure
   965  type NormalizationArg struct {
   966  	Key               string
   967  	ScaleUpBehavior   *autoscalingv2.HPAScalingRules
   968  	ScaleDownBehavior *autoscalingv2.HPAScalingRules
   969  	MinReplicas       int32
   970  	MaxReplicas       int32
   971  	CurrentReplicas   int32
   972  	DesiredReplicas   int32
   973  }
   974  
   975  // normalizeDesiredReplicasWithBehaviors takes the desired replicas value computed from the metrics and normalizes it:
   976  // 1. Apply the basic conditions (i.e. < maxReplicas, > minReplicas, etc...)
   977  // 2. Apply the scale up/down limits from the hpaSpec.Behaviors (i.e. add no more than 4 pods)
   978  // 3. Apply the constraints period (i.e. add no more than 4 pods per minute)
   979  // 4. Apply the stabilization (i.e. add no more than 4 pods per minute, and pick the smallest recommendation during the last 5 minutes)
   980  func (a *HorizontalController) normalizeDesiredReplicasWithBehaviors(hpa *autoscalingv2.HorizontalPodAutoscaler, key string, currentReplicas, prenormalizedDesiredReplicas, minReplicas int32) int32 {
   981  	a.maybeInitScaleDownStabilizationWindow(hpa)
   982  	normalizationArg := NormalizationArg{
   983  		Key:               key,
   984  		ScaleUpBehavior:   hpa.Spec.Behavior.ScaleUp,
   985  		ScaleDownBehavior: hpa.Spec.Behavior.ScaleDown,
   986  		MinReplicas:       minReplicas,
   987  		MaxReplicas:       hpa.Spec.MaxReplicas,
   988  		CurrentReplicas:   currentReplicas,
   989  		DesiredReplicas:   prenormalizedDesiredReplicas}
   990  	stabilizedRecommendation, reason, message := a.stabilizeRecommendationWithBehaviors(normalizationArg)
   991  	normalizationArg.DesiredReplicas = stabilizedRecommendation
   992  	if stabilizedRecommendation != prenormalizedDesiredReplicas {
   993  		// "ScaleUpStabilized" || "ScaleDownStabilized"
   994  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, reason, message)
   995  	} else {
   996  		setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
   997  	}
   998  	desiredReplicas, reason, message := a.convertDesiredReplicasWithBehaviorRate(normalizationArg)
   999  	if desiredReplicas == stabilizedRecommendation {
  1000  		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, reason, message)
  1001  	} else {
  1002  		setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, reason, message)
  1003  	}
  1004  
  1005  	return desiredReplicas
  1006  }
  1007  
  1008  func (a *HorizontalController) maybeInitScaleDownStabilizationWindow(hpa *autoscalingv2.HorizontalPodAutoscaler) {
  1009  	behavior := hpa.Spec.Behavior
  1010  	if behavior != nil && behavior.ScaleDown != nil && behavior.ScaleDown.StabilizationWindowSeconds == nil {
  1011  		stabilizationWindowSeconds := (int32)(a.downscaleStabilisationWindow.Seconds())
  1012  		hpa.Spec.Behavior.ScaleDown.StabilizationWindowSeconds = &stabilizationWindowSeconds
  1013  	}
  1014  }
  1015  
  1016  // getReplicasChangePerPeriod returns the sum of the replica changes recorded within the given period.
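        // For example, with periodSeconds=60 and recorded events of +4 replicas 30s ago and +2 replicas
        // 90s ago, only the first event falls inside the period, so the result is 4.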
  1017  func getReplicasChangePerPeriod(periodSeconds int32, scaleEvents []timestampedScaleEvent) int32 {
  1018  	period := time.Second * time.Duration(periodSeconds)
  1019  	cutoff := time.Now().Add(-period)
  1020  	var replicas int32
  1021  	for _, rec := range scaleEvents {
  1022  		if rec.timestamp.After(cutoff) {
  1023  			replicas += rec.replicaChange
  1024  		}
  1025  	}
  1026  	return replicas
  1027  
  1028  }
  1029  
  1030  func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa runtime.Object, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) {
  1031  	a.eventRecorder.Event(hpa, v1.EventTypeWarning, reason, err.Error())
  1032  	return autoscalingv2.HorizontalPodAutoscalerCondition{
  1033  		Type:    autoscalingv2.ScalingActive,
  1034  		Status:  v1.ConditionFalse,
  1035  		Reason:  reason,
  1036  		Message: fmt.Sprintf("the HPA was unable to compute the replica count: %v", err),
  1037  	}
  1038  }
  1039  
  1040  // storeScaleEvent stores a scale event, either appending a new one or replacing an outdated one.
  1041  // Outdated events to be replaced are marked as outdated by the `markScaleEventsOutdated` function.
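        // For example, scaling from 3 to 7 replicas with a behavior configured stores a scale-up event
        // with replicaChange=4, reusing the slot of an outdated event when one exists.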
  1042  func (a *HorizontalController) storeScaleEvent(behavior *autoscalingv2.HorizontalPodAutoscalerBehavior, key string, prevReplicas, newReplicas int32) {
  1043  	if behavior == nil {
  1044  		return // we should not store any event as they will not be used
  1045  	}
  1046  	var oldSampleIndex int
  1047  	var longestPolicyPeriod int32
  1048  	foundOldSample := false
  1049  	if newReplicas > prevReplicas {
  1050  		longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleUp)
  1051  
  1052  		a.scaleUpEventsLock.Lock()
  1053  		defer a.scaleUpEventsLock.Unlock()
  1054  		markScaleEventsOutdated(a.scaleUpEvents[key], longestPolicyPeriod)
  1055  		replicaChange := newReplicas - prevReplicas
  1056  		for i, event := range a.scaleUpEvents[key] {
  1057  			if event.outdated {
  1058  				foundOldSample = true
  1059  				oldSampleIndex = i
  1060  			}
  1061  		}
  1062  		newEvent := timestampedScaleEvent{replicaChange, time.Now(), false}
  1063  		if foundOldSample {
  1064  			a.scaleUpEvents[key][oldSampleIndex] = newEvent
  1065  		} else {
  1066  			a.scaleUpEvents[key] = append(a.scaleUpEvents[key], newEvent)
  1067  		}
  1068  	} else {
  1069  		longestPolicyPeriod = getLongestPolicyPeriod(behavior.ScaleDown)
  1070  
  1071  		a.scaleDownEventsLock.Lock()
  1072  		defer a.scaleDownEventsLock.Unlock()
  1073  		markScaleEventsOutdated(a.scaleDownEvents[key], longestPolicyPeriod)
  1074  		replicaChange := prevReplicas - newReplicas
  1075  		for i, event := range a.scaleDownEvents[key] {
  1076  			if event.outdated {
  1077  				foundOldSample = true
  1078  				oldSampleIndex = i
  1079  			}
  1080  		}
  1081  		newEvent := timestampedScaleEvent{replicaChange, time.Now(), false}
  1082  		if foundOldSample {
  1083  			a.scaleDownEvents[key][oldSampleIndex] = newEvent
  1084  		} else {
  1085  			a.scaleDownEvents[key] = append(a.scaleDownEvents[key], newEvent)
  1086  		}
  1087  	}
  1088  }
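
// Illustrative sketch (hypothetical HPA key and counts): recording a scale-up
// from 4 to 6 replicas. Assumes `a` was built by NewHorizontalController so its
// internal event maps and locks are initialized.
func exampleStoreScaleEvent(a *HorizontalController) {
	behavior := &autoscalingv2.HorizontalPodAutoscalerBehavior{
		ScaleUp: &autoscalingv2.HPAScalingRules{
			Policies: []autoscalingv2.HPAScalingPolicy{
				{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60},
			},
		},
	}
	// A replicaChange of 2 is appended under this key, or overwrites an outdated slot.
	a.storeScaleEvent(behavior, "default/my-hpa", 4, 6)
}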
  1089  
   1090  // stabilizeRecommendationWithBehaviors:
   1091  // - replaces an old recommendation with the newest recommendation,
   1092  // - returns the {max,min} of recommendations that are not older than the scaleUp/scaleDown stabilization windows
  1093  func (a *HorizontalController) stabilizeRecommendationWithBehaviors(args NormalizationArg) (int32, string, string) {
  1094  	now := time.Now()
  1095  
  1096  	foundOldSample := false
  1097  	oldSampleIndex := 0
  1098  
  1099  	upRecommendation := args.DesiredReplicas
  1100  	upDelaySeconds := *args.ScaleUpBehavior.StabilizationWindowSeconds
  1101  	upCutoff := now.Add(-time.Second * time.Duration(upDelaySeconds))
  1102  
  1103  	downRecommendation := args.DesiredReplicas
  1104  	downDelaySeconds := *args.ScaleDownBehavior.StabilizationWindowSeconds
  1105  	downCutoff := now.Add(-time.Second * time.Duration(downDelaySeconds))
  1106  
  1107  	// Calculate the upper and lower stabilization limits.
  1108  	a.recommendationsLock.Lock()
  1109  	defer a.recommendationsLock.Unlock()
  1110  	for i, rec := range a.recommendations[args.Key] {
  1111  		if rec.timestamp.After(upCutoff) {
  1112  			upRecommendation = min(rec.recommendation, upRecommendation)
  1113  		}
  1114  		if rec.timestamp.After(downCutoff) {
  1115  			downRecommendation = max(rec.recommendation, downRecommendation)
  1116  		}
  1117  		if rec.timestamp.Before(upCutoff) && rec.timestamp.Before(downCutoff) {
  1118  			foundOldSample = true
  1119  			oldSampleIndex = i
  1120  		}
  1121  	}
  1122  
  1123  	// Bring the recommendation to within the upper and lower limits (stabilize).
  1124  	recommendation := args.CurrentReplicas
  1125  	if recommendation < upRecommendation {
  1126  		recommendation = upRecommendation
  1127  	}
  1128  	if recommendation > downRecommendation {
  1129  		recommendation = downRecommendation
  1130  	}
  1131  
  1132  	// Record the unstabilized recommendation.
  1133  	if foundOldSample {
  1134  		a.recommendations[args.Key][oldSampleIndex] = timestampedRecommendation{args.DesiredReplicas, time.Now()}
  1135  	} else {
  1136  		a.recommendations[args.Key] = append(a.recommendations[args.Key], timestampedRecommendation{args.DesiredReplicas, time.Now()})
  1137  	}
  1138  
  1139  	// Determine a human-friendly message.
  1140  	var reason, message string
  1141  	if args.DesiredReplicas >= args.CurrentReplicas {
  1142  		reason = "ScaleUpStabilized"
  1143  		message = "recent recommendations were lower than current one, applying the lowest recent recommendation"
  1144  	} else {
  1145  		reason = "ScaleDownStabilized"
  1146  		message = "recent recommendations were higher than current one, applying the highest recent recommendation"
  1147  	}
  1148  	return recommendation, reason, message
  1149  }
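
// Illustrative sketch (hypothetical key and replica counts, not part of the
// controller flow): how the stabilization windows clamp a fresh recommendation.
// Assumes `a` was built by NewHorizontalController so its recommendations map and
// lock are initialized. With a 0s scale-up window and a 300s scale-down window, a
// scale-down result is min(currentReplicas, highest recommendation recorded in
// the last 5 minutes).
func exampleStabilizeRecommendation(a *HorizontalController) int32 {
	upWindow, downWindow := int32(0), int32(300)
	args := NormalizationArg{
		Key:               "default/my-hpa", // hypothetical HPA key
		ScaleUpBehavior:   &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &upWindow},
		ScaleDownBehavior: &autoscalingv2.HPAScalingRules{StabilizationWindowSeconds: &downWindow},
		CurrentReplicas:   5,
		DesiredReplicas:   3, // the metrics suggest scaling down
	}
	stabilized, reason, _ := a.stabilizeRecommendationWithBehaviors(args)
	_ = reason        // "ScaleDownStabilized"
	return stabilized // 3 if no higher recommendation was recorded recently
}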
  1150  
   1151  // convertDesiredReplicasWithBehaviorRate performs the actual normalization, given the constraining scale-up/scale-down rates.
   1152  // It does not apply the stabilization window; that is handled separately.
  1153  func (a *HorizontalController) convertDesiredReplicasWithBehaviorRate(args NormalizationArg) (int32, string, string) {
  1154  	var possibleLimitingReason, possibleLimitingMessage string
  1155  
  1156  	if args.DesiredReplicas > args.CurrentReplicas {
  1157  		a.scaleUpEventsLock.RLock()
  1158  		defer a.scaleUpEventsLock.RUnlock()
  1159  		a.scaleDownEventsLock.RLock()
  1160  		defer a.scaleDownEventsLock.RUnlock()
  1161  		scaleUpLimit := calculateScaleUpLimitWithScalingRules(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleUpBehavior)
  1162  
  1163  		if scaleUpLimit < args.CurrentReplicas {
   1164  			// We shouldn't scale up further until the scaleUpEvents are cleaned up
  1165  			scaleUpLimit = args.CurrentReplicas
  1166  		}
  1167  		maximumAllowedReplicas := args.MaxReplicas
  1168  		if maximumAllowedReplicas > scaleUpLimit {
  1169  			maximumAllowedReplicas = scaleUpLimit
  1170  			possibleLimitingReason = "ScaleUpLimit"
  1171  			possibleLimitingMessage = "the desired replica count is increasing faster than the maximum scale rate"
  1172  		} else {
  1173  			possibleLimitingReason = "TooManyReplicas"
  1174  			possibleLimitingMessage = "the desired replica count is more than the maximum replica count"
  1175  		}
  1176  		if args.DesiredReplicas > maximumAllowedReplicas {
  1177  			return maximumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
  1178  		}
  1179  	} else if args.DesiredReplicas < args.CurrentReplicas {
  1180  		a.scaleUpEventsLock.RLock()
  1181  		defer a.scaleUpEventsLock.RUnlock()
  1182  		a.scaleDownEventsLock.RLock()
  1183  		defer a.scaleDownEventsLock.RUnlock()
  1184  		scaleDownLimit := calculateScaleDownLimitWithBehaviors(args.CurrentReplicas, a.scaleUpEvents[args.Key], a.scaleDownEvents[args.Key], args.ScaleDownBehavior)
  1185  
  1186  		if scaleDownLimit > args.CurrentReplicas {
   1187  			// We shouldn't scale down further until the scaleDownEvents are cleaned up
  1188  			scaleDownLimit = args.CurrentReplicas
  1189  		}
  1190  		minimumAllowedReplicas := args.MinReplicas
  1191  		if minimumAllowedReplicas < scaleDownLimit {
  1192  			minimumAllowedReplicas = scaleDownLimit
  1193  			possibleLimitingReason = "ScaleDownLimit"
  1194  			possibleLimitingMessage = "the desired replica count is decreasing faster than the maximum scale rate"
  1195  		} else {
  1196  			possibleLimitingMessage = "the desired replica count is less than the minimum replica count"
  1197  			possibleLimitingReason = "TooFewReplicas"
  1198  		}
  1199  		if args.DesiredReplicas < minimumAllowedReplicas {
  1200  			return minimumAllowedReplicas, possibleLimitingReason, possibleLimitingMessage
  1201  		}
  1202  	}
  1203  	return args.DesiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
  1204  }
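
// Illustrative sketch (hypothetical argument values, not part of the controller
// flow): a desired jump from 10 to 100 replicas is capped by a "100% per 60s"
// scale-up policy to 20 replicas, even though maxReplicas would allow 50. A
// zero-value controller is sufficient here because the method only takes read
// locks and reads (possibly nil) event maps.
func exampleConvertWithBehaviorRate() int32 {
	a := &HorizontalController{}
	selectPolicy := autoscalingv2.MaxChangePolicySelect
	args := NormalizationArg{
		Key:             "default/my-hpa", // hypothetical HPA key
		CurrentReplicas: 10,
		DesiredReplicas: 100,
		MaxReplicas:     50,
		ScaleUpBehavior: &autoscalingv2.HPAScalingRules{
			SelectPolicy: &selectPolicy,
			Policies: []autoscalingv2.HPAScalingPolicy{
				{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60},
			},
		},
	}
	limited, reason, _ := a.convertDesiredReplicasWithBehaviorRate(args)
	_ = reason     // "ScaleUpLimit"
	return limited // 20
}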
  1205  
   1206  // convertDesiredReplicasWithRules performs the actual normalization, without depending on `HorizontalController` or `HorizontalPodAutoscaler`
  1207  func convertDesiredReplicasWithRules(currentReplicas, desiredReplicas, hpaMinReplicas, hpaMaxReplicas int32) (int32, string, string) {
  1208  
  1209  	var minimumAllowedReplicas int32
  1210  	var maximumAllowedReplicas int32
  1211  
  1212  	var possibleLimitingCondition string
  1213  	var possibleLimitingReason string
  1214  
  1215  	minimumAllowedReplicas = hpaMinReplicas
  1216  
   1217  	// Do not scale up too much, to prevent an incorrect rapid increase in the number of master replicas caused by
   1218  	// bogus CPU usage reports from heapster/kubelet (like in issue #32304).
  1219  	scaleUpLimit := calculateScaleUpLimit(currentReplicas)
  1220  
  1221  	if hpaMaxReplicas > scaleUpLimit {
  1222  		maximumAllowedReplicas = scaleUpLimit
  1223  		possibleLimitingCondition = "ScaleUpLimit"
  1224  		possibleLimitingReason = "the desired replica count is increasing faster than the maximum scale rate"
  1225  	} else {
  1226  		maximumAllowedReplicas = hpaMaxReplicas
  1227  		possibleLimitingCondition = "TooManyReplicas"
  1228  		possibleLimitingReason = "the desired replica count is more than the maximum replica count"
  1229  	}
  1230  
  1231  	if desiredReplicas < minimumAllowedReplicas {
  1232  		possibleLimitingCondition = "TooFewReplicas"
  1233  		possibleLimitingReason = "the desired replica count is less than the minimum replica count"
  1234  
  1235  		return minimumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
  1236  	} else if desiredReplicas > maximumAllowedReplicas {
  1237  		return maximumAllowedReplicas, possibleLimitingCondition, possibleLimitingReason
  1238  	}
  1239  
  1240  	return desiredReplicas, "DesiredWithinRange", "the desired count is within the acceptable range"
  1241  }
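
// Illustrative sketch (hypothetical numbers): with 4 current replicas, a raw
// recommendation of 11, minReplicas=2 and maxReplicas=10, the unconditional
// scale-up limit max(2*4, 4) = 8 wins, so the result is 8 with condition
// "ScaleUpLimit".
func exampleConvertDesiredReplicasWithRules() {
	replicas, condition, _ := convertDesiredReplicasWithRules(4, 11, 2, 10)
	_ = replicas  // 8
	_ = condition // "ScaleUpLimit"
}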
  1242  
  1243  func calculateScaleUpLimit(currentReplicas int32) int32 {
  1244  	return int32(math.Max(scaleUpLimitFactor*float64(currentReplicas), scaleUpLimitMinimum))
  1245  }
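
// Illustrative check (hypothetical counts): the unconditional limit doubles the
// current replica count but never drops below scaleUpLimitMinimum, so a single
// replica may still grow to 4 in one step, while 10 replicas may grow to 20.
func exampleUnconditionalScaleUpLimit() (int32, int32) {
	return calculateScaleUpLimit(1), calculateScaleUpLimit(10) // 4, 20
}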
  1246  
   1247  // markScaleEventsOutdated sets the 'outdated=true' flag on all scale events older than the longest policy period, so their slots can be reused
  1248  func markScaleEventsOutdated(scaleEvents []timestampedScaleEvent, longestPolicyPeriod int32) {
  1249  	period := time.Second * time.Duration(longestPolicyPeriod)
  1250  	cutoff := time.Now().Add(-period)
  1251  	for i, event := range scaleEvents {
  1252  		if event.timestamp.Before(cutoff) {
   1253  			// outdated scale events are marked for later reuse
  1254  			scaleEvents[i].outdated = true
  1255  		}
  1256  	}
  1257  }
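
// Illustrative sketch (hypothetical events): only events older than the longest
// policy period (here 120s) are flagged for reuse.
func exampleMarkScaleEventsOutdated() []timestampedScaleEvent {
	events := []timestampedScaleEvent{
		{replicaChange: 2, timestamp: time.Now().Add(-10 * time.Minute)}, // will be marked outdated
		{replicaChange: 1, timestamp: time.Now().Add(-30 * time.Second)}, // still relevant
	}
	markScaleEventsOutdated(events, 120)
	return events // events[0].outdated == true, events[1].outdated == false
}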
  1258  
  1259  func getLongestPolicyPeriod(scalingRules *autoscalingv2.HPAScalingRules) int32 {
  1260  	var longestPolicyPeriod int32
  1261  	for _, policy := range scalingRules.Policies {
  1262  		if policy.PeriodSeconds > longestPolicyPeriod {
  1263  			longestPolicyPeriod = policy.PeriodSeconds
  1264  		}
  1265  	}
  1266  	return longestPolicyPeriod
  1267  }
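
// Illustrative sketch (hypothetical rules): the longest PeriodSeconds across all
// policies bounds how long past scale events remain relevant.
func exampleLongestPolicyPeriod() int32 {
	rules := &autoscalingv2.HPAScalingRules{
		Policies: []autoscalingv2.HPAScalingPolicy{
			{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60},
			{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 300},
		},
	}
	return getLongestPolicyPeriod(rules) // 300
}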
  1268  
   1269  // calculateScaleUpLimitWithScalingRules returns the highest replica count that can be reached within the current period for the given HPAScalingRules
  1270  func calculateScaleUpLimitWithScalingRules(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
  1271  	var result int32
  1272  	var proposed int32
  1273  	var selectPolicyFn func(int32, int32) int32
  1274  	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
  1275  		return currentReplicas // Scaling is disabled
  1276  	} else if *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
  1277  		result = math.MaxInt32
  1278  		selectPolicyFn = min // For scaling up, the lowest change ('min' policy) produces a minimum value
  1279  	} else {
  1280  		result = math.MinInt32
   1281  		selectPolicyFn = max // Use the default 'Max' policy otherwise to produce the highest possible change
  1282  	}
  1283  	for _, policy := range scalingRules.Policies {
  1284  		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
  1285  		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
  1286  		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
  1287  		if policy.Type == autoscalingv2.PodsScalingPolicy {
  1288  			proposed = periodStartReplicas + policy.Value
  1289  		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
   1290  			// the proposal has to be rounded up; otherwise the proposed change might not increase the replica count, causing the target to never scale up
  1291  			proposed = int32(math.Ceil(float64(periodStartReplicas) * (1 + float64(policy.Value)/100)))
  1292  		}
  1293  		result = selectPolicyFn(result, proposed)
  1294  	}
  1295  	return result
  1296  }
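
// Illustrative sketch (hypothetical policies, not from any real HPA): with no
// recent scale events, 10 current replicas, a "100% per 60s" policy and a
// "4 pods per 60s" policy under the default Max selection, the scale-up limit
// is max(ceil(10*2), 10+4) = 20.
func exampleScaleUpLimitWithRules() int32 {
	selectPolicy := autoscalingv2.MaxChangePolicySelect
	rules := &autoscalingv2.HPAScalingRules{
		SelectPolicy: &selectPolicy,
		Policies: []autoscalingv2.HPAScalingPolicy{
			{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60},
			{Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60},
		},
	}
	return calculateScaleUpLimitWithScalingRules(10, nil, nil, rules) // 20
}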
  1297  
   1298  // calculateScaleDownLimitWithBehaviors returns the lowest replica count that can be reached within the current period for the given HPAScalingRules
  1299  func calculateScaleDownLimitWithBehaviors(currentReplicas int32, scaleUpEvents, scaleDownEvents []timestampedScaleEvent, scalingRules *autoscalingv2.HPAScalingRules) int32 {
  1300  	var result int32
  1301  	var proposed int32
  1302  	var selectPolicyFn func(int32, int32) int32
  1303  	if *scalingRules.SelectPolicy == autoscalingv2.DisabledPolicySelect {
  1304  		return currentReplicas // Scaling is disabled
  1305  	} else if *scalingRules.SelectPolicy == autoscalingv2.MinChangePolicySelect {
  1306  		result = math.MinInt32
  1307  		selectPolicyFn = max // For scaling down, the lowest change ('min' policy) produces a maximum value
  1308  	} else {
  1309  		result = math.MaxInt32
   1310  		selectPolicyFn = min // Use the default 'Max' policy otherwise to produce the largest possible change
  1311  	}
  1312  	for _, policy := range scalingRules.Policies {
  1313  		replicasAddedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleUpEvents)
  1314  		replicasDeletedInCurrentPeriod := getReplicasChangePerPeriod(policy.PeriodSeconds, scaleDownEvents)
  1315  		periodStartReplicas := currentReplicas - replicasAddedInCurrentPeriod + replicasDeletedInCurrentPeriod
  1316  		if policy.Type == autoscalingv2.PodsScalingPolicy {
  1317  			proposed = periodStartReplicas - policy.Value
  1318  		} else if policy.Type == autoscalingv2.PercentScalingPolicy {
  1319  			proposed = int32(float64(periodStartReplicas) * (1 - float64(policy.Value)/100))
  1320  		}
  1321  		result = selectPolicyFn(result, proposed)
  1322  	}
  1323  	return result
  1324  }
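
// Illustrative sketch (hypothetical policies): with 10 current replicas, no
// recent scale events, a "50% per 60s" policy and a "2 pods per 60s" policy
// under the default Max selection, the lowest reachable replica count is
// min(10*(1-0.5), 10-2) = 5.
func exampleScaleDownLimitWithRules() int32 {
	selectPolicy := autoscalingv2.MaxChangePolicySelect
	rules := &autoscalingv2.HPAScalingRules{
		SelectPolicy: &selectPolicy,
		Policies: []autoscalingv2.HPAScalingPolicy{
			{Type: autoscalingv2.PercentScalingPolicy, Value: 50, PeriodSeconds: 60},
			{Type: autoscalingv2.PodsScalingPolicy, Value: 2, PeriodSeconds: 60},
		},
	}
	return calculateScaleDownLimitWithBehaviors(10, nil, nil, rules) // 5
}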
  1325  
  1326  // scaleForResourceMappings attempts to fetch the scale for the
  1327  // resource with the given name and namespace, trying each RESTMapping
  1328  // in turn until a working one is found.  If none work, the first error
   1329  // is returned.  It returns both the scale and the group-resource from
   1330  // the working mapping.
  1331  func (a *HorizontalController) scaleForResourceMappings(ctx context.Context, namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
  1332  	var firstErr error
  1333  	for i, mapping := range mappings {
  1334  		targetGR := mapping.Resource.GroupResource()
  1335  		scale, err := a.scaleNamespacer.Scales(namespace).Get(ctx, targetGR, name, metav1.GetOptions{})
  1336  		if err == nil {
  1337  			return scale, targetGR, nil
  1338  		}
  1339  
  1340  		// if this is the first error, remember it,
  1341  		// then go on and try other mappings until we find a good one
  1342  		if i == 0 {
  1343  			firstErr = err
  1344  		}
  1345  	}
  1346  
  1347  	// make sure we handle an empty set of mappings
  1348  	if firstErr == nil {
  1349  		firstErr = fmt.Errorf("unrecognized resource")
  1350  	}
  1351  
  1352  	return nil, schema.GroupResource{}, firstErr
  1353  }
  1354  
  1355  // setCurrentReplicasAndMetricsInStatus sets the current replica count and metrics in the status of the HPA.
  1356  func (a *HorizontalController) setCurrentReplicasAndMetricsInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32, metricStatuses []autoscalingv2.MetricStatus) {
  1357  	a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, metricStatuses, false)
  1358  }
  1359  
  1360  // setStatus recreates the status of the given HPA, updating the current and
  1361  // desired replicas, as well as the metric statuses
  1362  func (a *HorizontalController) setStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) {
  1363  	hpa.Status = autoscalingv2.HorizontalPodAutoscalerStatus{
  1364  		CurrentReplicas: currentReplicas,
  1365  		DesiredReplicas: desiredReplicas,
  1366  		LastScaleTime:   hpa.Status.LastScaleTime,
  1367  		CurrentMetrics:  metricStatuses,
  1368  		Conditions:      hpa.Status.Conditions,
  1369  	}
  1370  
  1371  	if rescale {
  1372  		now := metav1.NewTime(time.Now())
  1373  		hpa.Status.LastScaleTime = &now
  1374  	}
  1375  }
  1376  
  1377  // updateStatusIfNeeded calls updateStatus only if the status of the new HPA is not the same as the old status
  1378  func (a *HorizontalController) updateStatusIfNeeded(ctx context.Context, oldStatus *autoscalingv2.HorizontalPodAutoscalerStatus, newHPA *autoscalingv2.HorizontalPodAutoscaler) error {
  1379  	// skip a write if we wouldn't need to update
  1380  	if apiequality.Semantic.DeepEqual(oldStatus, &newHPA.Status) {
  1381  		return nil
  1382  	}
  1383  	return a.updateStatus(ctx, newHPA)
  1384  }
  1385  
  1386  // updateStatus actually does the update request for the status of the given HPA
  1387  func (a *HorizontalController) updateStatus(ctx context.Context, hpa *autoscalingv2.HorizontalPodAutoscaler) error {
  1388  	_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(ctx, hpa, metav1.UpdateOptions{})
  1389  	if err != nil {
  1390  		a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
  1391  		return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
  1392  	}
  1393  	logger := klog.FromContext(ctx)
  1394  	logger.V(2).Info("Successfully updated status", "HPA", klog.KObj(hpa))
  1395  	return nil
  1396  }
  1397  
  1398  // setCondition sets the specific condition type on the given HPA to the specified value with the given reason
  1399  // and message.  The message and args are treated like a format string.  The condition will be added if it is
  1400  // not present.
  1401  func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) {
  1402  	hpa.Status.Conditions = setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
  1403  }
  1404  
  1405  // setConditionInList sets the specific condition type on the given HPA to the specified value with the given
  1406  // reason and message.  The message and args are treated like a format string.  The condition will be added if
  1407  // it is not present.  The new list will be returned.
  1408  func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition {
  1409  	resList := inputList
  1410  	var existingCond *autoscalingv2.HorizontalPodAutoscalerCondition
  1411  	for i, condition := range resList {
  1412  		if condition.Type == conditionType {
  1413  			// can't take a pointer to an iteration variable
  1414  			existingCond = &resList[i]
  1415  			break
  1416  		}
  1417  	}
  1418  
  1419  	if existingCond == nil {
  1420  		resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{
  1421  			Type: conditionType,
  1422  		})
  1423  		existingCond = &resList[len(resList)-1]
  1424  	}
  1425  
  1426  	if existingCond.Status != status {
  1427  		existingCond.LastTransitionTime = metav1.Now()
  1428  	}
  1429  
  1430  	existingCond.Status = status
  1431  	existingCond.Reason = reason
  1432  	existingCond.Message = fmt.Sprintf(message, args...)
  1433  
  1434  	return resList
  1435  }
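
// Illustrative sketch (hypothetical HPA): conditions are upserted in place, and
// LastTransitionTime only changes when the condition's status value actually flips.
func exampleSetCondition(hpa *autoscalingv2.HorizontalPodAutoscaler) {
	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
	setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "recommended size matches current size")
	// The second call overwrites the existing AbleToScale entry rather than
	// appending a duplicate, so the conditions list grows by at most one.
}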
  1436  
  1437  func max(a, b int32) int32 {
  1438  	if a >= b {
  1439  		return a
  1440  	}
  1441  	return b
  1442  }
  1443  
  1444  func min(a, b int32) int32 {
  1445  	if a <= b {
  1446  		return a
  1447  	}
  1448  	return b
  1449  }