k8s.io/kubernetes@v1.29.3/test/e2e/apimachinery/resource_quota.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apimachinery

import (
	"context"
	"encoding/json"
	"fmt"
	"strconv"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/wait"
	watch "k8s.io/apimachinery/pkg/watch"
	quota "k8s.io/apiserver/pkg/quota/v1"
	clientset "k8s.io/client-go/kubernetes"
	clientscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/client-go/util/retry"
	"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/utils/crd"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	"k8s.io/utils/pointer"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

const (
	// how long to wait for a resource quota update to occur
	resourceQuotaTimeout = time.Minute
	podName              = "pfpod"
)

var classGold = "gold"
var extendedResourceName = "example.com/dongle"

var _ = SIGDescribe("ResourceQuota", func() {
	f := framework.NewDefaultFramework("resourcequota")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, resourcequotas
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
	*/
	framework.ConformanceIt("should create a ResourceQuota and ensure its status is promptly calculated.", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
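
	// waitForResourceQuota is defined later in this file. As a hedged sketch of
	// the pattern the tests in this block rely on (poll the quota until
	// status.used matches every expected entry), it looks roughly like the
	// function literal below; the poll interval and comparison details are
	// illustrative assumptions, not the real helper.
	var _ = func(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
		return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
			rq, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			// Succeed only once every expected resource reports the expected used quantity.
			for k, v := range used {
				actual, found := rq.Status.Used[k]
				if !found || v.Cmp(actual) != 0 {
					return false, nil
				}
			}
			return true, nil
		})
	}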

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, service
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a Service. Its creation MUST be successful and resource usage count against the Service object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the Service. Deletion MUST succeed and resource usage count against the Service object MUST be released from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a service.", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Service")
		service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP, false)
		service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, service, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Creating a NodePort Service")
		nodeport := newTestServiceForQuota("test-service-np", v1.ServiceTypeNodePort, false)
		nodeport, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, nodeport, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Not allowing a LoadBalancer Service with NodePort to be created that exceeds remaining quota")
		loadbalancer := newTestServiceForQuota("test-service-lb", v1.ServiceTypeLoadBalancer, true)
		_, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, loadbalancer, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Ensuring resource quota status captures service creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceServices] = resource.MustParse("2")
		usedResources[v1.ResourceServicesNodePorts] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting Services")
		err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, service.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)
		err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, nodeport.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceServices] = resource.MustParse("0")
		usedResources[v1.ResourceServicesNodePorts] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
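
	// newTestServiceForQuota is defined later in this file. A hedged sketch of
	// the fixture the service test above builds (a Service of the requested
	// type, opting in to NodePort allocation only for LoadBalancer services):
	// the port number and exact field choices are illustrative assumptions.
	var _ = func(name string, serviceType v1.ServiceType, allocateLoadBalancerNodePorts bool) *v1.Service {
		var allocateNPs *bool
		// AllocateLoadBalancerNodePorts is only meaningful for LoadBalancer services.
		if serviceType == v1.ServiceTypeLoadBalancer {
			allocateNPs = pointer.Bool(allocateLoadBalancerNodePorts)
		}
		return &v1.Service{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec: v1.ServiceSpec{
				Type:                          serviceType,
				AllocateLoadBalancerNodePorts: allocateNPs,
				Ports: []v1.ServicePort{{
					Port:       80,
					TargetPort: intstr.FromInt32(80),
				}},
			},
		}
	}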

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, secret
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a Secret. Its creation MUST be successful and resource usage count against the Secret object and resourceQuota object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the Secret. Deletion MUST succeed and resource usage count against the Secret object MUST be released from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a secret.", func(ctx context.Context) {
		ginkgo.By("Discovering how many secrets are in the namespace by default")
		found, unchanged := 0, 0
		// On contended servers the service account controller can slow down, leading to the count changing during a run.
		// Wait (up to 30s total) for the count to hold steady for 5 consecutive seconds, assuming that updates come at a consistent rate and are not held indefinitely.
		err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
			secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			if len(secrets.Items) == found {
				// loop until the number of secrets has stabilized for 5 seconds
				unchanged++
				return unchanged > 4, nil
			}
			unchanged = 0
			found = len(secrets.Items)
			return false, nil
		})
		framework.ExpectNoError(err)
		defaultSecrets := fmt.Sprintf("%d", found)
		hardSecrets := fmt.Sprintf("%d", found+1)

		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Secret")
		secret := newTestSecretForQuota("test-secret")
		secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures secret creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
		// usage should now equal the namespace's default secrets plus the one
		// created above
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a secret")
		err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
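
	// countResourceQuota is defined later in this file. A minimal hedged sketch
	// of the baseline count the tests take first (list the ResourceQuota
	// objects already in the namespace and count them); the real helper may
	// retry until the count stabilizes, which this sketch does not.
	var _ = func(ctx context.Context, c clientset.Interface, ns string) (int, error) {
		quotas, err := c.CoreV1().ResourceQuotas(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return 0, err
		}
		return len(quotas.Items), nil
	}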

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, pod
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a Pod with resource request counts for CPU, Memory, EphemeralStorage and ExtendedResourceName. Pod creation MUST be successful and the respective resource usage counts MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Create another Pod with a resource request exceeding the remaining quota. Pod creation MUST fail as the request exceeds ResourceQuota limits.
		Update the successfully created pod's resource requests. The update MUST fail as a Pod cannot dynamically update its resource requirements.
		Delete the successfully created Pod. Pod deletion MUST be successful and it MUST release the allocated resource counts from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a pod.", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod that fits quota")
		podName := "test-pod"
		requests := v1.ResourceList{}
		limits := v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("252Mi")
		requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
		requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
		limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
		pod := newTestPodForQuota(f, podName, requests, limits)
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		podToUpdate := pod

		ginkgo.By("Ensuring ResourceQuota status captures the pod usage")
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceEphemeralStorage] = requests[v1.ResourceEphemeralStorage]
		usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = requests[v1.ResourceName(extendedResourceName)]
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Not allowing a pod to be created that exceeds remaining quota")
		requests = v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("600m")
		requests[v1.ResourceMemory] = resource.MustParse("100Mi")
		pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Not allowing a pod to be created that exceeds remaining quota (validation on extended resources)")
		requests = v1.ResourceList{}
		limits = v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("100Mi")
		requests[v1.ResourceEphemeralStorage] = resource.MustParse("30Gi")
		requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
		limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
		pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Ensuring a pod cannot update its resource requirements")
		// a pod cannot dynamically update its resource requirements.
		requests = v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("100m")
		requests[v1.ResourceMemory] = resource.MustParse("100Mi")
		requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
		podToUpdate.Spec.Containers[0].Resources.Requests = requests
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, podToUpdate, metav1.UpdateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceCPU] = resource.MustParse("0")
		usedResources[v1.ResourceMemory] = resource.MustParse("0")
		usedResources[v1.ResourceEphemeralStorage] = resource.MustParse("0")
		usedResources[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
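
	// newTestPodForQuota is defined later in this file. A hedged sketch of the
	// pod fixture the pod test above creates (a single pause container carrying
	// the supplied requests and limits, so exactly those values are charged
	// against the quota); the container name and any scheduling fields are
	// illustrative assumptions.
	var _ = func(f *framework.Framework, name string, requests, limits v1.ResourceList) *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:  "pause",
					Image: imageutils.GetPauseImageName(),
					Resources: v1.ResourceRequirements{
						Requests: requests,
						Limits:   limits,
					},
				}},
			},
		}
	}
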
	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, configmap
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a ConfigMap. Its creation MUST be successful and resource usage count against the ConfigMap object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the ConfigMap. Deletion MUST succeed and resource usage count against the ConfigMap object MUST be released from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a configMap.", func(ctx context.Context) {
		found, unchanged := 0, 0
		// On contended servers the service account controller can slow down, leading to the count changing during a run.
		// Wait (up to a minute total) for the count to hold steady for 15 consecutive seconds, assuming that updates come at a consistent rate and are not held indefinitely.
		err := wait.PollWithContext(ctx, 1*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
			configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			if len(configmaps.Items) == found {
				// loop until the number of configmaps has stabilized for 15 seconds
				unchanged++
				return unchanged > 15, nil
			}
			unchanged = 0
			found = len(configmaps.Items)
			return false, nil
		})
		framework.ExpectNoError(err)
		defaultConfigMaps := fmt.Sprintf("%d", found)
		hardConfigMaps := fmt.Sprintf("%d", found+1)

		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		resourceQuota.Spec.Hard[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ConfigMap")
		configMap := newTestConfigMapForQuota("test-configmap")
		configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures configMap creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceConfigMaps] = resource.MustParse(hardConfigMaps)
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a ConfigMap")
		err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceConfigMaps] = resource.MustParse(defaultConfigMaps)
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, replicationController
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a ReplicationController. Its creation MUST be successful and resource usage count against the ReplicationController object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the ReplicationController. Deletion MUST succeed and resource usage count against the ReplicationController object MUST be released from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a replication controller.", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ReplicationController")
		replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0)
		replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, replicationController, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures replication controller creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a ReplicationController")
		// Without the delete options, the object isn't actually
		// removed until the GC verifies that all children have been
		// detached. ReplicationControllers default to "orphan", which
		// is different from most resources. (Why? To preserve a common
		// workflow from prior to the GC's introduction.)
		err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(ctx, replicationController.Name, metav1.DeleteOptions{
			PropagationPolicy: func() *metav1.DeletionPropagation {
				p := metav1.DeletePropagationBackground
				return &p
			}(),
		})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, replicaSet
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a ReplicaSet. Its creation MUST be successful and resource usage count against the ReplicaSet object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the ReplicaSet. Deletion MUST succeed and resource usage count against the ReplicaSet object MUST be released from ResourceQuotaStatus of the ResourceQuota.
	*/
	framework.ConformanceIt("should create a ResourceQuota and capture the life of a replica set.", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ReplicaSet")
		replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0)
		replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, replicaSet, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures replicaset creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a ReplicaSet")
		err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(ctx, replicaSet.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, pvc
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a PersistentVolumeClaim (PVC) requesting storage capacity of 1Gi. PVC creation MUST be successful and resource usage count against the PVC and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the PVC. Deletion MUST succeed and resource usage count against its PVC and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
		[NotConformancePromotable] because the test suite currently has no e2e tests that explicitly verify PV and PVC behaviour.
	*/
	ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a PersistentVolumeClaim")
		pvc := newTestPersistentVolumeClaimForQuota("test-claim")
		pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a PersistentVolumeClaim")
		err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
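
	// newTestPersistentVolumeClaimForQuota is defined later in this file. A
	// hedged sketch that matches the usage asserted above (a ReadWriteOnce
	// claim requesting 1Gi, which is what gets charged to requests.storage);
	// the access mode and remaining fields are illustrative assumptions.
	var _ = func(name string) *v1.PersistentVolumeClaim {
		return &v1.PersistentVolumeClaim{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec: v1.PersistentVolumeClaimSpec{
				AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
				Resources: v1.VolumeResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceStorage: resource.MustParse("1Gi"),
					},
				},
			},
		}
	}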

	/*
		Release: v1.16
		Testname: ResourceQuota, object count quota, storageClass
		Description: Create a ResourceQuota. Creation MUST be successful and its ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a PersistentVolumeClaim (PVC) with a specified storageClass requesting storage capacity of 1Gi. PVC creation MUST be successful and resource usage count against the PVC, storageClass and storage object MUST be captured in ResourceQuotaStatus of the ResourceQuota.
		Delete the PVC. Deletion MUST succeed and resource usage count against the PVC, storageClass and storage object MUST be released from ResourceQuotaStatus of the ResourceQuota.
		[NotConformancePromotable] because the test suite currently has no e2e tests that explicitly verify PV and PVC behaviour.
	*/
	ginkgo.It("should create a ResourceQuota and capture the life of a persistent volume claim with a storage class", func(ctx context.Context) {
		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")

		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a PersistentVolumeClaim with storage class")
		pvc := newTestPersistentVolumeClaimForQuota("test-claim")
		pvc.Spec.StorageClassName = &classGold
		pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("1")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("1Gi")

		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting a PersistentVolumeClaim")
		err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(ctx, pvc.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("0")
		usedResources[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("0")

		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})

	ginkgo.It("should create a ResourceQuota and capture the life of a custom resource.", func(ctx context.Context) {
		ginkgo.By("Creating a Custom Resource Definition")
		testcrd, err := crd.CreateTestCRD(f)
		framework.ExpectNoError(err)
		ginkgo.DeferCleanup(testcrd.CleanUp)
		countResourceName := "count/" + testcrd.Crd.Spec.Names.Plural + "." + testcrd.Crd.Spec.Group
		// The resourcequota controller can take up to 30 seconds to detect the new custom resource.
		// To make sure the controller knows about this resource, create one test
		// resourcequota object and trigger updates on it until its status is updated.
		quotaName := "quota-for-" + testcrd.Crd.Spec.Names.Plural
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, &v1.ResourceQuota{
			ObjectMeta: metav1.ObjectMeta{Name: quotaName},
			Spec: v1.ResourceQuotaSpec{
				Hard: v1.ResourceList{
					v1.ResourceName(countResourceName): resource.MustParse("0"),
				},
			},
		})
		framework.ExpectNoError(err)
		err = updateResourceQuotaUntilUsageAppears(ctx, f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName))
		framework.ExpectNoError(err)
		err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(ctx, quotaName, metav1.DeleteOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Counting existing ResourceQuota")
		c, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota")
		quotaName = "test-quota"
		resourceQuota := newTestResourceQuota(quotaName)
		resourceQuota.Spec.Hard[v1.ResourceName(countResourceName)] = resource.MustParse("1")
		_, err = createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourceQuotas] = resource.MustParse(strconv.Itoa(c + 1))
		usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a custom resource")
		resourceClient := testcrd.DynamicClients["v1"]
		testcr, err := instantiateCustomResource(ctx, &unstructured.Unstructured{
			Object: map[string]interface{}{
				"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
				"kind":       testcrd.Crd.Spec.Names.Kind,
				"metadata": map[string]interface{}{
					"name": "test-cr-1",
				},
			},
		}, resourceClient, testcrd.Crd)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status captures custom resource creation")
		usedResources = v1.ResourceList{}
		usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a second custom resource")
		_, err = instantiateCustomResource(ctx, &unstructured.Unstructured{
			Object: map[string]interface{}{
				"apiVersion": testcrd.Crd.Spec.Group + "/" + testcrd.Crd.Spec.Versions[0].Name,
				"kind":       testcrd.Crd.Spec.Names.Kind,
				"metadata": map[string]interface{}{
					"name": "test-cr-2",
				},
			},
		}, resourceClient, testcrd.Crd)
		// since the quota allows only one of these custom resources, this creation should fail.
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Deleting a custom resource")
		err = deleteCustomResource(ctx, resourceClient, testcr.GetName())
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released usage")
		usedResources[v1.ResourceName(countResourceName)] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quotaName, usedResources)
		framework.ExpectNoError(err)
	})
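
	// updateResourceQuotaUntilUsageAppears is defined later in this file. A
	// hedged sketch of the idea described in the comment above (keep nudging
	// spec.hard for the resource so the controller re-syncs, until the resource
	// appears in status.used); the increment and retry policy are illustrative
	// assumptions.
	var _ = func(ctx context.Context, c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
		return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
			rq, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			if _, ok := rq.Status.Used[resourceName]; ok {
				return true, nil
			}
			// Bump the hard limit to force the controller to recalculate status.
			current := rq.Spec.Hard[resourceName]
			current.Add(resource.MustParse("1"))
			rq.Spec.Hard[resourceName] = current
			_, err = c.CoreV1().ResourceQuotas(ns).Update(ctx, rq, metav1.UpdateOptions{})
			return false, err
		})
	}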

	/*
		Release: v1.16
		Testname: ResourceQuota, quota scope, Terminating and NotTerminating scope
		Description: Create two ResourceQuotas, one with 'Terminating' scope and another with 'NotTerminating' scope. Requests and limits for CPU and Memory resources are set for the ResourceQuotas. Creation MUST be successful and their ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a Pod whose specified CPU and Memory ResourceRequirements fall within the quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of the 'NotTerminating' scoped ResourceQuota but MUST NOT be captured in the 'Terminating' scoped ResourceQuota.
		Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of the 'NotTerminating' scoped ResourceQuota.
		Create a Pod with a specified activeDeadlineSeconds whose resourceRequirements for CPU and Memory fall within the quota limits. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of the 'Terminating' scoped ResourceQuota but MUST NOT be captured in the 'NotTerminating' scoped ResourceQuota.
		Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of the 'Terminating' scoped ResourceQuota.
	*/
	framework.ConformanceIt("should verify ResourceQuota with terminating scopes.", func(ctx context.Context) {
		ginkgo.By("Creating a ResourceQuota with terminating scope")
		quotaTerminatingName := "quota-terminating"
		resourceQuotaTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota with not terminating scope")
		quotaNotTerminatingName := "quota-not-terminating"
		resourceQuotaNotTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a long running pod")
		podName := "test-pod"
		requests := v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
		limits := v1.ResourceList{}
		limits[v1.ResourceCPU] = resource.MustParse("1")
		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
		pod := newTestPodForQuota(f, podName, requests, limits)
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a terminating pod")
		podName = "terminating-pod"
		pod = newTestPodForQuota(f, podName, requests, limits)
		activeDeadlineSeconds := int64(3600)
		pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.16
		Testname: ResourceQuota, quota scope, BestEffort and NotBestEffort scope
		Description: Create two ResourceQuotas, one with 'BestEffort' scope and another with 'NotBestEffort' scope. Creation MUST be successful and their ResourceQuotaStatus MUST match the expected used and total allowed resource quota counts within the namespace.
		Create a 'BestEffort' Pod by not explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of the 'BestEffort' scoped ResourceQuota but MUST NOT be captured in the 'NotBestEffort' scoped ResourceQuota.
		Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of the 'BestEffort' scoped ResourceQuota.
		Create a 'NotBestEffort' Pod by explicitly specifying resource limits and requests. Pod creation MUST be successful and usage count MUST be captured in ResourceQuotaStatus of the 'NotBestEffort' scoped ResourceQuota but MUST NOT be captured in the 'BestEffort' scoped ResourceQuota.
		Delete the Pod. Pod deletion MUST succeed and Pod resource usage count MUST be released from ResourceQuotaStatus of the 'NotBestEffort' scoped ResourceQuota.
	*/
	framework.ConformanceIt("should verify ResourceQuota with best effort scope.", func(ctx context.Context) {
		ginkgo.By("Creating a ResourceQuota with best effort scope")
		resourceQuotaBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		usedResources := v1.ResourceList{}
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a ResourceQuota with not best effort scope")
		resourceQuotaNotBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring ResourceQuota status is calculated")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a best-effort pod")
		pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with not best effort ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a not best-effort pod")
		requests := v1.ResourceList{}
		requests[v1.ResourceCPU] = resource.MustParse("500m")
		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
		limits := v1.ResourceList{}
		limits[v1.ResourceCPU] = resource.MustParse("1")
		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
		pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("1")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
		framework.ExpectNoError(err)

		ginkgo.By("Deleting the pod")
		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring resource quota status released the pod usage")
		usedResources[v1.ResourcePods] = resource.MustParse("0")
		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
		framework.ExpectNoError(err)
	})
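
	// newTestResourceQuotaWithScope is defined later in this file. A hedged
	// sketch of the scoped fixture used by the two scope tests above (hard
	// limits plus a single entry in spec.scopes, so only pods matching that
	// scope are counted); the specific hard limits are illustrative
	// assumptions.
	var _ = func(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
		hard := v1.ResourceList{}
		hard[v1.ResourcePods] = resource.MustParse("5")
		// The Terminating/NotTerminating tests also track compute resources.
		switch scope {
		case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
			hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
			hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
			hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
			hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
		}
		return &v1.ResourceQuota{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Spec:       v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
		}
	}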

	/*
		Release: v1.16
		Testname: ResourceQuota, update and delete
		Description: Create a ResourceQuota with CPU and Memory quota limits. Creation MUST be successful.
		When the ResourceQuota is updated to modify the CPU and Memory quota limits, the update MUST succeed with the updated values for CPU and Memory limits.
		When the ResourceQuota is deleted, it MUST no longer be available in the namespace.
	*/
	framework.ConformanceIt("should be able to update and delete ResourceQuota.", func(ctx context.Context) {
		client := f.ClientSet
		ns := f.Namespace.Name

		ginkgo.By("Creating a ResourceQuota")
		quotaName := "test-quota"
		resourceQuota := &v1.ResourceQuota{
			Spec: v1.ResourceQuotaSpec{
				Hard: v1.ResourceList{},
			},
		}
		resourceQuota.ObjectMeta.Name = quotaName
		resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("1")
		resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("500Mi")
		_, err := createResourceQuota(ctx, client, ns, resourceQuota)
		framework.ExpectNoError(err)

		ginkgo.By("Getting a ResourceQuota")
		resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceCPU, resource.MustParse("1")))
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceMemory, resource.MustParse("500Mi")))

		ginkgo.By("Updating a ResourceQuota")
		resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2")
		resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi")
		resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(ctx, resourceQuota, metav1.UpdateOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceCPU, resource.MustParse("2")))
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceMemory, resource.MustParse("1Gi")))

		ginkgo.By("Verifying a ResourceQuota was modified")
		resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceCPU, resource.MustParse("2")))
		gomega.Expect(resourceQuotaResult.Spec.Hard).To(gomega.HaveKeyWithValue(v1.ResourceMemory, resource.MustParse("1Gi")))

		ginkgo.By("Deleting a ResourceQuota")
		err = deleteResourceQuota(ctx, client, ns, quotaName)
		framework.ExpectNoError(err)

		ginkgo.By("Verifying the deleted ResourceQuota")
		_, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
		if !apierrors.IsNotFound(err) {
			framework.Failf("Expected `not found` error, got: %v", err)
		}
	})
   936  
   937  	/*
   938  		Release: v1.25
   939  		Testname: ResourceQuota, manage lifecycle of a ResourceQuota
   940  		Description: Attempt to create a ResourceQuota for CPU and Memory
   941  		quota limits. Creation MUST be successful. Attempt to list
   942  		ResourceQuotas across all namespaces with a label selector, which
   943  		MUST succeed; exactly one ResourceQuota MUST be found. Patching
   944  		the ResourceQuota MUST succeed, and after the patch its fields
   945  		MUST equal the new values. It MUST succeed at deleting a
   946  		collection of ResourceQuotas via a label selector.
   947  	*/
   948  	framework.ConformanceIt("should manage the lifecycle of a ResourceQuota", func(ctx context.Context) {
   949  		client := f.ClientSet
   950  		ns := f.Namespace.Name
   951  
   952  		rqName := "e2e-quota-" + utilrand.String(5)
   953  		label := map[string]string{"e2e-rq-label": rqName}
   954  		labelSelector := labels.SelectorFromSet(label).String()
   955  
   956  		ginkgo.By("Creating a ResourceQuota")
   957  		resourceQuota := &v1.ResourceQuota{
   958  			ObjectMeta: metav1.ObjectMeta{
   959  				Name:   rqName,
   960  				Labels: label,
   961  			},
   962  			Spec: v1.ResourceQuotaSpec{
   963  				Hard: v1.ResourceList{},
   964  			},
   965  		}
   966  		resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("1")
   967  		resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("500Mi")
   968  		_, err := createResourceQuota(ctx, client, ns, resourceQuota)
   969  		framework.ExpectNoError(err)
   970  
   971  		ginkgo.By("Getting a ResourceQuota")
   972  		resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(ctx, rqName, metav1.GetOptions{})
   973  		framework.ExpectNoError(err)
   974  		gomega.Expect(resourceQuotaResult.Spec.Hard[v1.ResourceCPU]).To(gomega.Equal(resource.MustParse("1")))
   975  		gomega.Expect(resourceQuotaResult.Spec.Hard[v1.ResourceMemory]).To(gomega.Equal(resource.MustParse("500Mi")))
   976  
   977  		ginkgo.By("Listing all ResourceQuotas with LabelSelector")
   978  		rq, err := client.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
   979  		framework.ExpectNoError(err, "failed to list ResourceQuotas with selector %q", labelSelector)
   980  		gomega.Expect(rq.Items).To(gomega.HaveLen(1), "failed to find exactly one ResourceQuota %q", rqName)
   981  
   982  		ginkgo.By("Patching the ResourceQuota")
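        		// A strategic merge patch merges maps: the new label is added alongside the
        		// existing labels, and spec.hard.memory is updated without clearing spec.hard.cpu.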
   983  		payload := fmt.Sprintf(`{"metadata":{"labels":{%q:"patched"}},"spec":{"hard":{"memory":"750Mi"}}}`, rqName)
   984  		patchedResourceQuota, err := client.CoreV1().ResourceQuotas(ns).Patch(ctx, rqName, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
   985  		framework.ExpectNoError(err, "failed to patch ResourceQuota %s in namespace %s", rqName, ns)
   986  		gomega.Expect(patchedResourceQuota.Labels[rqName]).To(gomega.Equal("patched"), "Failed to find the label for this ResourceQuota. Current labels: %v", patchedResourceQuota.Labels)
   987  		gomega.Expect(*patchedResourceQuota.Spec.Hard.Memory()).To(gomega.Equal(resource.MustParse("750Mi")), "Hard memory value for ResourceQuota %q is %s not 750Mi.", patchedResourceQuota.ObjectMeta.Name, patchedResourceQuota.Spec.Hard.Memory().String())
   988  
   989  		ginkgo.By("Deleting a Collection of ResourceQuotas")
   990  		err = client.CoreV1().ResourceQuotas(ns).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: labelSelector})
   991  		framework.ExpectNoError(err)
   992  
   993  		ginkgo.By("Verifying the deleted ResourceQuota")
   994  		_, err = client.CoreV1().ResourceQuotas(ns).Get(ctx, rqName, metav1.GetOptions{})
   995  		if !apierrors.IsNotFound(err) {
   996  			framework.Failf("Expected `not found` error, got: %v", err)
   997  		}
   998  	})
   999  
  1000  	/*
  1001  		Release: v1.26
  1002  		Testname: ResourceQuota, apply changes to a ResourceQuota status
  1003  		Description: Attempt to create a ResourceQuota for CPU and Memory
  1004  		quota limits. Creation MUST be successful. Updating the hard
  1005  		status values MUST succeed and the new values MUST be found. The
  1006  		reported hard status values MUST equal the spec hard values.
  1007  		Patching the spec hard values MUST succeed and the new values MUST
  1008  		be found. Patching the hard status values MUST succeed. The
  1009  		reported hard status values MUST equal the new spec hard values.
  1010  		Getting the /status MUST succeed and the reported hard status
  1011  		values MUST equal the spec hard values. Repatching the hard status
  1012  		values MUST succeed. The spec MUST NOT be changed when
  1013  		patching /status.
  1014  	*/
  1015  	framework.ConformanceIt("should apply changes to a resourcequota status", func(ctx context.Context) {
  1016  		ns := f.Namespace.Name
  1017  		rqClient := f.ClientSet.CoreV1().ResourceQuotas(ns)
  1018  		rqName := "e2e-rq-status-" + utilrand.String(5)
  1019  		label := map[string]string{"e2e-rq-label": rqName}
  1020  		labelSelector := labels.SelectorFromSet(label).String()
  1021  
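        		// Only a WatchFunc is needed here: watchtools.Until starts watching from an
        		// explicit resourceVersion, which the List call below supplies.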
  1022  		w := &cache.ListWatch{
  1023  			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
  1024  				options.LabelSelector = labelSelector
  1025  				return rqClient.Watch(ctx, options)
  1026  			},
  1027  		}
  1028  
  1029  		rqList, err := f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
  1030  		framework.ExpectNoError(err, "failed to list ResourceQuotas")
  1031  
  1032  		ginkgo.By(fmt.Sprintf("Creating resourceQuota %q", rqName))
  1033  		resourceQuota := &v1.ResourceQuota{
  1034  			ObjectMeta: metav1.ObjectMeta{
  1035  				Name:   rqName,
  1036  				Labels: label,
  1037  			},
  1038  			Spec: v1.ResourceQuotaSpec{
  1039  				Hard: v1.ResourceList{
  1040  					v1.ResourceCPU:    resource.MustParse("500m"),
  1041  					v1.ResourceMemory: resource.MustParse("500Mi"),
  1042  				},
  1043  			},
  1044  		}
  1045  		_, err = createResourceQuota(ctx, f.ClientSet, ns, resourceQuota)
  1046  		framework.ExpectNoError(err)
  1047  
  1048  		initialResourceQuota, err := rqClient.Get(ctx, rqName, metav1.GetOptions{})
  1049  		framework.ExpectNoError(err)
  1050  		gomega.Expect(*initialResourceQuota.Spec.Hard.Cpu()).To(gomega.Equal(resource.MustParse("500m")), "Hard cpu value for ResourceQuota %q is %s not 500m.", initialResourceQuota.Name, initialResourceQuota.Spec.Hard.Cpu().String())
  1051  		framework.Logf("Resource quota %q reports spec: hard cpu limit of %s", rqName, initialResourceQuota.Spec.Hard.Cpu())
  1052  		gomega.Expect(*initialResourceQuota.Spec.Hard.Memory()).To(gomega.Equal(resource.MustParse("500Mi")), "Hard memory value for ResourceQuota %q is %s not 500Mi.", initialResourceQuota.Name, initialResourceQuota.Spec.Hard.Memory().String())
  1053  		framework.Logf("Resource quota %q reports spec: hard memory limit of %s", rqName, initialResourceQuota.Spec.Hard.Memory())
  1054  
  1055  		ginkgo.By(fmt.Sprintf("Updating resourceQuota %q /status", rqName))
  1056  		var updatedResourceQuota *v1.ResourceQuota
  1057  		hardLimits := quota.Add(v1.ResourceList{}, initialResourceQuota.Spec.Hard)
  1058  
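        		// RetryOnConflict re-reads the quota on every attempt, so a stale
        		// resourceVersion from a concurrent write cannot permanently fail the update.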
  1059  		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
  1060  			updateStatus, err := rqClient.Get(ctx, rqName, metav1.GetOptions{})
  1061  			framework.ExpectNoError(err, "Unable to get ResourceQuota %q", rqName)
  1062  			updateStatus.Status = v1.ResourceQuotaStatus{
  1063  				Hard: hardLimits,
  1064  			}
  1065  			updatedResourceQuota, err = rqClient.UpdateStatus(ctx, updateStatus, metav1.UpdateOptions{})
  1066  			return err
  1067  		})
  1068  		framework.ExpectNoError(err, "Failed to update resourceQuota")
  1069  
  1070  		ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName))
  1071  		ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStartShort)
  1072  		defer cancel()
  1073  
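        		// Until replays events starting at rqList.ResourceVersion and returns once the
        		// condition below observes the updated hard status.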
  1074  		_, err = watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  1075  			if rq, ok := event.Object.(*v1.ResourceQuota); ok {
  1076  				found := rq.Name == updatedResourceQuota.Name &&
  1077  					rq.Namespace == ns &&
  1078  					apiequality.Semantic.DeepEqual(rq.Status.Hard, updatedResourceQuota.Spec.Hard)
  1079  				if !found {
  1080  					framework.Logf("observed resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1081  					return false, nil
  1082  				}
  1083  				framework.Logf("Found resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1084  				return found, nil
  1085  			}
  1086  			framework.Logf("Observed event: %+v", event.Object)
  1087  			return false, nil
  1088  		})
  1089  		framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", updatedResourceQuota.Name, ns)
  1090  		framework.Logf("ResourceQuota %q /status was updated", updatedResourceQuota.Name)
  1091  
  1092  		// Sync resourceQuota list before patching /status
  1093  		rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
  1094  		framework.ExpectNoError(err, "failed to list ResourceQuotas")
  1095  
  1096  		ginkgo.By("Patching hard spec values for cpu & memory")
  1097  		xResourceQuota, err := rqClient.Patch(ctx, updatedResourceQuota.Name, types.StrategicMergePatchType,
  1098  			[]byte(`{"spec":{"hard":{"cpu":"1","memory":"1Gi"}}}`),
  1099  			metav1.PatchOptions{})
  1100  		framework.ExpectNoError(err, "could not patch ResourceQuota %q", rqName)
  1101  		framework.Logf("Resource quota %q reports spec: hard cpu limit of %s", rqName, xResourceQuota.Spec.Hard.Cpu())
  1102  		framework.Logf("Resource quota %q reports spec: hard memory limit of %s", rqName, xResourceQuota.Spec.Hard.Memory())
  1103  
  1104  		ginkgo.By(fmt.Sprintf("Patching %q /status", rqName))
  1105  		hardLimits = quota.Add(v1.ResourceList{}, xResourceQuota.Spec.Hard)
  1106  
  1107  		rqStatusJSON, err := json.Marshal(hardLimits)
  1108  		framework.ExpectNoError(err)
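        		// The trailing "status" argument targets the /status subresource, so this
        		// patch can change status.hard but cannot modify the spec.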
  1109  		patchedResourceQuota, err := rqClient.Patch(ctx, rqName, types.StrategicMergePatchType,
  1110  			[]byte(`{"status": {"hard": `+string(rqStatusJSON)+`}}`),
  1111  			metav1.PatchOptions{}, "status")
  1112  		framework.ExpectNoError(err)
  1113  
  1114  		ginkgo.By(fmt.Sprintf("Confirm /status for %q resourceQuota via watch", rqName))
  1115  		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStartShort)
  1116  		defer cancel()
  1117  
  1118  		_, err = watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  1119  			if rq, ok := event.Object.(*v1.ResourceQuota); ok {
  1120  				found := rq.Name == patchedResourceQuota.Name &&
  1121  					rq.Namespace == ns &&
  1122  					apiequality.Semantic.DeepEqual(rq.Status.Hard, patchedResourceQuota.Spec.Hard)
  1123  				if !found {
  1124  					framework.Logf("observed resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1125  					return false, nil
  1126  				}
  1127  				framework.Logf("Found resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1128  				return found, nil
  1129  			}
  1130  			framework.Logf("Observed event: %+v", event.Object)
  1131  			return false, nil
  1132  		})
  1133  		framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", patchedResourceQuota.Name, ns)
  1134  		framework.Logf("ResourceQuota %q /status was patched", patchedResourceQuota.Name)
  1135  
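        		// Use the dynamic client so the /status subresource can be fetched directly
        		// and decoded from its unstructured representation.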
  1136  		ginkgo.By(fmt.Sprintf("Get %q /status", rqName))
  1137  		rqResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "resourcequotas"}
  1138  		unstruct, err := f.DynamicClient.Resource(rqResource).Namespace(ns).Get(ctx, resourceQuota.Name, metav1.GetOptions{}, "status")
  1139  		framework.ExpectNoError(err)
  1140  
  1141  		rq, err := unstructuredToResourceQuota(unstruct)
  1142  		framework.ExpectNoError(err, "converting unstructured /status response to ResourceQuota %q", rqName)
  1143  
  1144  		gomega.Expect(*rq.Status.Hard.Cpu()).To(gomega.Equal(resource.MustParse("1")), "Hard cpu value for ResourceQuota %q is %s not 1.", rq.Name, rq.Status.Hard.Cpu().String())
  1145  		framework.Logf("Resourcequota %q reports status: hard cpu of %s", rqName, rq.Status.Hard.Cpu())
  1146  		gomega.Expect(*rq.Status.Hard.Memory()).To(gomega.Equal(resource.MustParse("1Gi")), "Hard memory value for ResourceQuota %q is %s not 1Gi.", rq.Name, rq.Status.Hard.Memory().String())
  1147  		framework.Logf("Resourcequota %q reports status: hard memory of %s", rqName, rq.Status.Hard.Memory())
  1148  
  1149  		// Sync resourceQuota list before repatching /status
  1150  		rqList, err = f.ClientSet.CoreV1().ResourceQuotas("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
  1151  		framework.ExpectNoError(err, "failed to list ResourceQuotas")
  1152  
  1153  		ginkgo.By(fmt.Sprintf("Repatching %q /status before checking Spec is unchanged", rqName))
  1154  		newHardLimits := v1.ResourceList{
  1155  			v1.ResourceCPU:    resource.MustParse("2"),
  1156  			v1.ResourceMemory: resource.MustParse("2Gi"),
  1157  		}
  1158  		rqStatusJSON, err = json.Marshal(newHardLimits)
  1159  		framework.ExpectNoError(err)
  1160  
  1161  		repatchedResourceQuota, err := rqClient.Patch(ctx, rqName, types.StrategicMergePatchType,
  1162  			[]byte(`{"status": {"hard": `+string(rqStatusJSON)+`}}`),
  1163  			metav1.PatchOptions{}, "status")
  1164  		framework.ExpectNoError(err)
  1165  
  1166  		gomega.Expect(*repatchedResourceQuota.Status.Hard.Cpu()).To(gomega.Equal(resource.MustParse("2")), "Hard cpu value for ResourceQuota %q is %s not 2.", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Cpu().String())
  1167  		framework.Logf("Resourcequota %q reports status: hard cpu of %s", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Cpu())
  1168  		gomega.Expect(*repatchedResourceQuota.Status.Hard.Memory()).To(gomega.Equal(resource.MustParse("2Gi")), "Hard memory value for ResourceQuota %q is %s not 2Gi.", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Memory().String())
  1169  		framework.Logf("Resourcequota %q reports status: hard memory of %s", repatchedResourceQuota.Name, repatchedResourceQuota.Status.Hard.Memory())
  1170  
  1171  		_, err = watchtools.Until(ctxUntil, rqList.ResourceVersion, w, func(event watch.Event) (bool, error) {
  1172  			if rq, ok := event.Object.(*v1.ResourceQuota); ok {
  1173  				found := rq.Name == patchedResourceQuota.Name &&
  1174  					rq.Namespace == ns &&
  1175  					rq.Status.Hard.Cpu().Cmp(resource.MustParse("2")) == 0 &&
  1176  					rq.Status.Hard.Memory().Cmp(resource.MustParse("2Gi")) == 0
  1177  				if !found {
  1178  					framework.Logf("observed resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1179  					return false, nil
  1180  				}
  1181  				framework.Logf("Found resourceQuota %q in namespace %q with hard status: %#v", rq.Name, rq.Namespace, rq.Status.Hard)
  1182  				return found, nil
  1183  			}
  1184  			framework.Logf("Observed event: %+v", event.Object)
  1185  			return false, nil
  1186  		})
  1187  		framework.ExpectNoError(err, "failed to locate ResourceQuota %q in namespace %q", patchedResourceQuota.Name, ns)
  1188  
  1189  		// The resource quota controller ignores changes to /status, so we have to wait for a full resync of the
  1190  		// controller to reconcile the status again. The full resync interval defaults to 5 minutes, so poll at
  1191  		// least one minute longer in case polling starts just after a full resync and we have to wait for the
  1192  		// next one.
  1193  		// Ref: https://issues.k8s.io/121911
  1194  		err = wait.PollUntilContextTimeout(ctx, 5*time.Second, 6*time.Minute, true, func(ctx context.Context) (bool, error) {
  1195  			resourceQuotaResult, err := rqClient.Get(ctx, rqName, metav1.GetOptions{})
  1196  			if err != nil {
  1197  				return false, nil
  1198  			}
  1199  
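        			// Compare quantities with Cmp: == on resource.Quantity compares the internal
        			// representation, so semantically equal values may compare unequal.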
  1200  			if resourceQuotaResult.Spec.Hard.Cpu().Cmp(*resourceQuotaResult.Status.Hard.Cpu()) == 0 {
  1201  				if resourceQuotaResult.Status.Hard.Cpu().Cmp(resource.MustParse("1")) != 0 {
  1202  					framework.Logf("Hard cpu status value for ResourceQuota %q is %s not 1.", resourceQuotaResult.Name, resourceQuotaResult.Status.Hard.Cpu().String())
  1203  					return false, nil
  1204  				}
  1205  				if resourceQuotaResult.Status.Hard.Memory().Cmp(resource.MustParse("1Gi")) != 0 {
  1206  					framework.Logf("Hard memory status value for ResourceQuota %q is %s not 1Gi.", resourceQuotaResult.Name, resourceQuotaResult.Status.Hard.Memory().String())
  1207  					return false, nil
  1208  				}
  1209  				framework.Logf("ResourceQuota %q Spec was unchanged and /status reset", resourceQuotaResult.Name)
  1210  				return true, nil
  1211  			}
  1212  			framework.Logf("ResourceQuota %q Spec and Status do not match: %#v", resourceQuotaResult.Name, resourceQuotaResult)
  1213  			return false, nil
  1214  		})
  1215  		if err != nil {
  1216  			framework.Failf("Error waiting for ResourceQuota %q to reset its Status: %v", patchedResourceQuota.Name, err)
  1217  		}
  1218  
  1219  	})
  1220  })
  1221  
  1222  var _ = SIGDescribe("ResourceQuota", feature.ScopeSelectors, func() {
  1223  	f := framework.NewDefaultFramework("scope-selectors")
  1224  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
  1225  	ginkgo.It("should verify ResourceQuota with best effort scope using scope-selectors.", func(ctx context.Context) {
  1226  		ginkgo.By("Creating a ResourceQuota with best effort scope")
  1227  		resourceQuotaBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
  1228  		framework.ExpectNoError(err)
  1229  
  1230  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1231  		usedResources := v1.ResourceList{}
  1232  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1233  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
  1234  		framework.ExpectNoError(err)
  1235  
  1236  		ginkgo.By("Creating a ResourceQuota with not best effort scope")
  1237  		resourceQuotaNotBestEffort, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
  1238  		framework.ExpectNoError(err)
  1239  
  1240  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1241  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
  1242  		framework.ExpectNoError(err)
  1243  
  1244  		ginkgo.By("Creating a best-effort pod")
  1245  		pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
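        		// Empty requests and limits give the pod BestEffort QoS, so only the
        		// best-effort-scoped quota should account for it.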
  1246  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1247  		framework.ExpectNoError(err)
  1248  
  1249  		ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
  1250  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1251  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
  1252  		framework.ExpectNoError(err)
  1253  
  1254  		ginkgo.By("Ensuring resource quota with not best effort scope ignored the pod usage")
  1255  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1256  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
  1257  		framework.ExpectNoError(err)
  1258  
  1259  		ginkgo.By("Deleting the pod")
  1260  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1261  		framework.ExpectNoError(err)
  1262  
  1263  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1264  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1265  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
  1266  		framework.ExpectNoError(err)
  1267  
  1268  		ginkgo.By("Creating a not best-effort pod")
  1269  		requests := v1.ResourceList{}
  1270  		requests[v1.ResourceCPU] = resource.MustParse("500m")
  1271  		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
  1272  		limits := v1.ResourceList{}
  1273  		limits[v1.ResourceCPU] = resource.MustParse("1")
  1274  		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
  1275  		pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
  1276  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1277  		framework.ExpectNoError(err)
  1278  
  1279  		ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
  1280  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1281  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
  1282  		framework.ExpectNoError(err)
  1283  
  1284  		ginkgo.By("Ensuring resource quota with best effort scope ignored the pod usage")
  1285  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1286  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
  1287  		framework.ExpectNoError(err)
  1288  
  1289  		ginkgo.By("Deleting the pod")
  1290  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1291  		framework.ExpectNoError(err)
  1292  
  1293  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1294  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1295  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
  1296  		framework.ExpectNoError(err)
  1297  	})
  1298  	ginkgo.It("should verify ResourceQuota with terminating scopes through scope selectors.", func(ctx context.Context) {
  1299  		ginkgo.By("Creating a ResourceQuota with terminating scope")
  1300  		quotaTerminatingName := "quota-terminating"
  1301  		resourceQuotaTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
  1302  		framework.ExpectNoError(err)
  1303  
  1304  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1305  		usedResources := v1.ResourceList{}
  1306  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1307  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
  1308  		framework.ExpectNoError(err)
  1309  
  1310  		ginkgo.By("Creating a ResourceQuota with not terminating scope")
  1311  		quotaNotTerminatingName := "quota-not-terminating"
  1312  		resourceQuotaNotTerminating, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
  1313  		framework.ExpectNoError(err)
  1314  
  1315  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1316  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
  1317  		framework.ExpectNoError(err)
  1318  
  1319  		ginkgo.By("Creating a long running pod")
  1320  		podName := "test-pod"
  1321  		requests := v1.ResourceList{}
  1322  		requests[v1.ResourceCPU] = resource.MustParse("500m")
  1323  		requests[v1.ResourceMemory] = resource.MustParse("200Mi")
  1324  		limits := v1.ResourceList{}
  1325  		limits[v1.ResourceCPU] = resource.MustParse("1")
  1326  		limits[v1.ResourceMemory] = resource.MustParse("400Mi")
  1327  		pod := newTestPodForQuota(f, podName, requests, limits)
  1328  		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1329  		framework.ExpectNoError(err)
  1330  
  1331  		ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
  1332  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1333  		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
  1334  		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
  1335  		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
  1336  		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
  1337  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
  1338  		framework.ExpectNoError(err)
  1339  
  1340  		ginkgo.By("Ensuring resource quota with terminating scope ignored the pod usage")
  1341  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1342  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1343  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
  1344  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1345  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
  1346  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
  1347  		framework.ExpectNoError(err)
  1348  
  1349  		ginkgo.By("Deleting the pod")
  1350  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
  1351  		framework.ExpectNoError(err)
  1352  
  1353  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1354  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1355  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1356  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
  1357  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1358  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
  1359  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
  1360  		framework.ExpectNoError(err)
  1361  
  1362  		ginkgo.By("Creating a terminating pod")
  1363  		podName = "terminating-pod"
  1364  		pod = newTestPodForQuota(f, podName, requests, limits)
  1365  		activeDeadlineSeconds := int64(3600)
  1366  		pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
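        		// A pod with spec.activeDeadlineSeconds set matches the Terminating quota scope.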
  1367  		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1368  		framework.ExpectNoError(err)
  1369  
  1370  		ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
  1371  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1372  		usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
  1373  		usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
  1374  		usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
  1375  		usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
  1376  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
  1377  		framework.ExpectNoError(err)
  1378  
  1379  		ginkgo.By("Ensuring resource quota with not terminating scope ignored the pod usage")
  1380  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1381  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1382  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
  1383  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1384  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
  1385  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
  1386  		framework.ExpectNoError(err)
  1387  
  1388  		ginkgo.By("Deleting the pod")
  1389  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
  1390  		framework.ExpectNoError(err)
  1391  
  1392  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1393  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1394  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1395  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
  1396  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1397  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
  1398  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
  1399  		framework.ExpectNoError(err)
  1400  	})
  1401  })
  1402  
  1403  var _ = SIGDescribe("ResourceQuota", feature.PodPriority, func() {
  1404  	f := framework.NewDefaultFramework("resourcequota-priorityclass")
  1405  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
  1406  
  1407  	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func(ctx context.Context) {
  1408  
  1409  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)}, metav1.CreateOptions{})
  1410  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1411  			framework.Failf("unexpected error while creating priority class: %v", err)
  1412  		}
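        		// PriorityClasses are cluster-scoped, so an object left over from a previous
        		// run is tolerated via the IsAlreadyExists check above.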
  1413  
  1414  		hard := v1.ResourceList{}
  1415  		hard[v1.ResourcePods] = resource.MustParse("1")
  1416  
  1417  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1418  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass1"}))
  1419  		framework.ExpectNoError(err)
  1420  
  1421  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1422  		usedResources := v1.ResourceList{}
  1423  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1424  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1425  		framework.ExpectNoError(err)
  1426  
  1427  		ginkgo.By("Creating a pod with priority class")
  1428  		podName := "testpod-pclass1"
  1429  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
  1430  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1431  		framework.ExpectNoError(err)
  1432  
  1433  		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
  1434  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1435  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1436  		framework.ExpectNoError(err)
  1437  
  1438  		ginkgo.By("Deleting the pod")
  1439  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1440  		framework.ExpectNoError(err)
  1441  
  1442  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1443  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1444  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1445  		framework.ExpectNoError(err)
  1446  	})
  1447  
  1448  	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func(ctx context.Context) {
  1449  
  1450  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)}, metav1.CreateOptions{})
  1451  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1452  			framework.Failf("unexpected error while creating priority class: %v", err)
  1453  		}
  1454  
  1455  		hard := v1.ResourceList{}
  1456  		hard[v1.ResourcePods] = resource.MustParse("1")
  1457  
  1458  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1459  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass2"}))
  1460  		framework.ExpectNoError(err)
  1461  
  1462  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1463  		usedResources := v1.ResourceList{}
  1464  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1465  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1466  		framework.ExpectNoError(err)
  1467  
  1468  		ginkgo.By("Creating first pod with priority class should pass")
  1469  		podName := "testpod-pclass2-1"
  1470  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
  1471  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1472  		framework.ExpectNoError(err)
  1473  
  1474  		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
  1475  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1476  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1477  		framework.ExpectNoError(err)
  1478  
  1479  		ginkgo.By("Creating 2nd pod with priority class should fail")
  1480  		podName2 := "testpod-pclass2-2"
  1481  		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
  1482  		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{})
  1483  		gomega.Expect(err).To(gomega.HaveOccurred())
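        		// The second pod would exceed the pods=1 hard limit for this scope, so the
        		// quota admission plugin is expected to reject it with a Forbidden error.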
  1484  
  1485  		ginkgo.By("Deleting first pod")
  1486  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1487  		framework.ExpectNoError(err)
  1488  
  1489  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1490  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1491  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1492  		framework.ExpectNoError(err)
  1493  	})
  1494  
  1495  	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func(ctx context.Context) {
  1496  
  1497  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)}, metav1.CreateOptions{})
  1498  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1499  			framework.Failf("unexpected error while creating priority class: %v", err)
  1500  		}
  1501  
  1502  		hard := v1.ResourceList{}
  1503  		hard[v1.ResourcePods] = resource.MustParse("1")
  1504  
  1505  		ginkgo.By("Creating a ResourceQuota with priority class scope")
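        		// The quota deliberately selects "pclass4", which no pod in this test uses,
        		// so its usage must stay at zero while the "pclass3" pods are created.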
  1506  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass4"}))
  1507  		framework.ExpectNoError(err)
  1508  
  1509  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1510  		usedResources := v1.ResourceList{}
  1511  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1512  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1513  		framework.ExpectNoError(err)
  1514  
  1515  		ginkgo.By("Creating a pod with priority class with pclass3")
  1516  		podName := "testpod-pclass3-1"
  1517  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
  1518  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1519  		framework.ExpectNoError(err)
  1520  
  1521  		ginkgo.By("Ensuring resource quota with priority class scope remains the same")
  1522  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1523  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1524  		framework.ExpectNoError(err)
  1525  
  1526  		ginkgo.By("Creating a 2nd pod with priority class pclass3")
  1527  		podName2 := "testpod-pclass3-2"
  1528  		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
  1529  		pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{})
  1530  		framework.ExpectNoError(err)
  1531  
  1532  		ginkgo.By("Ensuring resource quota with priority class scope remains the same")
  1533  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1534  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1535  		framework.ExpectNoError(err)
  1536  
  1537  		ginkgo.By("Deleting both pods")
  1538  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1539  		framework.ExpectNoError(err)
  1540  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod2.Name, *metav1.NewDeleteOptions(0))
  1541  		framework.ExpectNoError(err)
  1542  	})
  1543  
  1544  	ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func(ctx context.Context) {
  1545  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)}, metav1.CreateOptions{})
  1546  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1547  			framework.Failf("unexpected error while creating priority class: %v", err)
  1548  		}
  1549  
  1550  		_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)}, metav1.CreateOptions{})
  1551  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1552  			framework.Failf("unexpected error while creating priority class: %v", err)
  1553  		}
  1554  
  1555  		hard := v1.ResourceList{}
  1556  		hard[v1.ResourcePods] = resource.MustParse("2")
  1557  
  1558  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1559  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass5", "pclass6"}))
  1560  		framework.ExpectNoError(err)
  1561  
  1562  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1563  		usedResources := v1.ResourceList{}
  1564  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1565  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1566  		framework.ExpectNoError(err)
  1567  
  1568  		ginkgo.By("Creating a pod with priority class pclass5")
  1569  		podName := "testpod-pclass5"
  1570  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
  1571  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1572  		framework.ExpectNoError(err)
  1573  
  1574  		ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
  1575  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1576  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1577  		framework.ExpectNoError(err)
  1578  
  1579  		ginkgo.By("Creating 2nd pod with priority class pclass6")
  1580  		podName2 := "testpod-pclass6"
  1581  		pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
  1582  		pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod2, metav1.CreateOptions{})
  1583  		framework.ExpectNoError(err)
  1584  
  1585  		ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage")
  1586  		usedResources[v1.ResourcePods] = resource.MustParse("2")
  1587  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1588  		framework.ExpectNoError(err)
  1589  
  1590  		ginkgo.By("Deleting both pods")
  1591  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1592  		framework.ExpectNoError(err)
  1593  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod2.Name, *metav1.NewDeleteOptions(0))
  1594  		framework.ExpectNoError(err)
  1595  
  1596  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1597  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1598  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1599  		framework.ExpectNoError(err)
  1600  	})
  1601  
  1602  	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func(ctx context.Context) {
  1603  
  1604  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)}, metav1.CreateOptions{})
  1605  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1606  			framework.Failf("unexpected error while creating priority class: %v", err)
  1607  		}
  1608  
  1609  		hard := v1.ResourceList{}
  1610  		hard[v1.ResourcePods] = resource.MustParse("1")
  1611  
  1612  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1613  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpNotIn, []string{"pclass7"}))
  1614  		framework.ExpectNoError(err)
  1615  
  1616  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1617  		usedResources := v1.ResourceList{}
  1618  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1619  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1620  		framework.ExpectNoError(err)
  1621  
  1622  		ginkgo.By("Creating a pod with priority class pclass7")
  1623  		podName := "testpod-pclass7"
  1624  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
  1625  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1626  		framework.ExpectNoError(err)
  1627  
  1628  		ginkgo.By("Ensuring resource quota with priority class is not used")
  1629  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1630  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1631  		framework.ExpectNoError(err)
  1632  
  1633  		ginkgo.By("Deleting the pod")
  1634  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1635  		framework.ExpectNoError(err)
  1636  	})
  1637  
  1638  	ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func(ctx context.Context) {
  1639  
  1640  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)}, metav1.CreateOptions{})
  1641  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1642  			framework.Failf("unexpected error while creating priority class: %v", err)
  1643  		}
  1644  
  1645  		hard := v1.ResourceList{}
  1646  		hard[v1.ResourcePods] = resource.MustParse("1")
  1647  
  1648  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1649  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpExists, []string{}))
  1650  		framework.ExpectNoError(err)
  1651  
  1652  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1653  		usedResources := v1.ResourceList{}
  1654  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1655  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1656  		framework.ExpectNoError(err)
  1657  
  1658  		ginkgo.By("Creating a pod with priority class pclass8")
  1659  		podName := "testpod-pclass8"
  1660  		pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
  1661  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1662  		framework.ExpectNoError(err)
  1663  
  1664  		ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
  1665  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1666  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1667  		framework.ExpectNoError(err)
  1668  
  1669  		ginkgo.By("Deleting the pod")
  1670  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1671  		framework.ExpectNoError(err)
  1672  
  1673  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1674  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1675  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1676  		framework.ExpectNoError(err)
  1677  	})
  1678  
  1679  	ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func(ctx context.Context) {
  1680  
  1681  		_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(ctx, &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)}, metav1.CreateOptions{})
  1682  		if err != nil && !apierrors.IsAlreadyExists(err) {
  1683  			framework.Failf("unexpected error while creating priority class: %v", err)
  1684  		}
  1685  
  1686  		hard := v1.ResourceList{}
  1687  		hard[v1.ResourcePods] = resource.MustParse("1")
  1688  		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
  1689  		hard[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
  1690  		hard[v1.ResourceLimitsCPU] = resource.MustParse("3")
  1691  		hard[v1.ResourceLimitsMemory] = resource.MustParse("3Gi")
  1692  
  1693  		ginkgo.By("Creating a ResourceQuota with priority class scope")
  1694  		resourceQuotaPriorityClass, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeForPriorityClass("quota-priorityclass", hard, v1.ScopeSelectorOpIn, []string{"pclass9"}))
  1695  		framework.ExpectNoError(err)
  1696  
  1697  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1698  		usedResources := v1.ResourceList{}
  1699  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1700  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1701  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
  1702  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1703  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
  1704  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1705  		framework.ExpectNoError(err)
  1706  
  1707  		ginkgo.By("Creating a pod with priority class")
  1708  		podName := "testpod-pclass9"
  1709  		request := v1.ResourceList{}
  1710  		request[v1.ResourceCPU] = resource.MustParse("1")
  1711  		request[v1.ResourceMemory] = resource.MustParse("1Gi")
  1712  		limit := v1.ResourceList{}
  1713  		limit[v1.ResourceCPU] = resource.MustParse("2")
  1714  		limit[v1.ResourceMemory] = resource.MustParse("2Gi")
  1715  
  1716  		pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
  1717  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1718  		framework.ExpectNoError(err)
  1719  
  1720  		ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
  1721  		usedResources[v1.ResourcePods] = resource.MustParse("1")
  1722  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("1")
  1723  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("1Gi")
  1724  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("2")
  1725  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("2Gi")
  1726  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1727  		framework.ExpectNoError(err)
  1728  
  1729  		ginkgo.By("Deleting the pod")
  1730  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1731  		framework.ExpectNoError(err)
  1732  
  1733  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1734  		usedResources[v1.ResourcePods] = resource.MustParse("0")
  1735  		usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
  1736  		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0Gi")
  1737  		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
  1738  		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0Gi")
  1739  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaPriorityClass.Name, usedResources)
  1740  		framework.ExpectNoError(err)
  1741  	})
  1742  
  1743  })
  1744  
  1745  var _ = SIGDescribe("ResourceQuota", func() {
  1746  	f := framework.NewDefaultFramework("cross-namespace-pod-affinity")
  1747  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
  1748  	ginkgo.It("should verify ResourceQuota with cross namespace pod affinity scope using scope-selectors.", func(ctx context.Context) {
  1749  		ginkgo.By("Creating a ResourceQuota with cross namespace pod affinity scope")
  1750  		quota, err := createResourceQuota(
  1751  			ctx, f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScopeSelector("quota-cross-namespace-pod-affinity", v1.ResourceQuotaScopeCrossNamespacePodAffinity))
  1752  		framework.ExpectNoError(err)
  1753  
  1754  		ginkgo.By("Ensuring ResourceQuota status is calculated")
  1755  		wantUsedResources := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")}
  1756  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources)
  1757  		framework.ExpectNoError(err)
  1758  
  1759  		ginkgo.By("Creating a pod that does not use cross namespace affinity")
  1760  		pod := newTestPodWithAffinityForQuota(f, "no-cross-namespace-affinity", &v1.Affinity{
  1761  			PodAntiAffinity: &v1.PodAntiAffinity{
  1762  				RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
  1763  					TopologyKey: "region",
  1764  				}}}})
  1765  		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
  1766  		framework.ExpectNoError(err)
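        		// This pod declares anti-affinity but sets neither namespaces nor
        		// namespaceSelector, so it does not match the CrossNamespacePodAffinity
        		// scope and quota usage stays at 0.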
  1767  
  1768  		ginkgo.By("Creating a pod that uses namespaces field")
  1769  		podWithNamespaces := newTestPodWithAffinityForQuota(f, "with-namespaces", &v1.Affinity{
  1770  			PodAntiAffinity: &v1.PodAntiAffinity{
  1771  				RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
  1772  					TopologyKey: "region",
  1773  					Namespaces:  []string{"ns1"},
  1774  				}}}})
  1775  		podWithNamespaces, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podWithNamespaces, metav1.CreateOptions{})
  1776  		framework.ExpectNoError(err)
  1777  
  1778  		ginkgo.By("Ensuring resource quota captures podWithNamespaces usage")
  1779  		wantUsedResources[v1.ResourcePods] = resource.MustParse("1")
  1780  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources)
  1781  		framework.ExpectNoError(err)
  1782  
  1783  		ginkgo.By("Creating a pod that uses namespaceSelector field")
  1784  		podWithNamespaceSelector := newTestPodWithAffinityForQuota(f, "with-namespace-selector", &v1.Affinity{
  1785  			PodAntiAffinity: &v1.PodAntiAffinity{
  1786  				RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
  1787  					TopologyKey: "region",
  1788  					NamespaceSelector: &metav1.LabelSelector{
  1789  						MatchExpressions: []metav1.LabelSelectorRequirement{
  1790  							{
  1791  								Key:      "team",
  1792  								Operator: metav1.LabelSelectorOpIn,
  1793  								Values:   []string{"ads"},
  1794  							},
  1795  						},
  1796  					}}}}})
  1797  		podWithNamespaceSelector, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podWithNamespaceSelector, metav1.CreateOptions{})
  1798  		framework.ExpectNoError(err)
  1799  
  1800  		ginkgo.By("Ensuring resource quota captures podWithNamespaceSelector usage")
  1801  		wantUsedResources[v1.ResourcePods] = resource.MustParse("2")
  1802  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources)
  1803  		framework.ExpectNoError(err)
  1804  
  1805  		ginkgo.By("Deleting the pods")
  1806  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))
  1807  		framework.ExpectNoError(err)
  1808  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podWithNamespaces.Name, *metav1.NewDeleteOptions(0))
  1809  		framework.ExpectNoError(err)
  1810  		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podWithNamespaceSelector.Name, *metav1.NewDeleteOptions(0))
  1811  		framework.ExpectNoError(err)
  1812  
  1813  		ginkgo.By("Ensuring resource quota status released the pod usage")
  1814  		wantUsedResources[v1.ResourcePods] = resource.MustParse("0")
  1815  		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota.Name, wantUsedResources)
  1816  		framework.ExpectNoError(err)
  1817  	})
  1818  })
  1819  
  1820  // newTestResourceQuotaWithScopeSelector returns a quota that enforces default constraints for testing with scopeSelectors
  1821  func newTestResourceQuotaWithScopeSelector(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
  1822  	hard := v1.ResourceList{}
  1823  	hard[v1.ResourcePods] = resource.MustParse("5")
  1824  	switch scope {
  1825  	case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
  1826  		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
  1827  		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
  1828  		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
  1829  		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
  1830  	}
  1831  	return &v1.ResourceQuota{
  1832  		ObjectMeta: metav1.ObjectMeta{Name: name},
  1833  		Spec: v1.ResourceQuotaSpec{Hard: hard,
  1834  			ScopeSelector: &v1.ScopeSelector{
  1835  				MatchExpressions: []v1.ScopedResourceSelectorRequirement{
  1836  					{
  1837  						ScopeName: scope,
  1838  						Operator:  v1.ScopeSelectorOpExists},
  1839  				},
  1840  			},
  1841  		},
  1842  	}
  1843  }
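
// Example usage (illustrative sketch, not taken verbatim from the tests; the
// quota name is arbitrary):
//
//	quota := newTestResourceQuotaWithScopeSelector("quota-terminating", v1.ResourceQuotaScopeTerminating)
//	quota, err := createResourceQuota(ctx, f.ClientSet, f.Namespace.Name, quota)
//	framework.ExpectNoError(err)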
  1844  
  1845  // newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
  1846  func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
  1847  	hard := v1.ResourceList{}
  1848  	hard[v1.ResourcePods] = resource.MustParse("5")
  1849  	switch scope {
  1850  	case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
  1851  		hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
  1852  		hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
  1853  		hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
  1854  		hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
  1855  	}
  1856  	return &v1.ResourceQuota{
  1857  		ObjectMeta: metav1.ObjectMeta{Name: name},
  1858  		Spec:       v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
  1859  	}
  1860  }
  1861  
  1862  // newTestResourceQuotaWithScopeForPriorityClass returns a quota
  1863  // that enforces default constraints for testing with ResourceQuotaScopePriorityClass scope
  1864  func newTestResourceQuotaWithScopeForPriorityClass(name string, hard v1.ResourceList, op v1.ScopeSelectorOperator, values []string) *v1.ResourceQuota {
  1865  	return &v1.ResourceQuota{
  1866  		ObjectMeta: metav1.ObjectMeta{Name: name},
  1867  		Spec: v1.ResourceQuotaSpec{Hard: hard,
  1868  			ScopeSelector: &v1.ScopeSelector{
  1869  				MatchExpressions: []v1.ScopedResourceSelectorRequirement{
  1870  					{
  1871  						ScopeName: v1.ResourceQuotaScopePriorityClass,
  1872  						Operator:  op,
  1873  						Values:    values,
  1874  					},
  1875  				},
  1876  			},
  1877  		},
  1878  	}
  1879  }
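
// Example usage (illustrative sketch; the names, operator, and quantity are
// arbitrary): a quota that applies only to pods in priority class "pclass1".
//
//	hard := v1.ResourceList{v1.ResourcePods: resource.MustParse("1")}
//	quota := newTestResourceQuotaWithScopeForPriorityClass("quota-priority", hard, v1.ScopeSelectorOpIn, []string{"pclass1"})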
  1880  
  1881  // newTestResourceQuota returns a quota that enforces default constraints for testing
  1882  func newTestResourceQuota(name string) *v1.ResourceQuota {
  1883  	hard := v1.ResourceList{}
  1884  	hard[v1.ResourcePods] = resource.MustParse("5")
  1885  	hard[v1.ResourceServices] = resource.MustParse("10")
  1886  	hard[v1.ResourceServicesNodePorts] = resource.MustParse("1")
  1887  	hard[v1.ResourceServicesLoadBalancers] = resource.MustParse("1")
  1888  	hard[v1.ResourceReplicationControllers] = resource.MustParse("10")
  1889  	hard[v1.ResourceQuotas] = resource.MustParse("1")
  1890  	hard[v1.ResourceCPU] = resource.MustParse("1")
  1891  	hard[v1.ResourceMemory] = resource.MustParse("500Mi")
  1892  	hard[v1.ResourceConfigMaps] = resource.MustParse("10")
  1893  	hard[v1.ResourceSecrets] = resource.MustParse("10")
  1894  	hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
  1895  	hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
  1896  	hard[v1.ResourceEphemeralStorage] = resource.MustParse("50Gi")
  1897  	hard[core.V1ResourceByStorageClass(classGold, v1.ResourcePersistentVolumeClaims)] = resource.MustParse("10")
  1898  	hard[core.V1ResourceByStorageClass(classGold, v1.ResourceRequestsStorage)] = resource.MustParse("10Gi")
  1899  	// test quota on discovered resource type
  1900  	hard[v1.ResourceName("count/replicasets.apps")] = resource.MustParse("5")
  1901  	// test quota on extended resource
  1902  	hard[v1.ResourceName(v1.DefaultResourceRequestsPrefix+extendedResourceName)] = resource.MustParse("3")
  1903  	return &v1.ResourceQuota{
  1904  		ObjectMeta: metav1.ObjectMeta{Name: name},
  1905  		Spec:       v1.ResourceQuotaSpec{Hard: hard},
  1906  	}
  1907  }
  1908  
  1909  // newTestPodForQuota returns a pod that has the specified requests and limits
  1910  func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
  1911  	return &v1.Pod{
  1912  		ObjectMeta: metav1.ObjectMeta{
  1913  			Name: name,
  1914  		},
  1915  		Spec: v1.PodSpec{
  1916  			// prevent disruption to other test workloads in parallel test runs by ensuring the quota
  1917  			// test pods don't get scheduled onto a node
  1918  			NodeSelector: map[string]string{
  1919  				"x-test.k8s.io/unsatisfiable": "not-schedulable",
  1920  			},
  1921  			Containers: []v1.Container{
  1922  				{
  1923  					Name:  "pause",
  1924  					Image: imageutils.GetPauseImageName(),
  1925  					Resources: v1.ResourceRequirements{
  1926  						Requests: requests,
  1927  						Limits:   limits,
  1928  					},
  1929  				},
  1930  			},
  1931  		},
  1932  	}
  1933  }
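
// Example usage (illustrative sketch; the pod name and quantities are
// arbitrary, but must fit within the quota's remaining capacity for admission
// to succeed):
//
//	requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}
//	limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}
//	pod := newTestPodForQuota(f, "test-pod", requests, limits)
//	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
//	framework.ExpectNoError(err)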
  1934  
  1935  // newTestPodForQuotaWithPriority returns a pod that has the specified requests, limits and priority class
  1936  func newTestPodForQuotaWithPriority(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList, pclass string) *v1.Pod {
  1937  	return &v1.Pod{
  1938  		ObjectMeta: metav1.ObjectMeta{
  1939  			Name: name,
  1940  		},
  1941  		Spec: v1.PodSpec{
  1942  			// prevent disruption to other test workloads in parallel test runs by ensuring the quota
  1943  			// test pods don't get scheduled onto a node
  1944  			NodeSelector: map[string]string{
  1945  				"x-test.k8s.io/unsatisfiable": "not-schedulable",
  1946  			},
  1947  			Containers: []v1.Container{
  1948  				{
  1949  					Name:  "pause",
  1950  					Image: imageutils.GetPauseImageName(),
  1951  					Resources: v1.ResourceRequirements{
  1952  						Requests: requests,
  1953  						Limits:   limits,
  1954  					},
  1955  				},
  1956  			},
  1957  			PriorityClassName: pclass,
  1958  		},
  1959  	}
  1960  }
  1961  
  1962  // newTestPodWithAffinityForQuota returns a pod with the specified affinity and no resource requirements
  1963  func newTestPodWithAffinityForQuota(f *framework.Framework, name string, affinity *v1.Affinity) *v1.Pod {
  1964  	return &v1.Pod{
  1965  		ObjectMeta: metav1.ObjectMeta{
  1966  			Name: name,
  1967  		},
  1968  		Spec: v1.PodSpec{
  1969  			// prevent disruption to other test workloads in parallel test runs by ensuring the quota
  1970  			// test pods don't get scheduled onto a node
  1971  			NodeSelector: map[string]string{
  1972  				"x-test.k8s.io/unsatisfiable": "not-schedulable",
  1973  			},
  1974  			Affinity: affinity,
  1975  			Containers: []v1.Container{
  1976  				{
  1977  					Name:      "pause",
  1978  					Image:     imageutils.GetPauseImageName(),
  1979  					Resources: v1.ResourceRequirements{},
  1980  				},
  1981  			},
  1982  		},
  1983  	}
  1984  }
  1985  
  1986  // newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
  1987  func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
  1988  	return &v1.PersistentVolumeClaim{
  1989  		ObjectMeta: metav1.ObjectMeta{
  1990  			Name: name,
  1991  		},
  1992  		Spec: v1.PersistentVolumeClaimSpec{
  1993  			AccessModes: []v1.PersistentVolumeAccessMode{
  1994  				v1.ReadWriteOnce,
  1995  				v1.ReadOnlyMany,
  1996  			},
  1997  			Resources: v1.VolumeResourceRequirements{
  1998  				Requests: v1.ResourceList{
  1999  					v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
  2000  				},
  2001  			},
  2002  		},
  2003  	}
  2004  }
  2005  
  2006  // newTestReplicationControllerForQuota returns a simple replication controller
  2007  func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
  2008  	return &v1.ReplicationController{
  2009  		ObjectMeta: metav1.ObjectMeta{
  2010  			Name: name,
  2011  		},
  2012  		Spec: v1.ReplicationControllerSpec{
  2013  			Replicas: pointer.Int32(replicas),
  2014  			Selector: map[string]string{
  2015  				"name": name,
  2016  			},
  2017  			Template: &v1.PodTemplateSpec{
  2018  				ObjectMeta: metav1.ObjectMeta{
  2019  					Labels: map[string]string{"name": name},
  2020  				},
  2021  				Spec: v1.PodSpec{
  2022  					Containers: []v1.Container{
  2023  						{
  2024  							Name:  name,
  2025  							Image: image,
  2026  						},
  2027  					},
  2028  				},
  2029  			},
  2030  		},
  2031  	}
  2032  }
  2033  
  2034  // newTestReplicaSetForQuota returns a simple replica set
  2035  func newTestReplicaSetForQuota(name, image string, replicas int32) *appsv1.ReplicaSet {
  2036  	zero := int64(0)
  2037  	return &appsv1.ReplicaSet{
  2038  		ObjectMeta: metav1.ObjectMeta{
  2039  			Name: name,
  2040  		},
  2041  		Spec: appsv1.ReplicaSetSpec{
  2042  			Replicas: &replicas,
  2043  			Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}},
  2044  			Template: v1.PodTemplateSpec{
  2045  				ObjectMeta: metav1.ObjectMeta{
  2046  					Labels: map[string]string{"name": name},
  2047  				},
  2048  				Spec: v1.PodSpec{
  2049  					TerminationGracePeriodSeconds: &zero,
  2050  					Containers: []v1.Container{
  2051  						{
  2052  							Name:  name,
  2053  							Image: image,
  2054  						},
  2055  					},
  2056  				},
  2057  			},
  2058  		},
  2059  	}
  2060  }
  2061  
  2062  // newTestServiceForQuota returns a simple service
  2063  func newTestServiceForQuota(name string, serviceType v1.ServiceType, allocateLoadBalancerNodePorts bool) *v1.Service {
  2064  	var allocateNPs *bool
  2065  	// Only set AllocateLoadBalancerNodePorts when the service type is LoadBalancer; the field is not valid for other service types
  2066  	if serviceType == v1.ServiceTypeLoadBalancer {
  2067  		allocateNPs = &allocateLoadBalancerNodePorts
  2068  	}
  2069  
  2070  	return &v1.Service{
  2071  		ObjectMeta: metav1.ObjectMeta{
  2072  			Name: name,
  2073  		},
  2074  		Spec: v1.ServiceSpec{
  2075  			Type: serviceType,
  2076  			Ports: []v1.ServicePort{{
  2077  				Port:       80,
  2078  				TargetPort: intstr.FromInt32(80),
  2079  			}},
  2080  			AllocateLoadBalancerNodePorts: allocateNPs,
  2081  		},
  2082  	}
  2083  }
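
// Example usage (illustrative sketch): a LoadBalancer service that opts out of
// node port allocation, so it should count against the services.loadbalancers
// quota but not services.nodeports.
//
//	svc := newTestServiceForQuota("test-svc", v1.ServiceTypeLoadBalancer, false)
//	svc, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
//	framework.ExpectNoError(err)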
  2084  
  2085  func newTestConfigMapForQuota(name string) *v1.ConfigMap {
  2086  	return &v1.ConfigMap{
  2087  		ObjectMeta: metav1.ObjectMeta{
  2088  			Name: name,
  2089  		},
  2090  		Data: map[string]string{
  2091  			"a": "b",
  2092  		},
  2093  	}
  2094  }
  2095  
  2096  func newTestSecretForQuota(name string) *v1.Secret {
  2097  	return &v1.Secret{
  2098  		ObjectMeta: metav1.ObjectMeta{
  2099  			Name: name,
  2100  		},
  2101  		Data: map[string][]byte{
  2102  			"data-1": []byte("value-1\n"),
  2103  			"data-2": []byte("value-2\n"),
  2104  			"data-3": []byte("value-3\n"),
  2105  		},
  2106  	}
  2107  }
  2108  
  2109  // createResourceQuota in the specified namespace
  2110  func createResourceQuota(ctx context.Context, c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
  2111  	return c.CoreV1().ResourceQuotas(namespace).Create(ctx, resourceQuota, metav1.CreateOptions{})
  2112  }
  2113  
  2114  // deleteResourceQuota with the specified name
  2115  func deleteResourceQuota(ctx context.Context, c clientset.Interface, namespace, name string) error {
  2116  	return c.CoreV1().ResourceQuotas(namespace).Delete(ctx, name, metav1.DeleteOptions{})
  2117  }
  2118  
  2119  // countResourceQuota counts the number of ResourceQuota objects in the specified namespace.
  2120  // On contended servers the service account controller can slow down, leading to the count changing during a run.
  2121  // Poll every second, for up to 30s, until the count has held steady for 5 consecutive polls, assuming that updates come at a consistent rate and are not held indefinitely.
  2122  func countResourceQuota(ctx context.Context, c clientset.Interface, namespace string) (int, error) {
  2123  	found, unchanged := 0, 0
  2124  	err := wait.PollWithContext(ctx, 1*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
  2125  		resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(ctx, metav1.ListOptions{})
  2126  		framework.ExpectNoError(err)
  2127  		if len(resourceQuotas.Items) == found {
  2128  			// loop until the number of resource quotas has stabilized for 5 seconds
  2129  			unchanged++
  2130  			return unchanged > 4, nil
  2131  		}
  2132  		unchanged = 0
  2133  		found = len(resourceQuotas.Items)
  2134  		return false, nil
  2135  	})
  	// Read found only after the poll has completed, so the result does not
  	// depend on the evaluation order of return operands.
  	return found, err
  2136  }
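
// Example usage (illustrative sketch): snapshot the stable count before
// creating a quota, so a later status check can expect exactly count+1.
//
//	existing, err := countResourceQuota(ctx, f.ClientSet, f.Namespace.Name)
//	framework.ExpectNoError(err)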
  2137  
  2138  // waitForResourceQuota waits for the resource quota status to report the expected used resource values
  2139  func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
  2140  	return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
  2141  		resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
  2142  		if err != nil {
  2143  			return false, err
  2144  		}
  2145  		// used may not yet be calculated
  2146  		if resourceQuota.Status.Used == nil {
  2147  			return false, nil
  2148  		}
  2149  		// verify that the quota shows the expected used resource values
  2150  		for k, v := range used {
  2151  			if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
  2152  				framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
  2153  				return false, nil
  2154  			}
  2155  		}
  2156  		return true, nil
  2157  	})
  2158  }
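
// Example usage (illustrative sketch; the quota name and expected values are
// arbitrary):
//
//	used := v1.ResourceList{v1.ResourcePods: resource.MustParse("1")}
//	framework.ExpectNoError(waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, "test-quota", used))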
  2159  
  2160  // updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated
  2161  // for the specified resource name.
  2162  func updateResourceQuotaUntilUsageAppears(ctx context.Context, c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
  2163  	return wait.PollWithContext(ctx, framework.Poll, resourceQuotaTimeout, func(ctx context.Context) (bool, error) {
  2164  		resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
  2165  		if err != nil {
  2166  			return false, err
  2167  		}
  2168  		// verify that the quota shows the expected used resource values
  2169  		_, ok := resourceQuota.Status.Used[resourceName]
  2170  		if ok {
  2171  			return true, nil
  2172  		}
  2173  
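		// Usage for resourceName is not populated yet. Bump the hard limit by 1 so
		// the Update below is a real spec change; the quota controller re-syncs
		// quotas whose spec has changed, recalculating and publishing Status.Used.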
  2174  		current := resourceQuota.Spec.Hard[resourceName]
  2175  		current.Add(resource.MustParse("1"))
  2176  		resourceQuota.Spec.Hard[resourceName] = current
  2177  		_, err = c.CoreV1().ResourceQuotas(ns).Update(ctx, resourceQuota, metav1.UpdateOptions{})
  2178  		// Ignore conflicts, since another client may have already updated the quota.
  2179  		if apierrors.IsConflict(err) {
  2180  			return false, nil
  2181  		}
  2182  		return false, err
  2183  	})
  2184  }
  2185  
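// unstructuredToResourceQuota converts an unstructured object into a typed
// v1.ResourceQuota by encoding it to JSON and decoding it with the legacy
// codec for the core v1 scheme group.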
  2186  func unstructuredToResourceQuota(obj *unstructured.Unstructured) (*v1.ResourceQuota, error) {
  2187  	data, err := runtime.Encode(unstructured.UnstructuredJSONScheme, obj)
  2188  	if err != nil {
  2189  		return nil, err
  2190  	}
  2191  	rq := &v1.ResourceQuota{}
  2192  	err = runtime.DecodeInto(clientscheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), data, rq)
  2193  
  2194  	return rq, err
  2195  }
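
// Example usage (illustrative sketch; assumes obj is an
// *unstructured.Unstructured delivered by a dynamic client or a watch event):
//
//	rq, err := unstructuredToResourceQuota(obj)
//	framework.ExpectNoError(err)
//	framework.Logf("quota %s reports used: %v", rq.Name, rq.Status.Used)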