k8s.io/kubernetes@v1.29.3/test/e2e/node/pod_resize.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package node
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"regexp"
    23  	"runtime"
    24  	"strconv"
    25  	"strings"
    26  	"time"
    27  
    28  	v1 "k8s.io/api/core/v1"
    29  	"k8s.io/apimachinery/pkg/api/resource"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/labels"
    32  	"k8s.io/apimachinery/pkg/types"
    33  	clientset "k8s.io/client-go/kubernetes"
    34  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    35  	resourceapi "k8s.io/kubernetes/pkg/api/v1/resource"
    36  	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
    37  
    38  	"k8s.io/kubernetes/test/e2e/feature"
    39  	"k8s.io/kubernetes/test/e2e/framework"
    40  	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
    41  	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
    42  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    43  	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
    44  	imageutils "k8s.io/kubernetes/test/utils/image"
    45  
    46  	semver "github.com/blang/semver/v4"
    47  	"github.com/google/go-cmp/cmp"
    48  	"github.com/onsi/ginkgo/v2"
    49  	"github.com/onsi/gomega"
    50  )
    51  
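        // Cgroup v1 and v2 file paths read inside test containers to verify CPU and memory
        // settings, the CFS period used to build expected cgroup v2 cpu.max values, and the
        // interval/timeout used when polling for a resize to take effect.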
    52  const (
    53  	CgroupCPUPeriod    string = "/sys/fs/cgroup/cpu/cpu.cfs_period_us"
    54  	CgroupCPUShares    string = "/sys/fs/cgroup/cpu/cpu.shares"
    55  	CgroupCPUQuota     string = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us"
    56  	CgroupMemLimit     string = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
    57  	Cgroupv2MemLimit   string = "/sys/fs/cgroup/memory.max"
    58  	Cgroupv2MemRequest string = "/sys/fs/cgroup/memory.min"
    59  	Cgroupv2CPULimit   string = "/sys/fs/cgroup/cpu.max"
    60  	Cgroupv2CPURequest string = "/sys/fs/cgroup/cpu.weight"
    61  	CpuPeriod          string = "100000"
    62  
    63  	PollInterval time.Duration = 2 * time.Second
    64  	PollTimeout  time.Duration = 4 * time.Minute
    65  )
    66  
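        // ContainerResources holds the CPU, memory, and ephemeral-storage requests and limits
        // used to build a test container's ResourceRequirements.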
    67  type ContainerResources struct {
    68  	CPUReq, CPULim, MemReq, MemLim, EphStorReq, EphStorLim string
    69  }
    70  
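        // ContainerAllocations holds the values expected in ContainerStatus.AllocatedResources.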
    71  type ContainerAllocations struct {
    72  	CPUAlloc, MemAlloc, ephStorAlloc string
    73  }
    74  
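        // TestContainerInfo describes one test container: its resources, expected allocations,
        // CPU/memory resize restart policies, and the restart count expected after a resize.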
    75  type TestContainerInfo struct {
    76  	Name         string
    77  	Resources    *ContainerResources
    78  	Allocations  *ContainerAllocations
    79  	CPUPolicy    *v1.ResourceResizeRestartPolicy
    80  	MemPolicy    *v1.ResourceResizeRestartPolicy
    81  	RestartCount int32
    82  }
    83  
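        // isInPlaceResizeSupportedByRuntime reports whether the node runs containerd >= 1.6.9,
        // the first version with CRI support for in-place pod resize. It returns false for other
        // runtimes or on any lookup or version-parse error.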
    84  func isInPlaceResizeSupportedByRuntime(c clientset.Interface, nodeName string) bool {
    85  	//TODO(vinaykul,InPlacePodVerticalScaling): Can we optimize this?
    86  	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
    87  	if err != nil {
    88  		return false
    89  	}
    90  	re := regexp.MustCompile("containerd://(.*)")
    91  	match := re.FindStringSubmatch(node.Status.NodeInfo.ContainerRuntimeVersion)
    92  	if len(match) != 2 {
    93  		return false
    94  	}
    95  	if ver, verr := semver.ParseTolerant(match[1]); verr == nil {
    96  		if ver.Compare(semver.MustParse("1.6.9")) < 0 {
    97  			return false
    98  		}
    99  		return true
   100  	}
   101  	return false
   102  }
   103  
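        // getTestResourceInfo converts a TestContainerInfo into the ResourceRequirements, the
        // expected AllocatedResources list, and the ContainerResizePolicy entries for the
        // corresponding v1.Container.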
   104  func getTestResourceInfo(tcInfo TestContainerInfo) (v1.ResourceRequirements, v1.ResourceList, []v1.ContainerResizePolicy) {
   105  	var res v1.ResourceRequirements
   106  	var alloc v1.ResourceList
   107  	var resizePol []v1.ContainerResizePolicy
   108  
   109  	if tcInfo.Resources != nil {
   110  		var lim, req v1.ResourceList
   111  		if tcInfo.Resources.CPULim != "" || tcInfo.Resources.MemLim != "" || tcInfo.Resources.EphStorLim != "" {
   112  			lim = make(v1.ResourceList)
   113  		}
   114  		if tcInfo.Resources.CPUReq != "" || tcInfo.Resources.MemReq != "" || tcInfo.Resources.EphStorReq != "" {
   115  			req = make(v1.ResourceList)
   116  		}
   117  		if tcInfo.Resources.CPULim != "" {
   118  			lim[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPULim)
   119  		}
   120  		if tcInfo.Resources.MemLim != "" {
   121  			lim[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemLim)
   122  		}
   123  		if tcInfo.Resources.EphStorLim != "" {
   124  			lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
   125  		}
   126  		if tcInfo.Resources.CPUReq != "" {
   127  			req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
   128  		}
   129  		if tcInfo.Resources.MemReq != "" {
   130  			req[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemReq)
   131  		}
   132  		if tcInfo.Resources.EphStorReq != "" {
   133  			req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
   134  		}
   135  		res = v1.ResourceRequirements{Limits: lim, Requests: req}
   136  	}
   137  	if tcInfo.Allocations != nil {
   138  		alloc = make(v1.ResourceList)
   139  		if tcInfo.Allocations.CPUAlloc != "" {
   140  			alloc[v1.ResourceCPU] = resource.MustParse(tcInfo.Allocations.CPUAlloc)
   141  		}
   142  		if tcInfo.Allocations.MemAlloc != "" {
   143  			alloc[v1.ResourceMemory] = resource.MustParse(tcInfo.Allocations.MemAlloc)
   144  		}
   145  		if tcInfo.Allocations.ephStorAlloc != "" {
   146  			alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
   147  		}
   148  
   149  	}
   150  	if tcInfo.CPUPolicy != nil {
   151  		cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
   152  		resizePol = append(resizePol, cpuPol)
   153  	}
   154  	if tcInfo.MemPolicy != nil {
   155  		memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
   156  		resizePol = append(resizePol, memPol)
   157  	}
   158  	return res, alloc, resizePol
   159  }
   160  
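        // initDefaultResizePolicy defaults any unset CPU or memory resize policy to NotRequired,
        // mutating the slice in place.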
   161  func initDefaultResizePolicy(containers []TestContainerInfo) {
   162  	noRestart := v1.NotRequired
   163  	setDefaultPolicy := func(ci *TestContainerInfo) {
   164  		if ci.CPUPolicy == nil {
   165  			ci.CPUPolicy = &noRestart
   166  		}
   167  		if ci.MemPolicy == nil {
   168  			ci.MemPolicy = &noRestart
   169  		}
   170  	}
   171  	for i := range containers {
   172  		setDefaultPolicy(&containers[i])
   173  	}
   174  }
   175  
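        // makeTestContainer builds a BusyBox container that sleeps until terminated, with an
        // OS-appropriate SecurityContext, and returns it together with the ContainerStatus
        // (AllocatedResources) expected for it.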
   176  func makeTestContainer(tcInfo TestContainerInfo) (v1.Container, v1.ContainerStatus) {
   177  	cmd := "trap exit TERM; while true; do sleep 1; done"
   178  	res, alloc, resizePol := getTestResourceInfo(tcInfo)
   179  	bTrue := true
   180  	bFalse := false
   181  	userID := int64(1001)
   182  	userName := "ContainerUser"
   183  
   184  	var securityContext *v1.SecurityContext
   185  
   186  	if framework.NodeOSDistroIs("windows") {
   187  		securityContext = &v1.SecurityContext{
   188  			RunAsNonRoot: &bTrue,
   189  			WindowsOptions: &v1.WindowsSecurityContextOptions{
   190  				RunAsUserName: &userName,
   191  			},
   192  		}
   193  	} else {
   194  		securityContext = &v1.SecurityContext{
   195  			Privileged:               &bFalse,
   196  			AllowPrivilegeEscalation: &bFalse,
   197  			RunAsUser:                &userID,
   198  			RunAsNonRoot:             &bTrue,
   199  			Capabilities: &v1.Capabilities{
   200  				Drop: []v1.Capability{"ALL"},
   201  			},
   202  			SeccompProfile: &v1.SeccompProfile{
   203  				Type: v1.SeccompProfileTypeRuntimeDefault,
   204  			},
   205  		}
   206  	}
   207  
   208  	tc := v1.Container{
   209  		Name:            tcInfo.Name,
   210  		Image:           imageutils.GetE2EImage(imageutils.BusyBox),
   211  		Command:         []string{"/bin/sh"},
   212  		Args:            []string{"-c", cmd},
   213  		Resources:       res,
   214  		ResizePolicy:    resizePol,
   215  		SecurityContext: securityContext,
   216  	}
   217  
   218  	tcStatus := v1.ContainerStatus{
   219  		Name:               tcInfo.Name,
   220  		AllocatedResources: alloc,
   221  	}
   222  	return tc, tcStatus
   223  }
   224  
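        // makeTestPod wraps the test containers in a pod whose "time" label carries the given
        // timestamp, so each test can find its own pod with a label selector.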
   225  func makeTestPod(ns, name, timeStamp string, tcInfo []TestContainerInfo) *v1.Pod {
   226  	var testContainers []v1.Container
   227  	var podOS *v1.PodOS
   228  
   229  	for _, ci := range tcInfo {
   230  		tc, _ := makeTestContainer(ci)
   231  		testContainers = append(testContainers, tc)
   232  	}
   233  
   234  	if framework.NodeOSDistroIs("windows") {
   235  		podOS = &v1.PodOS{Name: v1.OSName("windows")}
   236  	} else {
   237  		podOS = &v1.PodOS{Name: v1.OSName(runtime.GOOS)}
   238  	}
   239  
   240  	pod := &v1.Pod{
   241  		TypeMeta: metav1.TypeMeta{
   242  			Kind:       "Pod",
   243  			APIVersion: "v1",
   244  		},
   245  		ObjectMeta: metav1.ObjectMeta{
   246  			Name:      name,
   247  			Namespace: ns,
   248  			Labels: map[string]string{
   249  				"name": "fooPod",
   250  				"time": timeStamp,
   251  			},
   252  		},
   253  		Spec: v1.PodSpec{
   254  			OS:            podOS,
   255  			Containers:    testContainers,
   256  			RestartPolicy: v1.RestartPolicyOnFailure,
   257  		},
   258  	}
   259  	return pod
   260  }
   261  
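        // verifyPodResizePolicy asserts that each container in the pod spec carries the resize
        // policy expected by tcInfo.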
   262  func verifyPodResizePolicy(pod *v1.Pod, tcInfo []TestContainerInfo) {
   263  	cMap := make(map[string]*v1.Container)
   264  	for i, c := range pod.Spec.Containers {
   265  		cMap[c.Name] = &pod.Spec.Containers[i]
   266  	}
   267  	for _, ci := range tcInfo {
   268  		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
   269  		c := cMap[ci.Name]
   270  		tc, _ := makeTestContainer(ci)
   271  		gomega.Expect(tc.ResizePolicy).To(gomega.Equal(c.ResizePolicy))
   272  	}
   273  }
   274  
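        // verifyPodResources asserts that each container in the pod spec carries the resource
        // requests and limits expected by tcInfo.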
   275  func verifyPodResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
   276  	cMap := make(map[string]*v1.Container)
   277  	for i, c := range pod.Spec.Containers {
   278  		cMap[c.Name] = &pod.Spec.Containers[i]
   279  	}
   280  	for _, ci := range tcInfo {
   281  		gomega.Expect(cMap).Should(gomega.HaveKey(ci.Name))
   282  		c := cMap[ci.Name]
   283  		tc, _ := makeTestContainer(ci)
   284  		gomega.Expect(tc.Resources).To(gomega.Equal(c.Resources))
   285  	}
   286  }
   287  
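        // verifyPodAllocations compares ContainerStatus.AllocatedResources with the expected
        // allocations, falling back to the expected requests when no explicit allocations are
        // given. With flagError it fails the test on mismatch; otherwise it only returns false
        // so that callers can poll.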
   288  func verifyPodAllocations(pod *v1.Pod, tcInfo []TestContainerInfo, flagError bool) bool {
   289  	cStatusMap := make(map[string]*v1.ContainerStatus)
   290  	for i, c := range pod.Status.ContainerStatuses {
   291  		cStatusMap[c.Name] = &pod.Status.ContainerStatuses[i]
   292  	}
   293  
   294  	for _, ci := range tcInfo {
   295  		gomega.Expect(cStatusMap).Should(gomega.HaveKey(ci.Name))
   296  		cStatus := cStatusMap[ci.Name]
   297  		if ci.Allocations == nil {
   298  			if ci.Resources != nil {
   299  				alloc := &ContainerAllocations{CPUAlloc: ci.Resources.CPUReq, MemAlloc: ci.Resources.MemReq}
   300  				ci.Allocations = alloc
   301  				defer func() {
   302  					ci.Allocations = nil
   303  				}()
   304  			}
   305  		}
   306  
   307  		_, tcStatus := makeTestContainer(ci)
   308  		if flagError {
   309  			gomega.Expect(tcStatus.AllocatedResources).To(gomega.Equal(cStatus.AllocatedResources))
   310  		}
   311  		if !cmp.Equal(cStatus.AllocatedResources, tcStatus.AllocatedResources) {
   312  			return false
   313  		}
   314  	}
   315  	return true
   316  }
   317  
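        // verifyPodStatusResources asserts that the resources reported in each ContainerStatus
        // match the expected container resources.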
   318  func verifyPodStatusResources(pod *v1.Pod, tcInfo []TestContainerInfo) {
   319  	csMap := make(map[string]*v1.ContainerStatus)
   320  	for i, c := range pod.Status.ContainerStatuses {
   321  		csMap[c.Name] = &pod.Status.ContainerStatuses[i]
   322  	}
   323  	for _, ci := range tcInfo {
   324  		gomega.Expect(csMap).Should(gomega.HaveKey(ci.Name))
   325  		cs := csMap[ci.Name]
   326  		tc, _ := makeTestContainer(ci)
   327  		gomega.Expect(tc.Resources).To(gomega.Equal(*cs.Resources))
   328  		//gomega.Expect(cs.RestartCount).To(gomega.Equal(ci.RestartCount))
   329  	}
   330  }
   331  
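        // isPodOnCgroupv2Node checks for /sys/fs/cgroup/cgroup.controllers inside the pod to
        // determine whether the node uses cgroup v2.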
   332  func isPodOnCgroupv2Node(pod *v1.Pod) bool {
   333  	// Determine if pod is running on cgroupv2 or cgroupv1 node
   334  	//TODO(vinaykul,InPlacePodVerticalScaling): Is there a better way to determine this?
   335  	cgroupv2File := "/sys/fs/cgroup/cgroup.controllers"
   336  	_, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--", "ls", cgroupv2File)
   337  	if err == nil {
   338  		return true
   339  	}
   340  	return false
   341  }
   342  
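        // verifyPodContainersCgroupValues reads the CPU and memory cgroup files inside each
        // container and checks them against the expected limits and requests, using cgroup v1
        // or v2 file names and value formats as appropriate. With flagError it fails the test
        // on mismatch; otherwise it only returns false so that callers can poll.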
   343  func verifyPodContainersCgroupValues(pod *v1.Pod, tcInfo []TestContainerInfo, flagError bool) bool {
   344  	podOnCgroupv2Node := isPodOnCgroupv2Node(pod)
   345  	cgroupMemLimit := Cgroupv2MemLimit
   346  	cgroupCPULimit := Cgroupv2CPULimit
   347  	cgroupCPURequest := Cgroupv2CPURequest
   348  	if !podOnCgroupv2Node {
   349  		cgroupMemLimit = CgroupMemLimit
   350  		cgroupCPULimit = CgroupCPUQuota
   351  		cgroupCPURequest = CgroupCPUShares
   352  	}
   353  	verifyCgroupValue := func(cName, cgPath, expectedCgValue string) bool {
   354  		cmd := []string{"head", "-n", "1", cgPath}
   355  		framework.Logf("Namespace %s Pod %s Container %s - looking for cgroup value %s in path %s",
   356  			pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
   357  		cgValue, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, cName, cmd, expectedCgValue, PollTimeout)
   358  		if flagError {
   359  			framework.ExpectNoError(err, fmt.Sprintf("failed to find expected value '%s' in container cgroup '%s'",
   360  				expectedCgValue, cgPath))
   361  		}
   362  		cgValue = strings.Trim(cgValue, "\n")
   363  		if flagError {
   364  			gomega.Expect(cgValue).Should(gomega.Equal(expectedCgValue), "cgroup value")
   365  		}
   366  		if cgValue != expectedCgValue {
   367  			return false
   368  		}
   369  		return true
   370  	}
   371  	for _, ci := range tcInfo {
   372  		if ci.Resources == nil {
   373  			continue
   374  		}
   375  		tc, _ := makeTestContainer(ci)
   376  		if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
   377  			var cpuShares int64
   378  			var cpuLimitString, memLimitString string
   379  			memLimitInBytes := tc.Resources.Limits.Memory().Value()
   380  			cpuRequest := tc.Resources.Requests.Cpu()
   381  			cpuLimit := tc.Resources.Limits.Cpu()
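        			// cpu.shares is derived from the CPU request, falling back to the CPU limit when only a limit is set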
   382  			if cpuRequest.IsZero() && !cpuLimit.IsZero() {
   383  				cpuShares = int64(kubecm.MilliCPUToShares(cpuLimit.MilliValue()))
   384  			} else {
   385  				cpuShares = int64(kubecm.MilliCPUToShares(cpuRequest.MilliValue()))
   386  			}
   387  			cpuQuota := kubecm.MilliCPUToQuota(cpuLimit.MilliValue(), kubecm.QuotaPeriod)
   388  			if cpuLimit.IsZero() {
   389  				cpuQuota = -1
   390  			}
   391  			cpuLimitString = strconv.FormatInt(cpuQuota, 10)
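        			// cgroup v2 writes the CPU limit to cpu.max as "<quota> <period>" and reports "max"
        			// instead of -1 (CPU) or 0 (memory) when no limit is set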
   392  			if podOnCgroupv2Node {
   393  				if cpuLimitString == "-1" {
   394  					cpuLimitString = "max"
   395  				}
   396  				cpuLimitString = fmt.Sprintf("%s %s", cpuLimitString, CpuPeriod)
   397  			}
   398  			memLimitString = strconv.FormatInt(memLimitInBytes, 10)
   399  			if podOnCgroupv2Node && memLimitString == "0" {
   400  				memLimitString = "max"
   401  			}
   402  			if memLimitString != "0" {
   403  				if !verifyCgroupValue(ci.Name, cgroupMemLimit, memLimitString) {
   404  					return false
   405  				}
   406  			}
   407  			if !verifyCgroupValue(ci.Name, cgroupCPULimit, cpuLimitString) {
   408  				return false
   409  			}
   410  			if podOnCgroupv2Node {
   411  				// convert cgroup v1 cpu.shares value to cgroup v2 cpu.weight value
   412  				cpuShares = int64(1 + ((cpuShares-2)*9999)/262142)
   413  			}
   414  			if !verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(cpuShares, 10)) {
   415  				return false
   416  			}
   417  		}
   418  	}
   419  	return true
   420  }
   421  
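        // waitForPodResizeActuation polls until the expected container restarts and resource
        // allocations are observed, then waits for either the pod status resources (runtimes
        // without in-place resize support) or the container cgroup values (supporting runtimes
        // on non-Windows nodes) to reflect the resize, and returns the resized pod.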
   422  func waitForPodResizeActuation(c clientset.Interface, podClient *e2epod.PodClient, pod, patchedPod *v1.Pod, expectedContainers []TestContainerInfo) *v1.Pod {
   423  
   424  	waitForContainerRestart := func() error {
   425  		var restartContainersExpected []string
   426  		for _, ci := range expectedContainers {
   427  			if ci.RestartCount > 0 {
   428  				restartContainersExpected = append(restartContainersExpected, ci.Name)
   429  			}
   430  		}
   431  		if len(restartContainersExpected) == 0 {
   432  			return nil
   433  		}
   434  		for start := time.Now(); time.Since(start) < PollTimeout; time.Sleep(PollInterval) {
   435  			pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
   436  			if err != nil {
   437  				return err
   438  			}
   439  			restartedContainersCount := 0
   440  			for _, cName := range restartContainersExpected {
   441  				cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
   442  				if cs.RestartCount < 1 {
   443  					break
   444  				}
   445  				restartedContainersCount++
   446  			}
   447  			if restartedContainersCount == len(restartContainersExpected) {
   448  				return nil
   449  			}
   450  		}
   451  		return fmt.Errorf("timed out waiting for expected container restart")
   452  	}
   453  	waitPodAllocationsEqualsExpected := func() (*v1.Pod, error) {
   454  		for start := time.Now(); time.Since(start) < PollTimeout; time.Sleep(PollInterval) {
   455  			pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
   456  			if err != nil {
   457  				return nil, err
   458  			}
   459  			if !verifyPodAllocations(pod, expectedContainers, false) {
   460  				continue
   461  			}
   462  			return pod, nil
   463  		}
   464  		return nil, fmt.Errorf("timed out waiting for pod resource allocation values to match expected")
   465  	}
   466  	waitContainerCgroupValuesEqualsExpected := func() error {
   467  		for start := time.Now(); time.Since(start) < PollTimeout; time.Sleep(PollInterval) {
   468  			if !verifyPodContainersCgroupValues(patchedPod, expectedContainers, false) {
   469  				continue
   470  			}
   471  			return nil
   472  		}
   473  		return fmt.Errorf("timed out waiting for container cgroup values to match expected")
   474  	}
   475  	waitPodStatusResourcesEqualSpecResources := func() (*v1.Pod, error) {
   476  		for start := time.Now(); time.Since(start) < PollTimeout; time.Sleep(PollInterval) {
   477  			pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
   478  			if err != nil {
   479  				return nil, err
   480  			}
   481  			differs := false
   482  			for idx, c := range pod.Spec.Containers {
   483  				if !cmp.Equal(c.Resources, *pod.Status.ContainerStatuses[idx].Resources) {
   484  					differs = true
   485  					break
   486  				}
   487  			}
   488  			if differs {
   489  				continue
   490  			}
   491  			return pod, nil
   492  		}
   493  		return nil, fmt.Errorf("timed out waiting for pod spec resources to match pod status resources")
   494  	}
   495  	rsErr := waitForContainerRestart()
   496  	framework.ExpectNoError(rsErr, "failed waiting for expected container restart")
   497  	// Wait for pod resource allocations to equal expected values after resize
   498  	resizedPod, aErr := waitPodAllocationsEqualsExpected()
   499  	framework.ExpectNoError(aErr, "failed to verify pod resource allocation values equal expected values")
   500  	//TODO(vinaykul,InPlacePodVerticalScaling): Remove this check once base-OS updates to containerd>=1.6.9
   501  	//                containerd needs to add CRI support before Beta (See Node KEP #2273)
   502  	if !isInPlaceResizeSupportedByRuntime(c, pod.Spec.NodeName) {
   503  		// Wait for PodSpec container resources to equal PodStatus container resources indicating resize is complete
   504  		rPod, rErr := waitPodStatusResourcesEqualSpecResources()
   505  		framework.ExpectNoError(rErr, "failed to verify pod spec resources equal pod status resources")
   506  
   507  		ginkgo.By("verifying pod status after resize")
   508  		verifyPodStatusResources(rPod, expectedContainers)
   509  	} else if !framework.NodeOSDistroIs("windows") {
   510  		// Wait for container cgroup values to equal expected cgroup values after resize; this
   511  		// branch runs only when the runtime supports in-place resize (containerd >= 1.6.9).
   512  		cErr := waitContainerCgroupValuesEqualsExpected()
   513  		framework.ExpectNoError(cErr, "failed to verify container cgroup values equal expected values")
   514  	}
   515  	return resizedPod
   516  }
   517  
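        // doPodResizeTests registers table-driven tests that create a pod, verify its initial
        // resources, allocations, and resize policy, patch the pod spec with new resources,
        // wait for the resize to be actuated, re-verify, and delete the pod.
        // Each case's patchString is applied as a strategic merge patch; outside the test it is
        // roughly equivalent to (illustrative only):
        //   kubectl -n <namespace> patch pod testpod --type=strategic -p \
        //     '{"spec":{"containers":[{"name":"c1","resources":{"requests":{"cpu":"200m"}}}]}}'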
   518  func doPodResizeTests() {
   519  	f := framework.NewDefaultFramework("pod-resize")
   520  	var podClient *e2epod.PodClient
   521  	ginkgo.BeforeEach(func() {
   522  		podClient = e2epod.NewPodClient(f)
   523  	})
   524  
   525  	type testCase struct {
   526  		name        string
   527  		containers  []TestContainerInfo
   528  		patchString string
   529  		expected    []TestContainerInfo
   530  	}
   531  
   532  	noRestart := v1.NotRequired
   533  	doRestart := v1.RestartContainer
   534  	tests := []testCase{
   535  		{
   536  			name: "Guaranteed QoS pod, one container - increase CPU & memory",
   537  			containers: []TestContainerInfo{
   538  				{
   539  					Name:      "c1",
   540  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
   541  					CPUPolicy: &noRestart,
   542  					MemPolicy: &noRestart,
   543  				},
   544  			},
   545  			patchString: `{"spec":{"containers":[
   546  						{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
   547  					]}}`,
   548  			expected: []TestContainerInfo{
   549  				{
   550  					Name:      "c1",
   551  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
   552  					CPUPolicy: &noRestart,
   553  					MemPolicy: &noRestart,
   554  				},
   555  			},
   556  		},
   557  		{
   558  			name: "Guaranteed QoS pod, one container - decrease CPU & memory",
   559  			containers: []TestContainerInfo{
   560  				{
   561  					Name:      "c1",
   562  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "500Mi", MemLim: "500Mi"},
   563  					CPUPolicy: &noRestart,
   564  					MemPolicy: &noRestart,
   565  				},
   566  			},
   567  			patchString: `{"spec":{"containers":[
   568  						{"name":"c1", "resources":{"requests":{"cpu":"100m","memory":"250Mi"},"limits":{"cpu":"100m","memory":"250Mi"}}}
   569  					]}}`,
   570  			expected: []TestContainerInfo{
   571  				{
   572  					Name:      "c1",
   573  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "250Mi", MemLim: "250Mi"},
   574  					CPUPolicy: &noRestart,
   575  					MemPolicy: &noRestart,
   576  				},
   577  			},
   578  		},
   579  		{
   580  			name: "Guaranteed QoS pod, one container - increase CPU & decrease memory",
   581  			containers: []TestContainerInfo{
   582  				{
   583  					Name:      "c1",
   584  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
   585  				},
   586  			},
   587  			patchString: `{"spec":{"containers":[
   588  						{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"100Mi"}}}
   589  					]}}`,
   590  			expected: []TestContainerInfo{
   591  				{
   592  					Name:      "c1",
   593  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "100Mi", MemLim: "100Mi"},
   594  				},
   595  			},
   596  		},
   597  		{
   598  			name: "Guaranteed QoS pod, one container - decrease CPU & increase memory",
   599  			containers: []TestContainerInfo{
   600  				{
   601  					Name:      "c1",
   602  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
   603  				},
   604  			},
   605  			patchString: `{"spec":{"containers":[
   606  						{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"300Mi"},"limits":{"cpu":"50m","memory":"300Mi"}}}
   607  					]}}`,
   608  			expected: []TestContainerInfo{
   609  				{
   610  					Name:      "c1",
   611  					Resources: &ContainerResources{CPUReq: "50m", CPULim: "50m", MemReq: "300Mi", MemLim: "300Mi"},
   612  				},
   613  			},
   614  		},
   615  		{
   616  			name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
   617  			containers: []TestContainerInfo{
   618  				{
   619  					Name:      "c1",
   620  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "100Mi", MemLim: "100Mi"},
   621  					CPUPolicy: &noRestart,
   622  					MemPolicy: &noRestart,
   623  				},
   624  				{
   625  					Name:      "c2",
   626  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "200Mi", MemLim: "200Mi"},
   627  					CPUPolicy: &noRestart,
   628  					MemPolicy: &noRestart,
   629  				},
   630  				{
   631  					Name:      "c3",
   632  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
   633  					CPUPolicy: &noRestart,
   634  					MemPolicy: &noRestart,
   635  				},
   636  			},
   637  			patchString: `{"spec":{"containers":[
   638  						{"name":"c1", "resources":{"requests":{"cpu":"140m","memory":"50Mi"},"limits":{"cpu":"140m","memory":"50Mi"}}},
   639  						{"name":"c2", "resources":{"requests":{"cpu":"150m","memory":"240Mi"},"limits":{"cpu":"150m","memory":"240Mi"}}},
   640  						{"name":"c3", "resources":{"requests":{"cpu":"340m","memory":"250Mi"},"limits":{"cpu":"340m","memory":"250Mi"}}}
   641  					]}}`,
   642  			expected: []TestContainerInfo{
   643  				{
   644  					Name:      "c1",
   645  					Resources: &ContainerResources{CPUReq: "140m", CPULim: "140m", MemReq: "50Mi", MemLim: "50Mi"},
   646  					CPUPolicy: &noRestart,
   647  					MemPolicy: &noRestart,
   648  				},
   649  				{
   650  					Name:      "c2",
   651  					Resources: &ContainerResources{CPUReq: "150m", CPULim: "150m", MemReq: "240Mi", MemLim: "240Mi"},
   652  					CPUPolicy: &noRestart,
   653  					MemPolicy: &noRestart,
   654  				},
   655  				{
   656  					Name:      "c3",
   657  					Resources: &ContainerResources{CPUReq: "340m", CPULim: "340m", MemReq: "250Mi", MemLim: "250Mi"},
   658  					CPUPolicy: &noRestart,
   659  					MemPolicy: &noRestart,
   660  				},
   661  			},
   662  		},
   663  		{
   664  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
   665  			containers: []TestContainerInfo{
   666  				{
   667  					Name:      "c1",
   668  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   669  				},
   670  			},
   671  			patchString: `{"spec":{"containers":[
   672  						{"name":"c1", "resources":{"requests":{"memory":"200Mi"}}}
   673  					]}}`,
   674  			expected: []TestContainerInfo{
   675  				{
   676  					Name:      "c1",
   677  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
   678  				},
   679  			},
   680  		},
   681  		{
   682  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
   683  			containers: []TestContainerInfo{
   684  				{
   685  					Name:      "c1",
   686  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   687  				},
   688  			},
   689  			patchString: `{"spec":{"containers":[
   690  						{"name":"c1", "resources":{"limits":{"memory":"400Mi"}}}
   691  					]}}`,
   692  			expected: []TestContainerInfo{
   693  				{
   694  					Name:      "c1",
   695  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "400Mi"},
   696  				},
   697  			},
   698  		},
   699  		{
   700  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests only",
   701  			containers: []TestContainerInfo{
   702  				{
   703  					Name:      "c1",
   704  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   705  				},
   706  			},
   707  			patchString: `{"spec":{"containers":[
   708  						{"name":"c1", "resources":{"requests":{"memory":"300Mi"}}}
   709  					]}}`,
   710  			expected: []TestContainerInfo{
   711  				{
   712  					Name:      "c1",
   713  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "300Mi", MemLim: "500Mi"},
   714  				},
   715  			},
   716  		},
   717  		{
   718  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory limits only",
   719  			containers: []TestContainerInfo{
   720  				{
   721  					Name:      "c1",
   722  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   723  				},
   724  			},
   725  			patchString: `{"spec":{"containers":[
   726  						{"name":"c1", "resources":{"limits":{"memory":"600Mi"}}}
   727  					]}}`,
   728  			expected: []TestContainerInfo{
   729  				{
   730  					Name:      "c1",
   731  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "600Mi"},
   732  				},
   733  			},
   734  		},
   735  		{
   736  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
   737  			containers: []TestContainerInfo{
   738  				{
   739  					Name:      "c1",
   740  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   741  				},
   742  			},
   743  			patchString: `{"spec":{"containers":[
   744  						{"name":"c1", "resources":{"requests":{"cpu":"100m"}}}
   745  					]}}`,
   746  			expected: []TestContainerInfo{
   747  				{
   748  					Name:      "c1",
   749  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   750  				},
   751  			},
   752  		},
   753  		{
   754  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
   755  			containers: []TestContainerInfo{
   756  				{
   757  					Name:      "c1",
   758  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   759  				},
   760  			},
   761  			patchString: `{"spec":{"containers":[
   762  						{"name":"c1", "resources":{"limits":{"cpu":"300m"}}}
   763  					]}}`,
   764  			expected: []TestContainerInfo{
   765  				{
   766  					Name:      "c1",
   767  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"},
   768  				},
   769  			},
   770  		},
   771  		{
   772  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests only",
   773  			containers: []TestContainerInfo{
   774  				{
   775  					Name:      "c1",
   776  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
   777  				},
   778  			},
   779  			patchString: `{"spec":{"containers":[
   780  						{"name":"c1", "resources":{"requests":{"cpu":"150m"}}}
   781  					]}}`,
   782  			expected: []TestContainerInfo{
   783  				{
   784  					Name:      "c1",
   785  					Resources: &ContainerResources{CPUReq: "150m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
   786  				},
   787  			},
   788  		},
   789  		{
   790  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU limits only",
   791  			containers: []TestContainerInfo{
   792  				{
   793  					Name:      "c1",
   794  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   795  				},
   796  			},
   797  			patchString: `{"spec":{"containers":[
   798  						{"name":"c1", "resources":{"limits":{"cpu":"500m"}}}
   799  					]}}`,
   800  			expected: []TestContainerInfo{
   801  				{
   802  					Name:      "c1",
   803  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"},
   804  				},
   805  			},
   806  		},
   807  		{
   808  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and limits",
   809  			containers: []TestContainerInfo{
   810  				{
   811  					Name:      "c1",
   812  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   813  				},
   814  			},
   815  			patchString: `{"spec":{"containers":[
   816  						{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"200m"}}}
   817  					]}}`,
   818  			expected: []TestContainerInfo{
   819  				{
   820  					Name:      "c1",
   821  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
   822  				},
   823  			},
   824  		},
   825  		{
   826  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and limits",
   827  			containers: []TestContainerInfo{
   828  				{
   829  					Name:      "c1",
   830  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "250Mi", MemLim: "500Mi"},
   831  				},
   832  			},
   833  			patchString: `{"spec":{"containers":[
   834  						{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"400m"}}}
   835  					]}}`,
   836  			expected: []TestContainerInfo{
   837  				{
   838  					Name:      "c1",
   839  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   840  				},
   841  			},
   842  		},
   843  		{
   844  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase CPU limits",
   845  			containers: []TestContainerInfo{
   846  				{
   847  					Name:      "c1",
   848  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   849  				},
   850  			},
   851  			patchString: `{"spec":{"containers":[
   852  						{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"cpu":"500m"}}}
   853  					]}}`,
   854  			expected: []TestContainerInfo{
   855  				{
   856  					Name:      "c1",
   857  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "500m", MemReq: "250Mi", MemLim: "500Mi"},
   858  				},
   859  			},
   860  		},
   861  		{
   862  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease CPU limits",
   863  			containers: []TestContainerInfo{
   864  				{
   865  					Name:      "c1",
   866  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "250Mi", MemLim: "500Mi"},
   867  				},
   868  			},
   869  			patchString: `{"spec":{"containers":[
   870  						{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"cpu":"300m"}}}
   871  					]}}`,
   872  			expected: []TestContainerInfo{
   873  				{
   874  					Name:      "c1",
   875  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "250Mi", MemLim: "500Mi"},
   876  				},
   877  			},
   878  		},
   879  		{
   880  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and limits",
   881  			containers: []TestContainerInfo{
   882  				{
   883  					Name:      "c1",
   884  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
   885  				},
   886  			},
   887  			patchString: `{"spec":{"containers":[
   888  						{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"300Mi"}}}
   889  					]}}`,
   890  			expected: []TestContainerInfo{
   891  				{
   892  					Name:      "c1",
   893  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "300Mi"},
   894  				},
   895  			},
   896  		},
   897  		{
   898  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and limits",
   899  			containers: []TestContainerInfo{
   900  				{
   901  					Name:      "c1",
   902  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
   903  				},
   904  			},
   905  			patchString: `{"spec":{"containers":[
   906  						{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"500Mi"}}}
   907  					]}}`,
   908  			expected: []TestContainerInfo{
   909  				{
   910  					Name:      "c1",
   911  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "500Mi"},
   912  				},
   913  			},
   914  		},
   915  		{
   916  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase memory limits",
   917  			containers: []TestContainerInfo{
   918  				{
   919  					Name:      "c1",
   920  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
   921  				},
   922  			},
   923  			patchString: `{"spec":{"containers":[
   924  						{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"memory":"500Mi"}}}
   925  					]}}`,
   926  			expected: []TestContainerInfo{
   927  				{
   928  					Name:      "c1",
   929  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "500Mi"},
   930  				},
   931  			},
   932  		},
   933  		{
   934  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease memory limits",
   935  			containers: []TestContainerInfo{
   936  				{
   937  					Name:      "c1",
   938  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
   939  				},
   940  			},
   941  			patchString: `{"spec":{"containers":[
   942  						{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"memory":"300Mi"}}}
   943  					]}}`,
   944  			expected: []TestContainerInfo{
   945  				{
   946  					Name:      "c1",
   947  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "300Mi", MemLim: "300Mi"},
   948  				},
   949  			},
   950  		},
   951  		{
   952  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests and increase memory limits",
   953  			containers: []TestContainerInfo{
   954  				{
   955  					Name:      "c1",
   956  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
   957  				},
   958  			},
   959  			patchString: `{"spec":{"containers":[
   960  						{"name":"c1", "resources":{"requests":{"cpu":"100m"},"limits":{"memory":"500Mi"}}}
   961  					]}}`,
   962  			expected: []TestContainerInfo{
   963  				{
   964  					Name:      "c1",
   965  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
   966  				},
   967  			},
   968  		},
   969  		{
   970  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase CPU requests and decrease memory limits",
   971  			containers: []TestContainerInfo{
   972  				{
   973  					Name:      "c1",
   974  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "400m", MemReq: "200Mi", MemLim: "500Mi"},
   975  				},
   976  			},
   977  			patchString: `{"spec":{"containers":[
   978  						{"name":"c1", "resources":{"requests":{"cpu":"200m"},"limits":{"memory":"400Mi"}}}
   979  					]}}`,
   980  			expected: []TestContainerInfo{
   981  				{
   982  					Name:      "c1",
   983  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
   984  				},
   985  			},
   986  		},
   987  		{
   988  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests and increase CPU limits",
   989  			containers: []TestContainerInfo{
   990  				{
   991  					Name:      "c1",
   992  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
   993  				},
   994  			},
   995  			patchString: `{"spec":{"containers":[
   996  						{"name":"c1", "resources":{"requests":{"memory":"100Mi"},"limits":{"cpu":"300m"}}}
   997  					]}}`,
   998  			expected: []TestContainerInfo{
   999  				{
  1000  					Name:      "c1",
  1001  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "300m", MemReq: "100Mi", MemLim: "400Mi"},
  1002  				},
  1003  			},
  1004  		},
  1005  		{
  1006  			name: "Burstable QoS pod, one container with cpu & memory requests + limits - increase memory requests and decrease CPU limits",
  1007  			containers: []TestContainerInfo{
  1008  				{
  1009  					Name:      "c1",
  1010  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "400m", MemReq: "200Mi", MemLim: "400Mi"},
  1011  				},
  1012  			},
  1013  			patchString: `{"spec":{"containers":[
  1014  						{"name":"c1", "resources":{"requests":{"memory":"300Mi"},"limits":{"cpu":"300m"}}}
  1015  					]}}`,
  1016  			expected: []TestContainerInfo{
  1017  				{
  1018  					Name:      "c1",
  1019  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "300Mi", MemLim: "400Mi"},
  1020  				},
  1021  			},
  1022  		},
  1023  		{
  1024  			name: "Burstable QoS pod, one container with cpu & memory requests - decrease memory request",
  1025  			containers: []TestContainerInfo{
  1026  				{
  1027  					Name:      "c1",
  1028  					Resources: &ContainerResources{CPUReq: "200m", MemReq: "500Mi"},
  1029  				},
  1030  			},
  1031  			patchString: `{"spec":{"containers":[
  1032  						{"name":"c1", "resources":{"requests":{"memory":"400Mi"}}}
  1033  					]}}`,
  1034  			expected: []TestContainerInfo{
  1035  				{
  1036  					Name:      "c1",
  1037  					Resources: &ContainerResources{CPUReq: "200m", MemReq: "400Mi"},
  1038  				},
  1039  			},
  1040  		},
  1041  		{
  1042  			name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
  1043  			containers: []TestContainerInfo{
  1044  				{
  1045  					Name:      "c1",
  1046  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "100m", MemReq: "200Mi", MemLim: "200Mi"},
  1047  					CPUPolicy: &noRestart,
  1048  					MemPolicy: &doRestart,
  1049  				},
  1050  			},
  1051  			patchString: `{"spec":{"containers":[
  1052  						{"name":"c1", "resources":{"requests":{"cpu":"200m","memory":"400Mi"},"limits":{"cpu":"200m","memory":"400Mi"}}}
  1053  					]}}`,
  1054  			expected: []TestContainerInfo{
  1055  				{
  1056  					Name:         "c1",
  1057  					Resources:    &ContainerResources{CPUReq: "200m", CPULim: "200m", MemReq: "400Mi", MemLim: "400Mi"},
  1058  					CPUPolicy:    &noRestart,
  1059  					MemPolicy:    &doRestart,
  1060  					RestartCount: 1,
  1061  				},
  1062  			},
  1063  		},
  1064  		{
  1065  			name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
  1066  			containers: []TestContainerInfo{
  1067  				{
  1068  					Name:      "c1",
  1069  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "200Mi", MemLim: "400Mi"},
  1070  					CPUPolicy: &doRestart,
  1071  					MemPolicy: &noRestart,
  1072  				},
  1073  			},
  1074  			patchString: `{"spec":{"containers":[
  1075  						{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"100Mi"},"limits":{"cpu":"100m","memory":"200Mi"}}}
  1076  					]}}`,
  1077  			expected: []TestContainerInfo{
  1078  				{
  1079  					Name:         "c1",
  1080  					Resources:    &ContainerResources{CPUReq: "50m", CPULim: "100m", MemReq: "100Mi", MemLim: "200Mi"},
  1081  					CPUPolicy:    &doRestart,
  1082  					MemPolicy:    &noRestart,
  1083  					RestartCount: 1,
  1084  				},
  1085  			},
  1086  		},
  1087  		{
  1088  			name: "Burstable QoS pod, three containers - increase c1 resources, no change for c2, decrease c3 resources (no net change for pod)",
  1089  			containers: []TestContainerInfo{
  1090  				{
  1091  					Name:      "c1",
  1092  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
  1093  					CPUPolicy: &noRestart,
  1094  					MemPolicy: &noRestart,
  1095  				},
  1096  				{
  1097  					Name:      "c2",
  1098  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
  1099  					CPUPolicy: &noRestart,
  1100  					MemPolicy: &doRestart,
  1101  				},
  1102  				{
  1103  					Name:      "c3",
  1104  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
  1105  					CPUPolicy: &noRestart,
  1106  					MemPolicy: &noRestart,
  1107  				},
  1108  			},
  1109  			patchString: `{"spec":{"containers":[
  1110  						{"name":"c1", "resources":{"requests":{"cpu":"150m","memory":"150Mi"},"limits":{"cpu":"250m","memory":"250Mi"}}},
  1111  						{"name":"c3", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}}
  1112  					]}}`,
  1113  			expected: []TestContainerInfo{
  1114  				{
  1115  					Name:      "c1",
  1116  					Resources: &ContainerResources{CPUReq: "150m", CPULim: "250m", MemReq: "150Mi", MemLim: "250Mi"},
  1117  					CPUPolicy: &noRestart,
  1118  					MemPolicy: &noRestart,
  1119  				},
  1120  				{
  1121  					Name:      "c2",
  1122  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
  1123  					CPUPolicy: &noRestart,
  1124  					MemPolicy: &doRestart,
  1125  				},
  1126  				{
  1127  					Name:      "c3",
  1128  					Resources: &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"},
  1129  					CPUPolicy: &noRestart,
  1130  					MemPolicy: &noRestart,
  1131  				},
  1132  			},
  1133  		},
  1134  		{
  1135  			name: "Burstable QoS pod, three containers - decrease c1 resources, increase c2 resources, no change for c3 (net increase for pod)",
  1136  			containers: []TestContainerInfo{
  1137  				{
  1138  					Name:      "c1",
  1139  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
  1140  					CPUPolicy: &noRestart,
  1141  					MemPolicy: &noRestart,
  1142  				},
  1143  				{
  1144  					Name:      "c2",
  1145  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
  1146  					CPUPolicy: &noRestart,
  1147  					MemPolicy: &doRestart,
  1148  				},
  1149  				{
  1150  					Name:      "c3",
  1151  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
  1152  					CPUPolicy: &noRestart,
  1153  					MemPolicy: &noRestart,
  1154  				},
  1155  			},
  1156  			patchString: `{"spec":{"containers":[
  1157  						{"name":"c1", "resources":{"requests":{"cpu":"50m","memory":"50Mi"},"limits":{"cpu":"150m","memory":"150Mi"}}},
  1158  						{"name":"c2", "resources":{"requests":{"cpu":"350m","memory":"350Mi"},"limits":{"cpu":"450m","memory":"450Mi"}}}
  1159  					]}}`,
  1160  			expected: []TestContainerInfo{
  1161  				{
  1162  					Name:      "c1",
  1163  					Resources: &ContainerResources{CPUReq: "50m", CPULim: "150m", MemReq: "50Mi", MemLim: "150Mi"},
  1164  					CPUPolicy: &noRestart,
  1165  					MemPolicy: &noRestart,
  1166  				},
  1167  				{
  1168  					Name:         "c2",
  1169  					Resources:    &ContainerResources{CPUReq: "350m", CPULim: "450m", MemReq: "350Mi", MemLim: "450Mi"},
  1170  					CPUPolicy:    &noRestart,
  1171  					MemPolicy:    &doRestart,
  1172  					RestartCount: 1,
  1173  				},
  1174  				{
  1175  					Name:      "c3",
  1176  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
  1177  					CPUPolicy: &noRestart,
  1178  					MemPolicy: &noRestart,
  1179  				},
  1180  			},
  1181  		},
  1182  		{
  1183  			name: "Burstable QoS pod, three containers - no change for c1, increase c2 resources, decrease c3 (net decrease for pod)",
  1184  			containers: []TestContainerInfo{
  1185  				{
  1186  					Name:      "c1",
  1187  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
  1188  					CPUPolicy: &doRestart,
  1189  					MemPolicy: &doRestart,
  1190  				},
  1191  				{
  1192  					Name:      "c2",
  1193  					Resources: &ContainerResources{CPUReq: "200m", CPULim: "300m", MemReq: "200Mi", MemLim: "300Mi"},
  1194  					CPUPolicy: &doRestart,
  1195  					MemPolicy: &noRestart,
  1196  				},
  1197  				{
  1198  					Name:      "c3",
  1199  					Resources: &ContainerResources{CPUReq: "300m", CPULim: "400m", MemReq: "300Mi", MemLim: "400Mi"},
  1200  					CPUPolicy: &noRestart,
  1201  					MemPolicy: &doRestart,
  1202  				},
  1203  			},
  1204  			patchString: `{"spec":{"containers":[
  1205  						{"name":"c2", "resources":{"requests":{"cpu":"250m","memory":"250Mi"},"limits":{"cpu":"350m","memory":"350Mi"}}},
  1206  						{"name":"c3", "resources":{"requests":{"cpu":"100m","memory":"100Mi"},"limits":{"cpu":"200m","memory":"200Mi"}}}
  1207  					]}}`,
  1208  			expected: []TestContainerInfo{
  1209  				{
  1210  					Name:      "c1",
  1211  					Resources: &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
  1212  					CPUPolicy: &doRestart,
  1213  					MemPolicy: &doRestart,
  1214  				},
  1215  				{
  1216  					Name:         "c2",
  1217  					Resources:    &ContainerResources{CPUReq: "250m", CPULim: "350m", MemReq: "250Mi", MemLim: "350Mi"},
  1218  					CPUPolicy:    &noRestart,
  1219  					MemPolicy:    &noRestart,
  1220  					RestartCount: 1,
  1221  				},
  1222  				{
  1223  					Name:         "c3",
  1224  					Resources:    &ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "100Mi", MemLim: "200Mi"},
  1225  					CPUPolicy:    &doRestart,
  1226  					MemPolicy:    &doRestart,
  1227  					RestartCount: 1,
  1228  				},
  1229  			},
  1230  		},
  1231  	}
  1232  
  1233  	for idx := range tests {
  1234  		tc := tests[idx]
  1235  		ginkgo.It(tc.name, func(ctx context.Context) {
  1236  			var testPod, patchedPod *v1.Pod
  1237  			var pErr error
  1238  
  1239  			tStamp := strconv.Itoa(time.Now().Nanosecond())
  1240  			initDefaultResizePolicy(tc.containers)
  1241  			initDefaultResizePolicy(tc.expected)
  1242  			testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
  1243  
  1244  			ginkgo.By("creating pod")
  1245  			newPod := podClient.CreateSync(ctx, testPod)
  1246  
  1247  			ginkgo.By("verifying the pod is in kubernetes")
  1248  			selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp}))
  1249  			options := metav1.ListOptions{LabelSelector: selector.String()}
  1250  			podList, err := podClient.List(context.TODO(), options)
  1251  			framework.ExpectNoError(err, "failed to query for pods")
  1252  			gomega.Expect(podList.Items).Should(gomega.HaveLen(1))
  1253  
  1254  			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
  1255  			verifyPodResources(newPod, tc.containers)
  1256  			verifyPodResizePolicy(newPod, tc.containers)
  1257  
  1258  			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
  1259  			verifyPodStatusResources(newPod, tc.containers)
  1260  			// Check cgroup values only for containerd versions before 1.6.9
  1261  			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
  1262  				if !framework.NodeOSDistroIs("windows") {
  1263  					verifyPodContainersCgroupValues(newPod, tc.containers, true)
  1264  				}
  1265  			}
  1266  
  1267  			ginkgo.By("patching pod for resize")
  1268  			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
  1269  				types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
  1270  			framework.ExpectNoError(pErr, "failed to patch pod for resize")
  1271  
  1272  			ginkgo.By("verifying pod patched for resize")
  1273  			verifyPodResources(patchedPod, tc.expected)
  1274  			verifyPodAllocations(patchedPod, tc.containers, true)
  1275  
  1276  			ginkgo.By("waiting for resize to be actuated")
  1277  			resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod, patchedPod, tc.expected)
  1278  
  1279  			// Check cgroup values only for containerd versions before 1.6.9
  1280  			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
  1281  				ginkgo.By("verifying pod container's cgroup values after resize")
  1282  				if !framework.NodeOSDistroIs("windows") {
  1283  					verifyPodContainersCgroupValues(resizedPod, tc.expected, true)
  1284  				}
  1285  			}
  1286  
  1287  			ginkgo.By("verifying pod resources after resize")
  1288  			verifyPodResources(resizedPod, tc.expected)
  1289  
  1290  			ginkgo.By("verifying pod allocations after resize")
  1291  			verifyPodAllocations(resizedPod, tc.expected, true)
  1292  
  1293  			ginkgo.By("deleting pod")
  1294  			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
  1295  			framework.ExpectNoError(err, "failed to delete pod")
  1296  		})
  1297  	}
  1298  }
  1299  
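        // doPodResizeResourceQuotaTests verifies that a resize within the namespace ResourceQuota
        // is admitted and actuated, while patches that would push total CPU or memory usage over
        // the quota are rejected and leave the pod's resources and allocations unchanged.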
  1300  func doPodResizeResourceQuotaTests() {
  1301  	f := framework.NewDefaultFramework("pod-resize-resource-quota")
  1302  	var podClient *e2epod.PodClient
  1303  	ginkgo.BeforeEach(func() {
  1304  		podClient = e2epod.NewPodClient(f)
  1305  	})
  1306  
  1307  	ginkgo.It("pod-resize-resource-quota-test", func(ctx context.Context) {
  1308  		resourceQuota := v1.ResourceQuota{
  1309  			ObjectMeta: metav1.ObjectMeta{
  1310  				Name:      "resize-resource-quota",
  1311  				Namespace: f.Namespace.Name,
  1312  			},
  1313  			Spec: v1.ResourceQuotaSpec{
  1314  				Hard: v1.ResourceList{
  1315  					v1.ResourceCPU:    resource.MustParse("800m"),
  1316  					v1.ResourceMemory: resource.MustParse("800Mi"),
  1317  				},
  1318  			},
  1319  		}
  1320  		containers := []TestContainerInfo{
  1321  			{
  1322  				Name:      "c1",
  1323  				Resources: &ContainerResources{CPUReq: "300m", CPULim: "300m", MemReq: "300Mi", MemLim: "300Mi"},
  1324  			},
  1325  		}
  1326  		patchString := `{"spec":{"containers":[
  1327  			{"name":"c1", "resources":{"requests":{"cpu":"400m","memory":"400Mi"},"limits":{"cpu":"400m","memory":"400Mi"}}}
  1328  		]}}`
  1329  		expected := []TestContainerInfo{
  1330  			{
  1331  				Name:      "c1",
  1332  				Resources: &ContainerResources{CPUReq: "400m", CPULim: "400m", MemReq: "400Mi", MemLim: "400Mi"},
  1333  			},
  1334  		}
  1335  		patchStringExceedCPU := `{"spec":{"containers":[
  1336  			{"name":"c1", "resources":{"requests":{"cpu":"600m","memory":"200Mi"},"limits":{"cpu":"600m","memory":"200Mi"}}}
  1337  		]}}`
  1338  		patchStringExceedMemory := `{"spec":{"containers":[
  1339  			{"name":"c1", "resources":{"requests":{"cpu":"250m","memory":"750Mi"},"limits":{"cpu":"250m","memory":"750Mi"}}}
  1340  		]}}`
  1341  
  1342  		ginkgo.By("Creating a ResourceQuota")
  1343  		_, rqErr := f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Create(context.TODO(), &resourceQuota, metav1.CreateOptions{})
  1344  		framework.ExpectNoError(rqErr, "failed to create resource quota")
  1345  
  1346  		tStamp := strconv.Itoa(time.Now().Nanosecond())
  1347  		initDefaultResizePolicy(containers)
  1348  		initDefaultResizePolicy(expected)
  1349  		testPod1 := makeTestPod(f.Namespace.Name, "testpod1", tStamp, containers)
  1350  		testPod2 := makeTestPod(f.Namespace.Name, "testpod2", tStamp, containers)
  1351  
  1352  		ginkgo.By("creating pods")
  1353  		newPod1 := podClient.CreateSync(ctx, testPod1)
  1354  		newPod2 := podClient.CreateSync(ctx, testPod2)
  1355  
  1356  		ginkgo.By("verifying the pods are in kubernetes")
  1357  		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp}))
  1358  		options := metav1.ListOptions{LabelSelector: selector.String()}
  1359  		podList, listErr := podClient.List(context.TODO(), options)
  1360  		framework.ExpectNoError(listErr, "failed to query for pods")
  1361  		gomega.Expect(podList.Items).Should(gomega.HaveLen(2))
  1362  
  1363  		ginkgo.By("verifying initial pod resources are as expected")
  1364  		verifyPodResources(newPod1, containers)
  1365  
  1366  		ginkgo.By("patching pod for resize within resource quota")
  1367  		patchedPod, pErr := f.ClientSet.CoreV1().Pods(newPod1.Namespace).Patch(context.TODO(), newPod1.Name,
  1368  			types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
  1369  		framework.ExpectNoError(pErr, "failed to patch pod for resize")
  1370  
  1371  		ginkgo.By("verifying pod patched for resize within resource quota")
  1372  		verifyPodResources(patchedPod, expected)
  1373  		verifyPodAllocations(patchedPod, containers, true)
  1374  
  1375  		ginkgo.By("waiting for resize to be actuated")
  1376  		resizedPod := waitForPodResizeActuation(f.ClientSet, podClient, newPod1, patchedPod, expected)
  1377  		if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod1.Spec.NodeName) {
  1378  			ginkgo.By("verifying pod container's cgroup values after resize")
  1379  			if !framework.NodeOSDistroIs("windows") {
  1380  				verifyPodContainersCgroupValues(resizedPod, expected, true)
  1381  			}
  1382  		}
  1383  
  1384  		ginkgo.By("verifying pod resources after resize")
  1385  		verifyPodResources(resizedPod, expected)
  1386  
  1387  		ginkgo.By("verifying pod allocations after resize")
  1388  		verifyPodAllocations(resizedPod, expected, true)
  1389  
  1390  		ginkgo.By("patching pod for resize with memory exceeding resource quota")
  1391  		_, pErrExceedMemory := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(context.TODO(),
  1392  			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedMemory), metav1.PatchOptions{})
  1393  		gomega.Expect(pErrExceedMemory).To(gomega.HaveOccurred(), "exceeded quota: %s, requested: memory=350Mi, used: memory=700Mi, limited: memory=800Mi",
  1394  			resourceQuota.Name)
  1395  
  1396  		ginkgo.By("verifying pod resources and allocations remain unchanged after the rejected memory resize")
  1397  		patchedPodExceedMemory, pErrEx2 := podClient.Get(context.TODO(), resizedPod.Name, metav1.GetOptions{})
  1398  		framework.ExpectNoError(pErrEx2, "failed to get pod post exceed memory resize")
  1399  		verifyPodResources(patchedPodExceedMemory, expected)
  1400  		verifyPodAllocations(patchedPodExceedMemory, expected, true)
  1401  
  1402  		ginkgo.By(fmt.Sprintf("patching pod %s for resize with CPU exceeding resource quota", resizedPod.Name))
  1403  		_, pErrExceedCPU := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(context.TODO(),
  1404  			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedCPU), metav1.PatchOptions{})
  1405  		gomega.Expect(pErrExceedCPU).To(gomega.HaveOccurred(), "exceeded quota: %s, requested: cpu=200m, used: cpu=700m, limited: cpu=800m",
  1406  			resourceQuota.Name)
  1407  
  1408  		ginkgo.By("verifying pod resources and allocations remain unchanged after the rejected CPU resize")
  1409  		patchedPodExceedCPU, pErrEx1 := podClient.Get(context.TODO(), resizedPod.Name, metav1.GetOptions{})
  1410  		framework.ExpectNoError(pErrEx1, "failed to get pod post exceed CPU resize")
  1411  		verifyPodResources(patchedPodExceedCPU, expected)
  1412  		verifyPodAllocations(patchedPodExceedCPU, expected, true)
  1413  
  1414  		ginkgo.By("deleting pods")
  1415  		delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod1)
  1416  		framework.ExpectNoError(delErr1, "failed to delete pod %s", newPod1.Name)
  1417  		delErr2 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod2)
  1418  		framework.ExpectNoError(delErr2, "failed to delete pod %s", newPod2.Name)
  1419  	})
  1420  }
  1421  
  1422  func doPodResizeErrorTests() {
  1423  	f := framework.NewDefaultFramework("pod-resize-errors")
  1424  	var podClient *e2epod.PodClient
  1425  	ginkgo.BeforeEach(func() {
  1426  		podClient = e2epod.NewPodClient(f)
  1427  	})
  1428  
  1429  	type testCase struct {
  1430  		name        string
  1431  		containers  []TestContainerInfo
  1432  		patchString string
  1433  		patchError  string
  1434  		expected    []TestContainerInfo
  1435  	}
  1436  
  1437  	tests := []testCase{
  1438  		{
  1439  			name: "BestEffort pod - try requesting memory, expect error",
  1440  			containers: []TestContainerInfo{
  1441  				{
  1442  					Name: "c1",
  1443  				},
  1444  			},
  1445  			patchString: `{"spec":{"containers":[
  1446  						{"name":"c1", "resources":{"requests":{"memory":"400Mi"}}}
  1447  					]}}`,
  1448  			patchError: "Pod QoS is immutable",
  1449  			expected: []TestContainerInfo{
  1450  				{
  1451  					Name: "c1",
  1452  				},
  1453  			},
  1454  		},
  1455  	}
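        	// Note on the expected failure above: a pod with no requests or limits is BestEffort,
        	// and adding a memory request in place would move it to the Burstable QoS class. A
        	// running pod's QoS class cannot change, so the apiserver is expected to reject the
        	// patch (hence patchError "Pod QoS is immutable") and the container should keep its
        	// original, empty resources.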
  1456  
  1457  	for idx := range tests {
  1458  		tc := tests[idx]
  1459  		ginkgo.It(tc.name, func(ctx context.Context) {
  1460  			var testPod, patchedPod *v1.Pod
  1461  			var pErr error
  1462  
  1463  			tStamp := strconv.Itoa(time.Now().Nanosecond())
  1464  			initDefaultResizePolicy(tc.containers)
  1465  			initDefaultResizePolicy(tc.expected)
  1466  			testPod = makeTestPod(f.Namespace.Name, "testpod", tStamp, tc.containers)
  1467  
  1468  			ginkgo.By("creating pod")
  1469  			newPod := podClient.CreateSync(ctx, testPod)
  1470  
  1471  			ginkgo.By("verifying the pod is in kubernetes")
  1472  			selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": tStamp}))
  1473  			options := metav1.ListOptions{LabelSelector: selector.String()}
  1474  			podList, err := podClient.List(context.TODO(), options)
  1475  			framework.ExpectNoError(err, "failed to query for pods")
  1476  			gomega.Expect(podList.Items).Should(gomega.HaveLen(1))
  1477  
  1478  			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
  1479  			verifyPodResources(newPod, tc.containers)
  1480  			verifyPodResizePolicy(newPod, tc.containers)
  1481  
  1482  			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
  1483  			verifyPodStatusResources(newPod, tc.containers)
  1484  			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, newPod.Spec.NodeName) {
  1485  				if !framework.NodeOSDistroIs("windows") {
  1486  					verifyPodContainersCgroupValues(newPod, tc.containers, true)
  1487  				}
  1488  			}
  1489  
  1490  			ginkgo.By("patching pod for resize")
  1491  			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
  1492  				types.StrategicMergePatchType, []byte(tc.patchString), metav1.PatchOptions{})
  1493  			if tc.patchError == "" {
  1494  				framework.ExpectNoError(pErr, "failed to patch pod for resize")
  1495  			} else {
  1496  				gomega.Expect(pErr).To(gomega.HaveOccurred(), tc.patchError)
  1497  				patchedPod = newPod
  1498  			}
  1499  
  1500  			if !isInPlaceResizeSupportedByRuntime(f.ClientSet, patchedPod.Spec.NodeName) {
  1501  				ginkgo.By("verifying pod container's cgroup values after patch")
  1502  				if !framework.NodeOSDistroIs("windows") {
  1503  					verifyPodContainersCgroupValues(patchedPod, tc.expected, true)
  1504  				}
  1505  			}
  1506  
  1507  			ginkgo.By("verifying pod resources after patch")
  1508  			verifyPodResources(patchedPod, tc.expected)
  1509  
  1510  			ginkgo.By("verifying pod allocations after patch")
  1511  			verifyPodAllocations(patchedPod, tc.expected, true)
  1512  
  1513  			ginkgo.By("deleting pod")
  1514  			err = e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod)
  1515  			framework.ExpectNoError(err, "failed to delete pod")
  1516  		})
  1517  	}
  1518  }
  1519  
  1520  func doPodResizeSchedulerTests() {
  1521  	f := framework.NewDefaultFramework("pod-resize-scheduler")
  1522  	var podClient *e2epod.PodClient
  1523  	ginkgo.BeforeEach(func() {
  1524  		podClient = e2epod.NewPodClient(f)
  1525  	})
  1526  
  1527  	ginkgo.It("pod-resize-scheduler-tests", func(ctx context.Context) {
  1528  		nodes, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
  1529  		framework.ExpectNoError(err, "failed to get running nodes")
  1530  		gomega.Expect(nodes.Items).ShouldNot(gomega.BeEmpty())
  1531  		framework.Logf("Found %d schedulable nodes", len(nodes.Items))
  1532  
  1533  		//
  1534  		// Calculate available CPU. nodeAvailableCPU = nodeAllocatableCPU - sum(podAllocatedCPU)
  1535  		//
  1536  		getNodeAllocatableAndAvailableMilliCPUValues := func(n *v1.Node) (int64, int64) {
  1537  			gomega.Expect(n.Status.Allocatable).ShouldNot(gomega.BeNil(), "allocatable")
  1538  			nodeAllocatableMilliCPU := n.Status.Allocatable.Cpu().MilliValue()
  1539  			podAllocatedMilliCPU := int64(0)
  1540  
  1541  			// Exclude pods that are in the Succeeded or Failed states
  1542  			selector := fmt.Sprintf("spec.nodeName=%s,status.phase!=%v,status.phase!=%v", n.Name, v1.PodSucceeded, v1.PodFailed)
  1543  			listOptions := metav1.ListOptions{FieldSelector: selector}
  1544  			podList, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), listOptions)
  1545  
  1546  			framework.ExpectNoError(err, "failed to get running pods")
  1547  			framework.Logf("Found %d pods on node '%s'", len(podList.Items), n.Name)
  1548  			for _, pod := range podList.Items {
  1549  				podRequestMilliCPU := resourceapi.GetResourceRequest(&pod, v1.ResourceCPU)
  1550  				podAllocatedMilliCPU += podRequestMilliCPU
  1551  			}
  1552  			nodeAvailableMilliCPU := nodeAllocatableMilliCPU - podAllocatedMilliCPU
  1553  			return nodeAllocatableMilliCPU, nodeAvailableMilliCPU
  1554  		}
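        		// For illustration only (hypothetical numbers): a node with 4000m allocatable CPU
        		// whose running pods request a total of 1500m has
        		// nodeAvailableMilliCPU = 4000 - 1500 = 2500.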
  1555  
  1556  		ginkgo.By("Find node CPU resources available for allocation")
  1557  		node := nodes.Items[0]
  1558  		nodeAllocatableMilliCPU, nodeAvailableMilliCPU := getNodeAllocatableAndAvailableMilliCPUValues(&node)
  1559  		framework.Logf("Node '%s': NodeAllocatable MilliCPUs = %dm. MilliCPUs currently available to allocate = %dm.",
  1560  			node.Name, nodeAllocatableMilliCPU, nodeAvailableMilliCPU)
  1561  
  1562  		//
  1563  		// Scheduler-focused pod resize E2E test case #1:
  1564  		//     1. Create pod1 and pod2 on node such that pod1 has enough CPU to be scheduled, but pod2 does not.
  1565  		//     2. Resize pod2 down so that it fits on the node and can be scheduled.
  1566  		//     3. Verify that pod2 gets scheduled and comes up and running.
  1567  		//
  1568  		testPod1CPUQuantity := resource.NewMilliQuantity(nodeAvailableMilliCPU/2, resource.DecimalSI)
  1569  		testPod2CPUQuantity := resource.NewMilliQuantity(nodeAvailableMilliCPU, resource.DecimalSI)
  1570  		testPod2CPUQuantityResized := resource.NewMilliQuantity(testPod1CPUQuantity.MilliValue()/2, resource.DecimalSI)
  1571  		framework.Logf("TEST1: testPod1 initial CPU request is '%dm'", testPod1CPUQuantity.MilliValue())
  1572  		framework.Logf("TEST1: testPod2 initial CPU request is '%dm'", testPod2CPUQuantity.MilliValue())
  1573  		framework.Logf("TEST1: testPod2 resized CPU request is '%dm'", testPod2CPUQuantityResized.MilliValue())
  1574  
  1575  		c1 := []TestContainerInfo{
  1576  			{
  1577  				Name:      "c1",
  1578  				Resources: &ContainerResources{CPUReq: testPod1CPUQuantity.String(), CPULim: testPod1CPUQuantity.String()},
  1579  			},
  1580  		}
  1581  		c2 := []TestContainerInfo{
  1582  			{
  1583  				Name:      "c2",
  1584  				Resources: &ContainerResources{CPUReq: testPod2CPUQuantity.String(), CPULim: testPod2CPUQuantity.String()},
  1585  			},
  1586  		}
  1587  		patchTestpod2ToFitNode := fmt.Sprintf(`{
  1588  				"spec": {
  1589  					"containers": [
  1590  						{
  1591  							"name":      "c2",
  1592  							"resources": {"requests": {"cpu": "%dm"}, "limits": {"cpu": "%dm"}}
  1593  						}
  1594  					]
  1595  				}
  1596  			}`, testPod2CPUQuantityResized.MilliValue(), testPod2CPUQuantityResized.MilliValue())
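        		// Sizing rationale for TEST1, derived from the quantities above: testPod1 requests
        		// half of the currently available CPU and testPod2 requests all of it, so testPod2
        		// cannot fit while testPod1 is running. The patch shrinks testPod2 to half of
        		// testPod1's request (about a quarter of the available CPU), which should leave
        		// enough headroom for it to be scheduled.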
  1597  
  1598  		tStamp := strconv.Itoa(time.Now().Nanosecond())
  1599  		initDefaultResizePolicy(c1)
  1600  		initDefaultResizePolicy(c2)
  1601  		testPod1 := makeTestPod(f.Namespace.Name, "testpod1", tStamp, c1)
  1602  		testPod2 := makeTestPod(f.Namespace.Name, "testpod2", tStamp, c2)
  1603  		e2epod.SetNodeAffinity(&testPod1.Spec, node.Name)
  1604  		e2epod.SetNodeAffinity(&testPod2.Spec, node.Name)
  1605  
  1606  		ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that fits the node '%s'", testPod1.Name, node.Name))
  1607  		testPod1 = podClient.CreateSync(ctx, testPod1)
  1608  		gomega.Expect(testPod1.Status.Phase).To(gomega.Equal(v1.PodRunning))
  1609  
  1610  		ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that won't fit node '%s' with pod '%s' on it", testPod2.Name, node.Name, testPod1.Name))
  1611  		testPod2 = podClient.Create(ctx, testPod2)
  1612  		err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod2.Name, testPod2.Namespace)
  1613  		framework.ExpectNoError(err, "pod2 did not become Pending (unschedulable) as expected")
  1614  		gomega.Expect(testPod2.Status.Phase).To(gomega.Equal(v1.PodPending))
  1615  
  1616  		ginkgo.By(fmt.Sprintf("TEST1: Resize pod '%s' to fit in node '%s'", testPod2.Name, node.Name))
  1617  		testPod2, pErr := f.ClientSet.CoreV1().Pods(testPod2.Namespace).Patch(ctx,
  1618  			testPod2.Name, types.StrategicMergePatchType, []byte(patchTestpod2ToFitNode), metav1.PatchOptions{})
  1619  		framework.ExpectNoError(pErr, "failed to patch pod for resize")
  1620  
  1621  		ginkgo.By(fmt.Sprintf("TEST1: Verify that pod '%s' is running after resize", testPod2.Name))
  1622  		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, testPod2))
  1623  
  1624  		//
  1625  		// Scheduler-focused pod resize E2E test case #2:
  1626  		//     1. With pod1 + pod2 running on node above, create pod3 that requests more CPU than available, verify pending.
  1627  		//     2. Resize pod1 down so that pod3 gets room to be scheduled.
  1628  		//     3. Verify that pod3 is scheduled and running.
  1629  		//
  1630  		nodeAllocatableMilliCPU2, nodeAvailableMilliCPU2 := getNodeAllocatableAndAvailableMilliCPUValues(&node)
  1631  		framework.Logf("TEST2: Node '%s': NodeAllocatable MilliCPUs = %dm. MilliCPUs currently available to allocate = %dm.",
  1632  			node.Name, nodeAllocatableMilliCPU2, nodeAvailableMilliCPU2)
  1633  		testPod3CPUQuantity := resource.NewMilliQuantity(nodeAvailableMilliCPU2+testPod1CPUQuantity.MilliValue()/2, resource.DecimalSI)
  1634  		testPod1CPUQuantityResized := resource.NewMilliQuantity(testPod1CPUQuantity.MilliValue()/3, resource.DecimalSI)
  1635  		framework.Logf("TEST2: testPod1 MilliCPUs after resize '%dm'", testPod1CPUQuantityResized.MilliValue())
  1636  
  1637  		c3 := []TestContainerInfo{
  1638  			{
  1639  				Name:      "c3",
  1640  				Resources: &ContainerResources{CPUReq: testPod3CPUQuantity.String(), CPULim: testPod3CPUQuantity.String()},
  1641  			},
  1642  		}
  1643  		patchTestpod1ToMakeSpaceForPod3 := fmt.Sprintf(`{
  1644  				"spec": {
  1645  					"containers": [
  1646  						{
  1647  							"name":      "c1",
  1648  							"resources": {"requests": {"cpu": "%dm"},"limits": {"cpu": "%dm"}}
  1649  						}
  1650  					]
  1651  				}
  1652  			}`, testPod1CPUQuantityResized.MilliValue(), testPod1CPUQuantityResized.MilliValue())
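        		// Sizing rationale for TEST2, derived from the quantities above: testPod3 requests
        		// everything still available plus half of testPod1's current request, so it should
        		// stay Pending. Resizing testPod1 down to a third of its request frees roughly two
        		// thirds of it, more than the extra half testPod3 needs, so the scheduler should
        		// then be able to place testPod3.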
  1653  
  1654  		tStamp = strconv.Itoa(time.Now().Nanosecond())
  1655  		initDefaultResizePolicy(c3)
  1656  		testPod3 := makeTestPod(f.Namespace.Name, "testpod3", tStamp, c3)
  1657  		e2epod.SetNodeAffinity(&testPod3.Spec, node.Name)
  1658  
  1659  		ginkgo.By(fmt.Sprintf("TEST2: Create testPod3 '%s' that cannot fit node '%s' due to insufficient CPU.", testPod3.Name, node.Name))
  1660  		testPod3 = podClient.Create(ctx, testPod3)
  1661  		p3Err := e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod3.Name, testPod3.Namespace)
  1662  		framework.ExpectNoError(p3Err, "pod3 did not become Pending (unschedulable) as expected")
  1663  		gomega.Expect(testPod3.Status.Phase).To(gomega.Equal(v1.PodPending))
  1664  
  1665  		ginkgo.By(fmt.Sprintf("TEST2: Resize pod '%s' to make enough space for pod '%s'", testPod1.Name, testPod3.Name))
  1666  		testPod1, p1Err := f.ClientSet.CoreV1().Pods(testPod1.Namespace).Patch(context.TODO(),
  1667  			testPod1.Name, types.StrategicMergePatchType, []byte(patchTestpod1ToMakeSpaceForPod3), metav1.PatchOptions{})
  1668  		framework.ExpectNoError(p1Err, "failed to patch pod for resize")
  1669  
  1670  		ginkgo.By(fmt.Sprintf("TEST2: Verify pod '%s' is running after successfully resizing pod '%s'", testPod3.Name, testPod1.Name))
  1671  		framework.Logf("TEST2: Pod '%s' CPU requests '%dm'", testPod1.Name, testPod1.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())
  1672  		framework.Logf("TEST2: Pod '%s' CPU requests '%dm'", testPod2.Name, testPod2.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())
  1673  		framework.Logf("TEST2: Pod '%s' CPU requests '%dm'", testPod3.Name, testPod3.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())
  1674  		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, testPod3))
  1675  
  1676  		ginkgo.By("deleting pods")
  1677  		delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, testPod1)
  1678  		framework.ExpectNoError(delErr1, "failed to delete pod %s", testPod1.Name)
  1679  		delErr2 := e2epod.DeletePodWithWait(ctx, f.ClientSet, testPod2)
  1680  		framework.ExpectNoError(delErr2, "failed to delete pod %s", testPod2.Name)
  1681  		delErr3 := e2epod.DeletePodWithWait(ctx, f.ClientSet, testPod3)
  1682  		framework.ExpectNoError(delErr3, "failed to delete pod %s", testPod3.Name)
  1683  	})
  1684  }
  1685  
  1686  var _ = SIGDescribe(framework.WithSerial(), "Pod InPlace Resize Container (scheduler-focused)", feature.InPlacePodVerticalScaling, func() {
  1687  	doPodResizeSchedulerTests()
  1688  })
  1689  
  1690  var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, func() {
  1691  	doPodResizeTests()
  1692  	doPodResizeResourceQuotaTests()
  1693  	doPodResizeErrorTests()
  1694  })