k8s.io/kubernetes@v1.29.3/pkg/kubelet/kuberuntime/kuberuntime_sandbox_linux_test.go

//go:build linux
// +build linux

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func TestApplySandboxResources(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	m.cpuCFSQuota = true

	config := &runtimeapi.PodSandboxConfig{
		Linux: &runtimeapi.LinuxPodSandboxConfig{},
	}

	getPodWithOverhead := func() *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceMemory: resource.MustParse("128Mi"),
								v1.ResourceCPU:    resource.MustParse("2"),
							},
							Limits: v1.ResourceList{
								v1.ResourceMemory: resource.MustParse("256Mi"),
								v1.ResourceCPU:    resource.MustParse("4"),
							},
						},
					},
				},
				Overhead: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("128Mi"),
					v1.ResourceCPU:    resource.MustParse("1"),
				},
			},
		}
	}
	getPodWithoutOverhead := func() *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceMemory: resource.MustParse("128Mi"),
							},
							Limits: v1.ResourceList{
								v1.ResourceMemory: resource.MustParse("256Mi"),
							},
						},
					},
				},
			},
		}
	}

	require.NoError(t, err)

	tests := []struct {
		description      string
		pod              *v1.Pod
		expectedResource *runtimeapi.LinuxContainerResources
		expectedOverhead *runtimeapi.LinuxContainerResources
		cgroupVersion    CgroupVersion
	}{
		{
			description: "pod with overhead defined",
			pod:         getPodWithOverhead(),
			expectedResource: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 268435456,
				CpuPeriod:          100000,
				CpuQuota:           400000,
				CpuShares:          2048,
			},
			expectedOverhead: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 134217728,
				CpuPeriod:          100000,
				CpuQuota:           100000,
				CpuShares:          1024,
			},
			cgroupVersion: cgroupV1,
		},
		{
			description: "pod without overhead defined",
			pod:         getPodWithoutOverhead(),
			expectedResource: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 268435456,
				CpuPeriod:          100000,
				CpuQuota:           0,
				CpuShares:          2,
			},
			expectedOverhead: &runtimeapi.LinuxContainerResources{},
			cgroupVersion:    cgroupV1,
		},
		{
			description: "pod with overhead defined",
			pod:         getPodWithOverhead(),
			expectedResource: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 268435456,
				CpuPeriod:          100000,
				CpuQuota:           400000,
				CpuShares:          2048,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			expectedOverhead: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 134217728,
				CpuPeriod:          100000,
				CpuQuota:           100000,
				CpuShares:          1024,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			cgroupVersion: cgroupV2,
		},
		{
			description: "pod without overhead defined",
			pod:         getPodWithoutOverhead(),
			expectedResource: &runtimeapi.LinuxContainerResources{
				MemoryLimitInBytes: 268435456,
				CpuPeriod:          100000,
				CpuQuota:           0,
				CpuShares:          2,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			expectedOverhead: &runtimeapi.LinuxContainerResources{},
			cgroupVersion:    cgroupV2,
		},
	}

	for i, test := range tests {
		setCgroupVersionDuringTest(test.cgroupVersion)

		m.applySandboxResources(test.pod, config)
		assert.Equal(t, test.expectedResource, config.Linux.Resources, "TestCase[%d]: %s", i, test.description)
		assert.Equal(t, test.expectedOverhead, config.Linux.Overhead, "TestCase[%d]: %s", i, test.description)
	}
}

func TestGeneratePodSandboxConfigWithLinuxSecurityContext(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)
	pod := newTestPodWithLinuxSecurityContext()

	expectedLinuxPodSandboxConfig := &runtimeapi.LinuxPodSandboxConfig{
		SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
			SelinuxOptions: &runtimeapi.SELinuxOption{
				User: "qux",
			},
			RunAsUser:  &runtimeapi.Int64Value{Value: 1000},
			RunAsGroup: &runtimeapi.Int64Value{Value: 10},
		},
	}

	podSandboxConfig, err := m.generatePodSandboxConfig(pod, 1)
	assert.NoError(t, err)
	assert.Equal(t, expectedLinuxPodSandboxConfig.SecurityContext.SelinuxOptions, podSandboxConfig.Linux.SecurityContext.SelinuxOptions)
	assert.Equal(t, expectedLinuxPodSandboxConfig.SecurityContext.RunAsUser, podSandboxConfig.Linux.SecurityContext.RunAsUser)
	assert.Equal(t, expectedLinuxPodSandboxConfig.SecurityContext.RunAsGroup, podSandboxConfig.Linux.SecurityContext.RunAsGroup)
}

func newTestPodWithLinuxSecurityContext() *v1.Pod {
	anyGroup := int64(10)
	anyUser := int64(1000)
	pod := newTestPod()

	pod.Spec.SecurityContext = &v1.PodSecurityContext{
		SELinuxOptions: &v1.SELinuxOptions{
			User: "qux",
		},
		RunAsUser:  &anyUser,
		RunAsGroup: &anyGroup,
	}

	return pod
}
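
// ---------------------------------------------------------------------------
// Editor's sketch (hypothetical, not part of the upstream file): the expected
// CpuShares/CpuQuota values in TestApplySandboxResources follow the kubelet's
// usual milli-CPU conversions, assuming the standard 100ms CFS period. The
// helper name below is invented purely to document where numbers such as
// 2048, 400000, and the floor of 2 shares come from:
//
//	request 2 CPU (2000m) -> 2000*1024/1000  = 2048 shares
//	limit   4 CPU (4000m) -> 4000*100000/1000 = 400000 quota (period 100000)
//	no CPU request/limit  -> floor of 2 shares, quota 0
func illustrativeSandboxCPUValues(requestMilliCPU, limitMilliCPU int64) (shares, quota int64) {
	const (
		minShares    = 2      // smallest cpu.shares value the kubelet assigns
		sharesPerCPU = 1024   // shares granted per whole CPU of request
		quotaPeriod  = 100000 // 100ms CFS period, in microseconds
	)
	shares = requestMilliCPU * sharesPerCPU / 1000
	if shares < minShares {
		shares = minShares
	}
	if limitMilliCPU > 0 {
		quota = limitMilliCPU * quotaPeriod / 1000
	}
	return shares, quota
}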