k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/kubelet/kuberuntime/kuberuntime_container_linux_test.go

//go:build linux
// +build linux

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kuberuntime

import (
	"context"
	"fmt"
	"math"
	"os"
	"reflect"
	"strconv"
	"testing"

	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/pkg/kubelet/types"

	"github.com/google/go-cmp/cmp"
	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func makeExpectedConfig(m *kubeGenericRuntimeManager, pod *v1.Pod, containerIndex int, enforceMemoryQoS bool) *runtimeapi.ContainerConfig {
	ctx := context.Background()
	container := &pod.Spec.Containers[containerIndex]
	podIP := ""
	restartCount := 0
	opts, _, _ := m.runtimeHelper.GenerateRunContainerOptions(ctx, pod, container, podIP, []string{podIP})
	containerLogsPath := buildContainerLogsPath(container.Name, restartCount)
	restartCountUint32 := uint32(restartCount)
	envs := make([]*runtimeapi.KeyValue, len(opts.Envs))

	l, _ := m.generateLinuxContainerConfig(container, pod, new(int64), "", nil, enforceMemoryQoS)

	expectedConfig := &runtimeapi.ContainerConfig{
		Metadata: &runtimeapi.ContainerMetadata{
			Name:    container.Name,
			Attempt: restartCountUint32,
		},
		Image:       &runtimeapi.ImageSpec{Image: container.Image, UserSpecifiedImage: container.Image},
		Command:     container.Command,
		Args:        []string(nil),
		WorkingDir:  container.WorkingDir,
		Labels:      newContainerLabels(container, pod),
		Annotations: newContainerAnnotations(container, pod, restartCount, opts),
		Devices:     makeDevices(opts),
		Mounts:      m.makeMounts(opts, container),
		LogPath:     containerLogsPath,
		Stdin:       container.Stdin,
		StdinOnce:   container.StdinOnce,
		Tty:         container.TTY,
		Linux:       l,
		Envs:        envs,
		CDIDevices:  makeCDIDevices(opts),
	}
	return expectedConfig
}

func TestGenerateContainerConfig(t *testing.T) {
	ctx := context.Background()
	_, imageService, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	runAsUser := int64(1000)
	runAsGroup := int64(2000)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
					Command:         []string{"testCommand"},
					WorkingDir:      "testWorkingDir",
					SecurityContext: &v1.SecurityContext{
						RunAsUser:  &runAsUser,
						RunAsGroup: &runAsGroup,
					},
				},
			},
		},
	}

	expectedConfig := makeExpectedConfig(m, pod, 0, false)
	containerConfig, _, err := m.generateContainerConfig(ctx, &pod.Spec.Containers[0], pod, 0, "", pod.Spec.Containers[0].Image, []string{}, nil)
	assert.NoError(t, err)
	assert.Equal(t, expectedConfig, containerConfig, "generate container config for kubelet runtime v1.")
	assert.Equal(t, runAsUser, containerConfig.GetLinux().GetSecurityContext().GetRunAsUser().GetValue(), "RunAsUser should be set")
	assert.Equal(t, runAsGroup, containerConfig.GetLinux().GetSecurityContext().GetRunAsGroup().GetValue(), "RunAsGroup should be set")

	runAsRoot := int64(0)
	runAsNonRootTrue := true
	podWithContainerSecurityContext := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
					Command:         []string{"testCommand"},
					WorkingDir:      "testWorkingDir",
					SecurityContext: &v1.SecurityContext{
						RunAsNonRoot: &runAsNonRootTrue,
						RunAsUser:    &runAsRoot,
					},
				},
			},
		},
	}

	_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
	assert.Error(t, err)

	imageID, _ := imageService.PullImage(ctx, &runtimeapi.ImageSpec{Image: "busybox"}, nil, nil)
	resp, _ := imageService.ImageStatus(ctx, &runtimeapi.ImageSpec{Image: imageID}, false)

	resp.Image.Uid = nil
	resp.Image.Username = "test"

	podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsUser = nil
	podWithContainerSecurityContext.Spec.Containers[0].SecurityContext.RunAsNonRoot = &runAsNonRootTrue

	_, _, err = m.generateContainerConfig(ctx, &podWithContainerSecurityContext.Spec.Containers[0], podWithContainerSecurityContext, 0, "", podWithContainerSecurityContext.Spec.Containers[0].Image, []string{}, nil)
	assert.Error(t, err, "RunAsNonRoot should fail for non-numeric username")
}

func TestGenerateLinuxContainerConfigResources(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	m.cpuCFSQuota = true

	assert.NoError(t, err)

	tests := []struct {
		name         string
		podResources v1.ResourceRequirements
		expected     *runtimeapi.LinuxContainerResources
	}{
		{
			name: "Request 128M/1C, Limit 256M/3C",
			podResources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("128Mi"),
					v1.ResourceCPU:    resource.MustParse("1"),
				},
				Limits: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("256Mi"),
					v1.ResourceCPU:    resource.MustParse("3"),
				},
			},
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           300000,
				CpuShares:          1024,
				MemoryLimitInBytes: 256 * 1024 * 1024,
			},
		},
		{
			name: "Request 128M/2C, No Limit",
			podResources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceMemory: resource.MustParse("128Mi"),
					v1.ResourceCPU:    resource.MustParse("2"),
				},
			},
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           0,
				CpuShares:          2048,
				MemoryLimitInBytes: 0,
			},
		},
	}

	for _, test := range tests {
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:       "12345678",
				Name:      "bar",
				Namespace: "new",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:            "foo",
						Image:           "busybox",
						ImagePullPolicy: v1.PullIfNotPresent,
						Command:         []string{"testCommand"},
						WorkingDir:      "testWorkingDir",
						Resources:       test.podResources,
					},
				},
			},
		}

		linuxConfig, err := m.generateLinuxContainerConfig(&pod.Spec.Containers[0], pod, new(int64), "", nil, false)
		assert.NoError(t, err)
		assert.Equal(t, test.expected.CpuPeriod, linuxConfig.GetResources().CpuPeriod, test.name)
		assert.Equal(t, test.expected.CpuQuota, linuxConfig.GetResources().CpuQuota, test.name)
		assert.Equal(t, test.expected.CpuShares, linuxConfig.GetResources().CpuShares, test.name)
		assert.Equal(t, test.expected.MemoryLimitInBytes, linuxConfig.GetResources().MemoryLimitInBytes, test.name)
	}
}

func TestCalculateLinuxResources(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	m.cpuCFSQuota = true

	assert.NoError(t, err)

	generateResourceQuantity := func(str string) *resource.Quantity {
		quantity := resource.MustParse(str)
		return &quantity
	}

	tests := []struct {
		name          string
		cpuReq        *resource.Quantity
		cpuLim        *resource.Quantity
		memLim        *resource.Quantity
		expected      *runtimeapi.LinuxContainerResources
		cgroupVersion CgroupVersion
	}{
		{
			name:   "Request128MBLimit256MB",
			cpuReq: generateResourceQuantity("1"),
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("128Mi"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          1024,
				MemoryLimitInBytes: 134217728,
			},
			cgroupVersion: cgroupV1,
		},
		{
			name:   "RequestNoMemory",
			cpuReq: generateResourceQuantity("2"),
			cpuLim: generateResourceQuantity("8"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           800000,
				CpuShares:          2048,
				MemoryLimitInBytes: 0,
			},
			cgroupVersion: cgroupV1,
		},
		{
			name:   "RequestNilCPU",
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          2048,
				MemoryLimitInBytes: 0,
			},
			cgroupVersion: cgroupV1,
		},
		{
			name:   "RequestZeroCPU",
			cpuReq: generateResourceQuantity("0"),
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          2,
				MemoryLimitInBytes: 0,
			},
			cgroupVersion: cgroupV1,
		},
		{
			name:   "Request128MBLimit256MB",
			cpuReq: generateResourceQuantity("1"),
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("128Mi"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          1024,
				MemoryLimitInBytes: 134217728,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			cgroupVersion: cgroupV2,
		},
		{
			name:   "RequestNoMemory",
			cpuReq: generateResourceQuantity("2"),
			cpuLim: generateResourceQuantity("8"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           800000,
				CpuShares:          2048,
				MemoryLimitInBytes: 0,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			cgroupVersion: cgroupV2,
		},
		{
			name:   "RequestNilCPU",
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          2048,
				MemoryLimitInBytes: 0,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			cgroupVersion: cgroupV2,
		},
		{
			name:   "RequestZeroCPU",
			cpuReq: generateResourceQuantity("0"),
			cpuLim: generateResourceQuantity("2"),
			memLim: generateResourceQuantity("0"),
			expected: &runtimeapi.LinuxContainerResources{
				CpuPeriod:          100000,
				CpuQuota:           200000,
				CpuShares:          2,
				MemoryLimitInBytes: 0,
				Unified:            map[string]string{"memory.oom.group": "1"},
			},
			cgroupVersion: cgroupV2,
		},
	}
	for _, test := range tests {
		setCgroupVersionDuringTest(test.cgroupVersion)
		linuxContainerResources := m.calculateLinuxResources(test.cpuReq, test.cpuLim, test.memLim)
		assert.Equal(t, test.expected, linuxContainerResources)
	}
}

func TestGenerateContainerConfigWithMemoryQoSEnforced(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	podRequestMemory := resource.MustParse("128Mi")
	pod1LimitMemory := resource.MustParse("256Mi")
	pod1 := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
					Command:         []string{"testCommand"},
					WorkingDir:      "testWorkingDir",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceMemory: podRequestMemory,
						},
						Limits: v1.ResourceList{
							v1.ResourceMemory: pod1LimitMemory,
						},
					},
				},
			},
		},
	}

	pod2 := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "bar",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:            "foo",
					Image:           "busybox",
					ImagePullPolicy: v1.PullIfNotPresent,
					Command:         []string{"testCommand"},
					WorkingDir:      "testWorkingDir",
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceMemory: podRequestMemory,
						},
					},
				},
			},
		},
	}
	pageSize := int64(os.Getpagesize())
	memoryNodeAllocatable := resource.MustParse(fakeNodeAllocatableMemory)
	pod1MemoryHigh := int64(math.Floor(
		float64(podRequestMemory.Value())+
			(float64(pod1LimitMemory.Value())-float64(podRequestMemory.Value()))*float64(m.memoryThrottlingFactor))/float64(pageSize)) * pageSize
	pod2MemoryHigh := int64(math.Floor(
		float64(podRequestMemory.Value())+
			(float64(memoryNodeAllocatable.Value())-float64(podRequestMemory.Value()))*float64(m.memoryThrottlingFactor))/float64(pageSize)) * pageSize

	type expectedResult struct {
		containerConfig *runtimeapi.LinuxContainerConfig
		memoryLow       int64
		memoryHigh      int64
	}
	l1, _ := m.generateLinuxContainerConfig(&pod1.Spec.Containers[0], pod1, new(int64), "", nil, true)
	l2, _ := m.generateLinuxContainerConfig(&pod2.Spec.Containers[0], pod2, new(int64), "", nil, true)
	tests := []struct {
		name     string
		pod      *v1.Pod
		expected *expectedResult
	}{
		{
			name: "Request128MBLimit256MB",
			pod:  pod1,
			expected: &expectedResult{
				l1,
				128 * 1024 * 1024,
				int64(pod1MemoryHigh),
			},
		},
		{
			name: "Request128MBWithoutLimit",
			pod:  pod2,
			expected: &expectedResult{
				l2,
				128 * 1024 * 1024,
				int64(pod2MemoryHigh),
			},
		},
	}

	for _, test := range tests {
		linuxConfig, err := m.generateLinuxContainerConfig(&test.pod.Spec.Containers[0], test.pod, new(int64), "", nil, true)
		assert.NoError(t, err)
		assert.Equal(t, test.expected.containerConfig, linuxConfig, test.name)
		assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.min"], strconv.FormatInt(test.expected.memoryLow, 10), test.name)
		assert.Equal(t, linuxConfig.GetResources().GetUnified()["memory.high"], strconv.FormatInt(test.expected.memoryHigh, 10), test.name)
	}
}

func TestGetHugepageLimitsFromResources(t *testing.T) {
	var baseHugepage []*runtimeapi.HugepageLimit

	// For each page size, limit to 0.
	for _, pageSize := range libcontainercgroups.HugePageSizes() {
		baseHugepage = append(baseHugepage, &runtimeapi.HugepageLimit{
			PageSize: pageSize,
			Limit:    uint64(0),
		})
	}

	tests := []struct {
		name      string
		resources v1.ResourceRequirements
		expected  []*runtimeapi.HugepageLimit
	}{
		{
			name: "Success2MB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					"hugepages-2Mi": resource.MustParse("2Mi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "2MB",
					Limit:    2097152,
				},
			},
		},
		{
			name: "Success1GB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					"hugepages-1Gi": resource.MustParse("2Gi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "1GB",
					Limit:    2147483648,
				},
			},
		},
		{
			name: "Skip2MB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					"hugepages-2MB": resource.MustParse("2Mi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "2MB",
					Limit:    0,
				},
			},
		},
		{
			name: "Skip1GB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					"hugepages-1GB": resource.MustParse("2Gi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "1GB",
					Limit:    0,
				},
			},
		},
		{
			name: "Success2MBand1GB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU): resource.MustParse("0"),
					"hugepages-2Mi":                 resource.MustParse("2Mi"),
					"hugepages-1Gi":                 resource.MustParse("2Gi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "2MB",
					Limit:    2097152,
				},
				{
					PageSize: "1GB",
					Limit:    2147483648,
				},
			},
		},
		{
			name: "Skip2MBand1GB",
			resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU): resource.MustParse("0"),
					"hugepages-2MB":                 resource.MustParse("2Mi"),
					"hugepages-1GB":                 resource.MustParse("2Gi"),
				},
			},
			expected: []*runtimeapi.HugepageLimit{
				{
					PageSize: "2MB",
					Limit:    0,
				},
				{
					PageSize: "1GB",
					Limit:    0,
				},
			},
		},
	}

	for _, test := range tests {
		// Validate if machine supports hugepage size that used in test case.
		machineHugepageSupport := true
		for _, hugepageLimit := range test.expected {
			hugepageSupport := false
			for _, pageSize := range libcontainercgroups.HugePageSizes() {
				if pageSize == hugepageLimit.PageSize {
					hugepageSupport = true
					break
				}
			}

			if !hugepageSupport {
				machineHugepageSupport = false
				break
			}
		}

		// Case of machine can't support hugepage size
		if !machineHugepageSupport {
			continue
		}

		expectedHugepages := baseHugepage
		for _, hugepage := range test.expected {
			for _, expectedHugepage := range expectedHugepages {
				if expectedHugepage.PageSize == hugepage.PageSize {
					expectedHugepage.Limit = hugepage.Limit
				}
			}
		}

		results := GetHugepageLimitsFromResources(test.resources)
		if !reflect.DeepEqual(expectedHugepages, results) {
			t.Errorf("%s test failed. Expected %v but got %v", test.name, expectedHugepages, results)
		}

		for _, hugepage := range baseHugepage {
			hugepage.Limit = uint64(0)
		}
	}
}

func TestGenerateLinuxContainerConfigNamespaces(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	if err != nil {
		t.Fatalf("error creating test RuntimeManager: %v", err)
	}

	for _, tc := range []struct {
		name   string
		pod    *v1.Pod
		target *kubecontainer.ContainerID
		want   *runtimeapi.NamespaceOption
	}{
		{
			"Default namespaces",
			&v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "test"},
					},
				},
			},
			nil,
			&runtimeapi.NamespaceOption{
				Pid: runtimeapi.NamespaceMode_CONTAINER,
			},
		},
		{
			"PID Namespace POD",
			&v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "test"},
					},
					ShareProcessNamespace: &[]bool{true}[0],
				},
			},
			nil,
			&runtimeapi.NamespaceOption{
				Pid: runtimeapi.NamespaceMode_POD,
			},
		},
		{
			"PID Namespace TARGET",
			&v1.Pod{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "test"},
					},
				},
			},
			&kubecontainer.ContainerID{Type: "docker", ID: "really-long-id-string"},
			&runtimeapi.NamespaceOption{
				Pid:      runtimeapi.NamespaceMode_TARGET,
				TargetId: "really-long-id-string",
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			got, err := m.generateLinuxContainerConfig(&tc.pod.Spec.Containers[0], tc.pod, nil, "", tc.target, false)
			assert.NoError(t, err)
			if diff := cmp.Diff(tc.want, got.SecurityContext.NamespaceOptions); diff != "" {
				t.Errorf("%v: diff (-want +got):\n%v", t.Name(), diff)
			}
		})
	}
}

func TestGenerateLinuxContainerResources(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	m.machineInfo.MemoryCapacity = 17179860387 // 16GB

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "bar",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "c1",
					Image: "busybox",
				},
			},
		},
		Status: v1.PodStatus{},
	}

	for _, tc := range []struct {
		name      string
		scalingFg bool
		limits    v1.ResourceList
		requests  v1.ResourceList
		cStatus   []v1.ContainerStatus
		expected  *runtimeapi.LinuxContainerResources
	}{
		{
			"requests & limits, cpu & memory, guaranteed qos - no container status",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
		},
		{
			"requests & limits, cpu & memory, burstable qos - no container status",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 970},
		},
		{
			"best-effort qos - no container status",
			true,
			nil,
			nil,
			[]v1.ContainerStatus{},
			&runtimeapi.LinuxContainerResources{CpuShares: 2, OomScoreAdj: 1000},
		},
		{
			"requests & limits, cpu & memory, guaranteed qos - empty resources container status",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{{Name: "c1"}},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
		},
		{
			"requests & limits, cpu & memory, burstable qos - empty resources container status",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{{Name: "c1"}},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 999},
		},
		{
			"best-effort qos - empty resources container status",
			true,
			nil,
			nil,
			[]v1.ContainerStatus{{Name: "c1"}},
			&runtimeapi.LinuxContainerResources{CpuShares: 2, OomScoreAdj: 1000},
		},
		{
			"requests & limits, cpu & memory, guaranteed qos - container status with allocatedResources",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{
				{
					Name:               "c1",
					AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
				},
			},
			&runtimeapi.LinuxContainerResources{CpuShares: 204, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
		},
		{
			"requests & limits, cpu & memory, burstable qos - container status with allocatedResources",
			true,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{
				{
					Name:               "c1",
					AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
				},
			},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 970},
		},
		{
			"requests & limits, cpu & memory, guaranteed qos - no container status",
			false,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
		},
		{
			"requests & limits, cpu & memory, burstable qos - container status with allocatedResources",
			false,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m"), v1.ResourceMemory: resource.MustParse("750Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{
				{
					Name:               "c1",
					AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("250m"), v1.ResourceMemory: resource.MustParse("500Mi")},
				},
			},
			&runtimeapi.LinuxContainerResources{CpuShares: 256, MemoryLimitInBytes: 786432000, OomScoreAdj: 970},
		},
		{
			"requests & limits, cpu & memory, guaranteed qos - container status with allocatedResources",
			false,
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
			[]v1.ContainerStatus{
				{
					Name:               "c1",
					AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m"), v1.ResourceMemory: resource.MustParse("500Mi")},
				},
			},
			&runtimeapi.LinuxContainerResources{CpuShares: 204, MemoryLimitInBytes: 524288000, OomScoreAdj: -997},
		},
		{
			"best-effort qos - no container status",
			false,
			nil,
			nil,
			[]v1.ContainerStatus{},
			&runtimeapi.LinuxContainerResources{CpuShares: 2, OomScoreAdj: 1000},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			defer setSwapControllerAvailableDuringTest(false)()
			if tc.scalingFg {
				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
			}

			setCgroupVersionDuringTest(cgroupV1)

			pod.Spec.Containers[0].Resources = v1.ResourceRequirements{Limits: tc.limits, Requests: tc.requests}
			if len(tc.cStatus) > 0 {
				pod.Status.ContainerStatuses = tc.cStatus
			}
			resources := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[0], false)
			tc.expected.HugepageLimits = resources.HugepageLimits
			if !cmp.Equal(resources, tc.expected) {
				t.Errorf("Test %s: expected resources %+v, but got %+v", tc.name, tc.expected, resources)
			}
		})
	}
	//TODO(vinaykul,InPlacePodVerticalScaling): Add unit tests for cgroup v1 & v2
}

func TestGenerateLinuxContainerResourcesWithSwap(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)
	m.machineInfo.MemoryCapacity = 42949672960 // 40Gb == 40 * 1024^3
	m.machineInfo.SwapCapacity = 5368709120    // 5Gb == 5 * 1024^3

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "bar",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "c1",
				},
				{
					Name: "c2",
				},
			},
		},
		Status: v1.PodStatus{},
	}

	expectSwapDisabled := func(cgroupVersion CgroupVersion, resources ...*runtimeapi.LinuxContainerResources) {
		const msg = "container is expected to not have swap configured"

		for _, r := range resources {
			switch cgroupVersion {
			case cgroupV1:
				assert.Equal(t, int64(0), r.MemorySwapLimitInBytes, msg)
			case cgroupV2:
				assert.NotContains(t, r.Unified, cm.Cgroup2MaxSwapFilename, msg)
			}
		}
	}

	expectNoSwap := func(cgroupVersion CgroupVersion, resources ...*runtimeapi.LinuxContainerResources) {
		const msg = "container is expected to not have swap access"

		for _, r := range resources {
			switch cgroupVersion {
			case cgroupV1:
				assert.Equal(t, r.MemoryLimitInBytes, r.MemorySwapLimitInBytes, msg)
			case cgroupV2:
				assert.Equal(t, "0", r.Unified[cm.Cgroup2MaxSwapFilename], msg)
			}
		}
	}

	expectSwap := func(cgroupVersion CgroupVersion, swapBytesExpected int64, resources *runtimeapi.LinuxContainerResources) {
		msg := fmt.Sprintf("container swap is expected to be limited by %d bytes", swapBytesExpected)

		switch cgroupVersion {
		case cgroupV1:
			assert.Equal(t, resources.MemoryLimitInBytes+swapBytesExpected, resources.MemorySwapLimitInBytes, msg)
		case cgroupV2:
			assert.Equal(t, fmt.Sprintf("%d", swapBytesExpected), resources.Unified[cm.Cgroup2MaxSwapFilename], msg)
		}
	}

	calcSwapForBurstablePods := func(containerMemoryRequest int64) int64 {
		swapSize, err := calcSwapForBurstablePods(containerMemoryRequest, int64(m.machineInfo.MemoryCapacity), int64(m.machineInfo.SwapCapacity))
		assert.NoError(t, err)

		return swapSize
	}

	for _, tc := range []struct {
		name                        string
		cgroupVersion               CgroupVersion
		qosClass                    v1.PodQOSClass
		swapDisabledOnNode          bool
		nodeSwapFeatureGateEnabled  bool
		swapBehavior                string
		addContainerWithoutRequests bool
		addGuaranteedContainer      bool
	}{
		// With cgroup v1
		{
			name:                       "cgroups v1, LimitedSwap, Burstable QoS",
			cgroupVersion:              cgroupV1,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},
		{
			name:                       "cgroups v1, LimitedSwap, Best-effort QoS",
			cgroupVersion:              cgroupV1,
			qosClass:                   v1.PodQOSBestEffort,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},

		// With feature gate turned off
		{
			name:                       "NodeSwap feature gate turned off, cgroups v2, LimitedSwap",
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: false,
			swapBehavior:               types.LimitedSwap,
		},

		// With no swapBehavior, NoSwap should be the default
		{
			name:                       "With no swapBehavior - NoSwap should be the default",
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBestEffort,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               "",
		},

		// With Guaranteed and Best-effort QoS
		{
			name:                       "Best-effort QoS, cgroups v2, NoSwap",
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBestEffort,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               "NoSwap",
		},
		{
			name:                       "Best-effort QoS, cgroups v2, LimitedSwap",
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},
		{
			name:                       "Guaranteed QoS, cgroups v2, LimitedSwap",
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSGuaranteed,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},

		// With a "guaranteed" container (when memory requests equal to limits)
		{
			name:                        "Burstable QoS, cgroups v2, LimitedSwap, with a guaranteed container",
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: false,
			addGuaranteedContainer:      true,
		},

		// Swap is expected to be allocated
		{
			name:                        "Burstable QoS, cgroups v2, LimitedSwap",
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: false,
			addGuaranteedContainer:      false,
		},
		{
			name:                        "Burstable QoS, cgroups v2, LimitedSwap, with a container with no requests",
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: true,
			addGuaranteedContainer:      false,
		},
		// All the above examples with Swap disabled on node
		{
			name:                       "Swap disabled on node, cgroups v1, LimitedSwap, Burstable QoS",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV1,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},
		{
			name:                       "Swap disabled on node, cgroups v1, LimitedSwap, Best-effort QoS",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV1,
			qosClass:                   v1.PodQOSBestEffort,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},

		// With feature gate turned off
		{
			name:                       "Swap disabled on node, NodeSwap feature gate turned off, cgroups v2, LimitedSwap",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: false,
			swapBehavior:               types.LimitedSwap,
		},

		// With no swapBehavior, NoSwap should be the default
		{
			name:                       "Swap disabled on node, With no swapBehavior - NoSwap should be the default",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBestEffort,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               "",
		},

		// With Guaranteed and Best-effort QoS
		{
			name:                       "Swap disabled on node, Best-effort QoS, cgroups v2, LimitedSwap",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},
		{
			name:                       "Swap disabled on node, Guaranteed QoS, cgroups v2, LimitedSwap",
			swapDisabledOnNode:         true,
			cgroupVersion:              cgroupV2,
			qosClass:                   v1.PodQOSGuaranteed,
			nodeSwapFeatureGateEnabled: true,
			swapBehavior:               types.LimitedSwap,
		},

		// With a "guaranteed" container (when memory requests equal to limits)
		{
			name:                        "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap, with a guaranteed container",
			swapDisabledOnNode:          true,
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: false,
			addGuaranteedContainer:      true,
		},

		// Swap is expected to be allocated
		{
			name:                        "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap",
			swapDisabledOnNode:          true,
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: false,
			addGuaranteedContainer:      false,
		},
		{
			name:                        "Swap disabled on node, Burstable QoS, cgroups v2, LimitedSwap, with a container with no requests",
			swapDisabledOnNode:          true,
			cgroupVersion:               cgroupV2,
			qosClass:                    v1.PodQOSBurstable,
			nodeSwapFeatureGateEnabled:  true,
			swapBehavior:                types.LimitedSwap,
			addContainerWithoutRequests: true,
			addGuaranteedContainer:      false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			setCgroupVersionDuringTest(tc.cgroupVersion)
			defer setSwapControllerAvailableDuringTest(!tc.swapDisabledOnNode)()
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeSwap, tc.nodeSwapFeatureGateEnabled)
			m.memorySwapBehavior = tc.swapBehavior

			var resourceReqsC1, resourceReqsC2 v1.ResourceRequirements
			switch tc.qosClass {
			case v1.PodQOSBurstable:
				resourceReqsC1 = v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi")},
				}

				if !tc.addContainerWithoutRequests {
					resourceReqsC2 = v1.ResourceRequirements{
						Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi")},
					}

					if tc.addGuaranteedContainer {
						resourceReqsC2.Limits = v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi")}
					}
				}
			case v1.PodQOSGuaranteed:
				resourceReqsC1 = v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi"), v1.ResourceCPU: resource.MustParse("1")},
					Limits:   v1.ResourceList{v1.ResourceMemory: resource.MustParse("1Gi"), v1.ResourceCPU: resource.MustParse("1")},
				}
				resourceReqsC2 = v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi"), v1.ResourceCPU: resource.MustParse("1")},
					Limits:   v1.ResourceList{v1.ResourceMemory: resource.MustParse("2Gi"), v1.ResourceCPU: resource.MustParse("1")},
				}
			}
			pod.Spec.Containers[0].Resources = resourceReqsC1
			pod.Spec.Containers[1].Resources = resourceReqsC2

			resourcesC1 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[0], false)
			resourcesC2 := m.generateLinuxContainerResources(pod, &pod.Spec.Containers[1], false)

			if tc.swapDisabledOnNode {
				expectSwapDisabled(tc.cgroupVersion, resourcesC1, resourcesC2)
				return
			}

			if !tc.nodeSwapFeatureGateEnabled || tc.cgroupVersion == cgroupV1 || (tc.swapBehavior == types.LimitedSwap && tc.qosClass != v1.PodQOSBurstable) {
				expectNoSwap(tc.cgroupVersion, resourcesC1, resourcesC2)
				return
			}

			if tc.swapBehavior == types.NoSwap || tc.swapBehavior == "" {
				expectNoSwap(tc.cgroupVersion, resourcesC1, resourcesC2)
				return
			}

			c1ExpectedSwap := calcSwapForBurstablePods(resourceReqsC1.Requests.Memory().Value())
			c2ExpectedSwap := int64(0)
			if !tc.addContainerWithoutRequests && !tc.addGuaranteedContainer {
				c2ExpectedSwap = calcSwapForBurstablePods(resourceReqsC2.Requests.Memory().Value())
			}

			expectSwap(tc.cgroupVersion, c1ExpectedSwap, resourcesC1)
			expectSwap(tc.cgroupVersion, c2ExpectedSwap, resourcesC2)
		})
	}
}

type CgroupVersion string

const (
	cgroupV1 CgroupVersion = "v1"
	cgroupV2 CgroupVersion = "v2"
)

func setCgroupVersionDuringTest(version CgroupVersion) {
	isCgroup2UnifiedMode = func() bool {
		return version == cgroupV2
	}
}

func setSwapControllerAvailableDuringTest(available bool) func() {
	original := swapControllerAvailable
	swapControllerAvailable = func() bool {
		return available
	}

	return func() {
		swapControllerAvailable = original
	}
}
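
// The sketch below is illustrative only and is not part of the upstream file.
// It shows how the two helpers above are meant to compose: they swap the
// package-level probes isCgroup2UnifiedMode and swapControllerAvailable, and
// the closure returned by setSwapControllerAvailableDuringTest restores the
// original swap probe when deferred. The test name is hypothetical.
func TestCgroupAndSwapTestHelpersSketch(t *testing.T) {
	// Pretend the node runs cgroup v2 with a working swap controller, and
	// restore the original swap probe when the test finishes.
	setCgroupVersionDuringTest(cgroupV2)
	defer setSwapControllerAvailableDuringTest(true)()

	assert.True(t, isCgroup2UnifiedMode())
	assert.True(t, swapControllerAvailable())
}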