k8s.io/kubernetes@v1.29.3/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go (about) 1 /* 2 Copyright 2016 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package kuberuntime 18 19 import ( 20 "context" 21 "fmt" 22 "path/filepath" 23 "reflect" 24 goruntime "runtime" 25 "sort" 26 "testing" 27 "time" 28 29 "google.golang.org/grpc/codes" 30 "google.golang.org/grpc/status" 31 32 cadvisorapi "github.com/google/cadvisor/info/v1" 33 "github.com/stretchr/testify/assert" 34 "github.com/stretchr/testify/require" 35 oteltrace "go.opentelemetry.io/otel/trace" 36 37 v1 "k8s.io/api/core/v1" 38 "k8s.io/apimachinery/pkg/api/resource" 39 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 40 "k8s.io/apimachinery/pkg/types" 41 "k8s.io/apimachinery/pkg/util/sets" 42 utilfeature "k8s.io/apiserver/pkg/util/feature" 43 "k8s.io/client-go/util/flowcontrol" 44 featuregatetesting "k8s.io/component-base/featuregate/testing" 45 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" 46 apitest "k8s.io/cri-api/pkg/apis/testing" 47 podutil "k8s.io/kubernetes/pkg/api/v1/pod" 48 "k8s.io/kubernetes/pkg/credentialprovider" 49 "k8s.io/kubernetes/pkg/features" 50 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" 51 containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" 52 proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results" 53 ) 54 55 var ( 56 fakeCreatedAt int64 = 1 57 containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways 58 ) 59 60 func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) { 61 return customTestRuntimeManager(&credentialprovider.BasicDockerKeyring{}) 62 } 63 64 func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) { 65 fakeRuntimeService := apitest.NewFakeRuntimeService() 66 fakeImageService := apitest.NewFakeImageService() 67 // Only an empty machineInfo is needed here, because in unit test all containers are besteffort, 68 // data in machineInfo is not used. If burstable containers are used in unit test in the future, 69 // we may want to set memory capacity. 70 memoryCapacityQuantity := resource.MustParse(fakeNodeAllocatableMemory) 71 machineInfo := &cadvisorapi.MachineInfo{ 72 MemoryCapacity: uint64(memoryCapacityQuantity.Value()), 73 } 74 osInterface := &containertest.FakeOS{} 75 manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring, oteltrace.NewNoopTracerProvider().Tracer("")) 76 return fakeRuntimeService, fakeImageService, manager, err 77 } 78 79 // sandboxTemplate is a sandbox template to create fake sandbox. 80 type sandboxTemplate struct { 81 pod *v1.Pod 82 attempt uint32 83 createdAt int64 84 state runtimeapi.PodSandboxState 85 running bool 86 terminating bool 87 } 88 89 // containerTemplate is a container template to create fake container. 90 type containerTemplate struct { 91 pod *v1.Pod 92 container *v1.Container 93 sandboxAttempt uint32 94 attempt int 95 createdAt int64 96 state runtimeapi.ContainerState 97 } 98 99 // makeAndSetFakePod is a helper function to create and set one fake sandbox for a pod and 100 // one fake container for each of its container. 101 func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService, 102 pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) { 103 sandbox := makeFakePodSandbox(t, m, sandboxTemplate{ 104 pod: pod, 105 createdAt: fakeCreatedAt, 106 state: runtimeapi.PodSandboxState_SANDBOX_READY, 107 }) 108 109 var containers []*apitest.FakeContainer 110 newTemplate := func(c *v1.Container) containerTemplate { 111 return containerTemplate{ 112 pod: pod, 113 container: c, 114 createdAt: fakeCreatedAt, 115 state: runtimeapi.ContainerState_CONTAINER_RUNNING, 116 } 117 } 118 podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool { 119 containers = append(containers, makeFakeContainer(t, m, newTemplate(c))) 120 return true 121 }) 122 123 fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{sandbox}) 124 fakeRuntime.SetFakeContainers(containers) 125 return sandbox, containers 126 } 127 128 // makeFakePodSandbox creates a fake pod sandbox based on a sandbox template. 129 func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox { 130 config, err := m.generatePodSandboxConfig(template.pod, template.attempt) 131 assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template) 132 133 podSandboxID := apitest.BuildSandboxName(config.Metadata) 134 podSandBoxStatus := &apitest.FakePodSandbox{ 135 PodSandboxStatus: runtimeapi.PodSandboxStatus{ 136 Id: podSandboxID, 137 Metadata: config.Metadata, 138 State: template.state, 139 CreatedAt: template.createdAt, 140 Network: &runtimeapi.PodSandboxNetworkStatus{ 141 Ip: apitest.FakePodSandboxIPs[0], 142 }, 143 Labels: config.Labels, 144 }, 145 } 146 // assign additional IPs 147 additionalIPs := apitest.FakePodSandboxIPs[1:] 148 additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs)) 149 for _, ip := range additionalIPs { 150 additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{ 151 Ip: ip, 152 }) 153 } 154 podSandBoxStatus.Network.AdditionalIps = additionalPodIPs 155 return podSandBoxStatus 156 157 } 158 159 // makeFakePodSandboxes creates a group of fake pod sandboxes based on the sandbox templates. 160 // The function guarantees the order of the fake pod sandboxes is the same with the templates. 161 func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates []sandboxTemplate) []*apitest.FakePodSandbox { 162 var fakePodSandboxes []*apitest.FakePodSandbox 163 for _, template := range templates { 164 fakePodSandboxes = append(fakePodSandboxes, makeFakePodSandbox(t, m, template)) 165 } 166 return fakePodSandboxes 167 } 168 169 // makeFakeContainer creates a fake container based on a container template. 170 func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer { 171 ctx := context.Background() 172 sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt) 173 assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template) 174 175 containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil) 176 assert.NoError(t, err, "generateContainerConfig for container template %+v", template) 177 178 podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata) 179 containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID) 180 imageRef := containerConfig.Image.Image 181 return &apitest.FakeContainer{ 182 ContainerStatus: runtimeapi.ContainerStatus{ 183 Id: containerID, 184 Metadata: containerConfig.Metadata, 185 Image: containerConfig.Image, 186 ImageRef: imageRef, 187 CreatedAt: template.createdAt, 188 State: template.state, 189 Labels: containerConfig.Labels, 190 Annotations: containerConfig.Annotations, 191 LogPath: filepath.Join(sandboxConfig.GetLogDirectory(), containerConfig.GetLogPath()), 192 }, 193 SandboxID: podSandboxID, 194 } 195 } 196 197 // makeFakeContainers creates a group of fake containers based on the container templates. 198 // The function guarantees the order of the fake containers is the same with the templates. 199 func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []containerTemplate) []*apitest.FakeContainer { 200 var fakeContainers []*apitest.FakeContainer 201 for _, template := range templates { 202 fakeContainers = append(fakeContainers, makeFakeContainer(t, m, template)) 203 } 204 return fakeContainers 205 } 206 207 // makeTestContainer creates a test api container. 208 func makeTestContainer(name, image string) v1.Container { 209 return v1.Container{ 210 Name: name, 211 Image: image, 212 } 213 } 214 215 // makeTestPod creates a test api pod. 216 func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod { 217 return &v1.Pod{ 218 ObjectMeta: metav1.ObjectMeta{ 219 UID: types.UID(podUID), 220 Name: podName, 221 Namespace: podNamespace, 222 }, 223 Spec: v1.PodSpec{ 224 Containers: containers, 225 }, 226 } 227 } 228 229 // verifyPods returns true if the two pod slices are equal. 230 func verifyPods(a, b []*kubecontainer.Pod) bool { 231 if len(a) != len(b) { 232 return false 233 } 234 235 // Sort the containers within a pod. 236 for i := range a { 237 sort.Sort(containersByID(a[i].Containers)) 238 } 239 for i := range b { 240 sort.Sort(containersByID(b[i].Containers)) 241 } 242 243 // Sort the pods by UID. 244 sort.Sort(podsByID(a)) 245 sort.Sort(podsByID(b)) 246 247 return reflect.DeepEqual(a, b) 248 } 249 250 func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) { 251 actual := sets.NewString() 252 for _, c := range fakeRuntime.Containers { 253 actual.Insert(c.Id) 254 } 255 return actual, actual.Equal(expected) 256 } 257 258 // Only extract the fields of interests. 259 type cRecord struct { 260 name string 261 attempt uint32 262 state runtimeapi.ContainerState 263 } 264 265 type cRecordList []*cRecord 266 267 func (b cRecordList) Len() int { return len(b) } 268 func (b cRecordList) Swap(i, j int) { b[i], b[j] = b[j], b[i] } 269 func (b cRecordList) Less(i, j int) bool { 270 if b[i].name != b[j].name { 271 return b[i].name < b[j].name 272 } 273 return b[i].attempt < b[j].attempt 274 } 275 276 func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService, expected []*cRecord, desc string) { 277 actual := []*cRecord{} 278 for _, cStatus := range runtime.Containers { 279 actual = append(actual, &cRecord{name: cStatus.Metadata.Name, attempt: cStatus.Metadata.Attempt, state: cStatus.State}) 280 } 281 sort.Sort(cRecordList(expected)) 282 sort.Sort(cRecordList(actual)) 283 assert.Equal(t, expected, actual, desc) 284 } 285 286 func TestNewKubeRuntimeManager(t *testing.T) { 287 _, _, _, err := createTestRuntimeManager() 288 assert.NoError(t, err) 289 } 290 291 func TestVersion(t *testing.T) { 292 ctx := context.Background() 293 _, _, m, err := createTestRuntimeManager() 294 assert.NoError(t, err) 295 296 version, err := m.Version(ctx) 297 assert.NoError(t, err) 298 assert.Equal(t, kubeRuntimeAPIVersion, version.String()) 299 } 300 301 func TestContainerRuntimeType(t *testing.T) { 302 _, _, m, err := createTestRuntimeManager() 303 assert.NoError(t, err) 304 305 runtimeType := m.Type() 306 assert.Equal(t, apitest.FakeRuntimeName, runtimeType) 307 } 308 309 func TestGetPodStatus(t *testing.T) { 310 ctx := context.Background() 311 fakeRuntime, _, m, err := createTestRuntimeManager() 312 assert.NoError(t, err) 313 314 containers := []v1.Container{ 315 { 316 Name: "foo1", 317 Image: "busybox", 318 ImagePullPolicy: v1.PullIfNotPresent, 319 }, 320 { 321 Name: "foo2", 322 Image: "busybox", 323 ImagePullPolicy: v1.PullIfNotPresent, 324 }, 325 } 326 pod := &v1.Pod{ 327 ObjectMeta: metav1.ObjectMeta{ 328 UID: "12345678", 329 Name: "foo", 330 Namespace: "new", 331 }, 332 Spec: v1.PodSpec{ 333 Containers: containers, 334 }, 335 } 336 337 // Set fake sandbox and faked containers to fakeRuntime. 338 makeAndSetFakePod(t, m, fakeRuntime, pod) 339 340 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 341 assert.NoError(t, err) 342 assert.Equal(t, pod.UID, podStatus.ID) 343 assert.Equal(t, pod.Name, podStatus.Name) 344 assert.Equal(t, pod.Namespace, podStatus.Namespace) 345 assert.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs) 346 } 347 348 func TestStopContainerWithNotFoundError(t *testing.T) { 349 ctx := context.Background() 350 fakeRuntime, _, m, err := createTestRuntimeManager() 351 assert.NoError(t, err) 352 353 containers := []v1.Container{ 354 { 355 Name: "foo1", 356 Image: "busybox", 357 ImagePullPolicy: v1.PullIfNotPresent, 358 }, 359 { 360 Name: "foo2", 361 Image: "busybox", 362 ImagePullPolicy: v1.PullIfNotPresent, 363 }, 364 } 365 pod := &v1.Pod{ 366 ObjectMeta: metav1.ObjectMeta{ 367 UID: "12345678", 368 Name: "foo", 369 Namespace: "new", 370 }, 371 Spec: v1.PodSpec{ 372 Containers: containers, 373 }, 374 } 375 376 // Set fake sandbox and faked containers to fakeRuntime. 377 makeAndSetFakePod(t, m, fakeRuntime, pod) 378 fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container")) 379 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 380 require.NoError(t, err) 381 p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus) 382 gracePeriod := int64(1) 383 err = m.KillPod(ctx, pod, p, &gracePeriod) 384 require.NoError(t, err) 385 } 386 387 func TestGetPodStatusWithNotFoundError(t *testing.T) { 388 ctx := context.Background() 389 fakeRuntime, _, m, err := createTestRuntimeManager() 390 assert.NoError(t, err) 391 392 containers := []v1.Container{ 393 { 394 Name: "foo1", 395 Image: "busybox", 396 ImagePullPolicy: v1.PullIfNotPresent, 397 }, 398 { 399 Name: "foo2", 400 Image: "busybox", 401 ImagePullPolicy: v1.PullIfNotPresent, 402 }, 403 } 404 pod := &v1.Pod{ 405 ObjectMeta: metav1.ObjectMeta{ 406 UID: "12345678", 407 Name: "foo", 408 Namespace: "new", 409 }, 410 Spec: v1.PodSpec{ 411 Containers: containers, 412 }, 413 } 414 415 // Set fake sandbox and faked containers to fakeRuntime. 416 makeAndSetFakePod(t, m, fakeRuntime, pod) 417 fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container")) 418 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 419 require.NoError(t, err) 420 require.Equal(t, pod.UID, podStatus.ID) 421 require.Equal(t, pod.Name, podStatus.Name) 422 require.Equal(t, pod.Namespace, podStatus.Namespace) 423 require.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs) 424 } 425 426 func TestGetPods(t *testing.T) { 427 ctx := context.Background() 428 fakeRuntime, _, m, err := createTestRuntimeManager() 429 assert.NoError(t, err) 430 431 pod := &v1.Pod{ 432 ObjectMeta: metav1.ObjectMeta{ 433 UID: "12345678", 434 Name: "foo", 435 Namespace: "new", 436 }, 437 Spec: v1.PodSpec{ 438 Containers: []v1.Container{ 439 { 440 Name: "foo1", 441 Image: "busybox", 442 }, 443 { 444 Name: "foo2", 445 Image: "busybox", 446 }, 447 }, 448 }, 449 } 450 451 // Set fake sandbox and fake containers to fakeRuntime. 452 fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod) 453 454 // Convert the fakeContainers to kubecontainer.Container 455 containers := make([]*kubecontainer.Container, len(fakeContainers)) 456 for i := range containers { 457 fakeContainer := fakeContainers[i] 458 c, err := m.toKubeContainer(&runtimeapi.Container{ 459 Id: fakeContainer.Id, 460 Metadata: fakeContainer.Metadata, 461 State: fakeContainer.State, 462 Image: fakeContainer.Image, 463 ImageRef: fakeContainer.ImageRef, 464 Labels: fakeContainer.Labels, 465 Annotations: fakeContainer.Annotations, 466 }) 467 if err != nil { 468 t.Fatalf("unexpected error %v", err) 469 } 470 containers[i] = c 471 } 472 // Convert fakeSandbox to kubecontainer.Container 473 sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{ 474 Id: fakeSandbox.Id, 475 Metadata: fakeSandbox.Metadata, 476 State: fakeSandbox.State, 477 CreatedAt: fakeSandbox.CreatedAt, 478 Labels: fakeSandbox.Labels, 479 Annotations: fakeSandbox.Annotations, 480 }) 481 if err != nil { 482 t.Fatalf("unexpected error %v", err) 483 } 484 485 expected := []*kubecontainer.Pod{ 486 { 487 ID: types.UID("12345678"), 488 Name: "foo", 489 Namespace: "new", 490 CreatedAt: uint64(fakeSandbox.CreatedAt), 491 Containers: []*kubecontainer.Container{containers[0], containers[1]}, 492 Sandboxes: []*kubecontainer.Container{sandbox}, 493 }, 494 } 495 496 actual, err := m.GetPods(ctx, false) 497 assert.NoError(t, err) 498 499 if !verifyPods(expected, actual) { 500 t.Errorf("expected %#v, got %#v", expected, actual) 501 } 502 } 503 504 func TestGetPodsSorted(t *testing.T) { 505 ctx := context.Background() 506 fakeRuntime, _, m, err := createTestRuntimeManager() 507 assert.NoError(t, err) 508 509 pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}} 510 511 createdTimestamps := []uint64{10, 5, 20} 512 fakeSandboxes := []*apitest.FakePodSandbox{} 513 for i, createdAt := range createdTimestamps { 514 pod.UID = types.UID(fmt.Sprint(i)) 515 fakeSandboxes = append(fakeSandboxes, makeFakePodSandbox(t, m, sandboxTemplate{ 516 pod: pod, 517 createdAt: int64(createdAt), 518 state: runtimeapi.PodSandboxState_SANDBOX_READY, 519 })) 520 } 521 fakeRuntime.SetFakeSandboxes(fakeSandboxes) 522 523 actual, err := m.GetPods(ctx, false) 524 assert.NoError(t, err) 525 526 assert.Len(t, actual, 3) 527 528 // Verify that the pods are sorted by their creation time (newest/biggest timestamp first) 529 assert.Equal(t, uint64(createdTimestamps[2]), actual[0].CreatedAt) 530 assert.Equal(t, uint64(createdTimestamps[0]), actual[1].CreatedAt) 531 assert.Equal(t, uint64(createdTimestamps[1]), actual[2].CreatedAt) 532 } 533 534 func TestKillPod(t *testing.T) { 535 ctx := context.Background() 536 fakeRuntime, _, m, err := createTestRuntimeManager() 537 assert.NoError(t, err) 538 539 pod := &v1.Pod{ 540 ObjectMeta: metav1.ObjectMeta{ 541 UID: "12345678", 542 Name: "foo", 543 Namespace: "new", 544 }, 545 Spec: v1.PodSpec{ 546 Containers: []v1.Container{ 547 { 548 Name: "foo1", 549 Image: "busybox", 550 }, 551 { 552 Name: "foo2", 553 Image: "busybox", 554 }, 555 }, 556 EphemeralContainers: []v1.EphemeralContainer{ 557 { 558 EphemeralContainerCommon: v1.EphemeralContainerCommon{ 559 Name: "debug", 560 Image: "busybox", 561 }, 562 }, 563 }, 564 }, 565 } 566 567 // Set fake sandbox and fake containers to fakeRuntime. 568 fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod) 569 570 // Convert the fakeContainers to kubecontainer.Container 571 containers := make([]*kubecontainer.Container, len(fakeContainers)) 572 for i := range containers { 573 fakeContainer := fakeContainers[i] 574 c, err := m.toKubeContainer(&runtimeapi.Container{ 575 Id: fakeContainer.Id, 576 Metadata: fakeContainer.Metadata, 577 State: fakeContainer.State, 578 Image: fakeContainer.Image, 579 ImageRef: fakeContainer.ImageRef, 580 Labels: fakeContainer.Labels, 581 }) 582 if err != nil { 583 t.Fatalf("unexpected error %v", err) 584 } 585 containers[i] = c 586 } 587 runningPod := kubecontainer.Pod{ 588 ID: pod.UID, 589 Name: pod.Name, 590 Namespace: pod.Namespace, 591 Containers: []*kubecontainer.Container{containers[0], containers[1], containers[2]}, 592 Sandboxes: []*kubecontainer.Container{ 593 { 594 ID: kubecontainer.ContainerID{ 595 ID: fakeSandbox.Id, 596 Type: apitest.FakeRuntimeName, 597 }, 598 }, 599 }, 600 } 601 602 err = m.KillPod(ctx, pod, runningPod, nil) 603 assert.NoError(t, err) 604 assert.Equal(t, 3, len(fakeRuntime.Containers)) 605 assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) 606 for _, sandbox := range fakeRuntime.Sandboxes { 607 assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State) 608 } 609 for _, c := range fakeRuntime.Containers { 610 assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State) 611 } 612 } 613 614 func TestSyncPod(t *testing.T) { 615 fakeRuntime, fakeImage, m, err := createTestRuntimeManager() 616 assert.NoError(t, err) 617 618 containers := []v1.Container{ 619 { 620 Name: "foo1", 621 Image: "busybox", 622 ImagePullPolicy: v1.PullIfNotPresent, 623 }, 624 { 625 Name: "foo2", 626 Image: "alpine", 627 ImagePullPolicy: v1.PullIfNotPresent, 628 }, 629 } 630 pod := &v1.Pod{ 631 ObjectMeta: metav1.ObjectMeta{ 632 UID: "12345678", 633 Name: "foo", 634 Namespace: "new", 635 }, 636 Spec: v1.PodSpec{ 637 Containers: containers, 638 }, 639 } 640 641 backOff := flowcontrol.NewBackOff(time.Second, time.Minute) 642 result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) 643 assert.NoError(t, result.Error()) 644 assert.Equal(t, 2, len(fakeRuntime.Containers)) 645 assert.Equal(t, 2, len(fakeImage.Images)) 646 assert.Equal(t, 1, len(fakeRuntime.Sandboxes)) 647 for _, sandbox := range fakeRuntime.Sandboxes { 648 assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State) 649 } 650 for _, c := range fakeRuntime.Containers { 651 assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State) 652 } 653 } 654 655 func TestSyncPodWithConvertedPodSysctls(t *testing.T) { 656 fakeRuntime, _, m, err := createTestRuntimeManager() 657 assert.NoError(t, err) 658 659 containers := []v1.Container{ 660 { 661 Name: "foo", 662 Image: "busybox", 663 ImagePullPolicy: v1.PullIfNotPresent, 664 }, 665 } 666 667 securityContext := &v1.PodSecurityContext{ 668 Sysctls: []v1.Sysctl{ 669 { 670 Name: "kernel/shm_rmid_forced", 671 Value: "1", 672 }, 673 { 674 Name: "net/ipv4/ip_local_port_range", 675 Value: "1024 65535", 676 }, 677 }, 678 } 679 exceptSysctls := []v1.Sysctl{ 680 { 681 Name: "kernel.shm_rmid_forced", 682 Value: "1", 683 }, 684 { 685 Name: "net.ipv4.ip_local_port_range", 686 Value: "1024 65535", 687 }, 688 } 689 pod := &v1.Pod{ 690 ObjectMeta: metav1.ObjectMeta{ 691 UID: "12345678", 692 Name: "foo", 693 Namespace: "new", 694 }, 695 Spec: v1.PodSpec{ 696 Containers: containers, 697 SecurityContext: securityContext, 698 }, 699 } 700 701 backOff := flowcontrol.NewBackOff(time.Second, time.Minute) 702 result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff) 703 assert.NoError(t, result.Error()) 704 assert.Equal(t, exceptSysctls, pod.Spec.SecurityContext.Sysctls) 705 for _, sandbox := range fakeRuntime.Sandboxes { 706 assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State) 707 } 708 for _, c := range fakeRuntime.Containers { 709 assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State) 710 } 711 } 712 713 func TestPruneInitContainers(t *testing.T) { 714 ctx := context.Background() 715 fakeRuntime, _, m, err := createTestRuntimeManager() 716 assert.NoError(t, err) 717 718 init1 := makeTestContainer("init1", "busybox") 719 init2 := makeTestContainer("init2", "busybox") 720 pod := &v1.Pod{ 721 ObjectMeta: metav1.ObjectMeta{ 722 UID: "12345678", 723 Name: "foo", 724 Namespace: "new", 725 }, 726 Spec: v1.PodSpec{ 727 InitContainers: []v1.Container{init1, init2}, 728 }, 729 } 730 731 templates := []containerTemplate{ 732 {pod: pod, container: &init1, attempt: 3, createdAt: 3, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 733 {pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 734 {pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 735 {pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_UNKNOWN}, 736 {pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 737 {pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 738 } 739 fakes := makeFakeContainers(t, m, templates) 740 fakeRuntime.SetFakeContainers(fakes) 741 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 742 assert.NoError(t, err) 743 744 m.pruneInitContainersBeforeStart(ctx, pod, podStatus) 745 expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id) 746 if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok { 747 t.Errorf("expected %v, got %v", expectedContainers, actual) 748 } 749 } 750 751 func TestSyncPodWithInitContainers(t *testing.T) { 752 ctx := context.Background() 753 fakeRuntime, _, m, err := createTestRuntimeManager() 754 assert.NoError(t, err) 755 756 initContainers := []v1.Container{ 757 { 758 Name: "init1", 759 Image: "init", 760 ImagePullPolicy: v1.PullIfNotPresent, 761 }, 762 } 763 containers := []v1.Container{ 764 { 765 Name: "foo1", 766 Image: "busybox", 767 ImagePullPolicy: v1.PullIfNotPresent, 768 }, 769 { 770 Name: "foo2", 771 Image: "alpine", 772 ImagePullPolicy: v1.PullIfNotPresent, 773 }, 774 } 775 pod := &v1.Pod{ 776 ObjectMeta: metav1.ObjectMeta{ 777 UID: "12345678", 778 Name: "foo", 779 Namespace: "new", 780 }, 781 Spec: v1.PodSpec{ 782 Containers: containers, 783 InitContainers: initContainers, 784 }, 785 } 786 787 backOff := flowcontrol.NewBackOff(time.Second, time.Minute) 788 789 // 1. should only create the init container. 790 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 791 assert.NoError(t, err) 792 result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) 793 assert.NoError(t, result.Error()) 794 expected := []*cRecord{ 795 {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, 796 } 797 verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container") 798 799 // 2. should not create app container because init container is still running. 800 podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 801 assert.NoError(t, err) 802 result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) 803 assert.NoError(t, result.Error()) 804 verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing") 805 806 // 3. should create all app containers because init container finished. 807 // Stop init container instance 0. 808 sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil) 809 require.NoError(t, err) 810 sandboxID := sandboxIDs[0] 811 initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0) 812 require.NoError(t, err) 813 fakeRuntime.StopContainer(ctx, initID0, 0) 814 // Sync again. 815 podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 816 assert.NoError(t, err) 817 result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff) 818 assert.NoError(t, result.Error()) 819 expected = []*cRecord{ 820 {name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 821 {name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, 822 {name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, 823 } 824 verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running") 825 826 // 4. should restart the init container if needed to create a new podsandbox 827 // Stop the pod sandbox. 828 fakeRuntime.StopPodSandbox(ctx, sandboxID) 829 // Sync again. 830 podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 831 assert.NoError(t, err) 832 result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff) 833 assert.NoError(t, result.Error()) 834 expected = []*cRecord{ 835 // The first init container instance is purged and no longer visible. 836 // The second (attempt == 1) instance has been started and is running. 837 {name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING}, 838 // All containers are killed. 839 {name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 840 {name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED}, 841 } 842 verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one") 843 } 844 845 // A helper function to get a basic pod and its status assuming all sandbox and 846 // containers are running and ready. 847 func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) { 848 pod := &v1.Pod{ 849 ObjectMeta: metav1.ObjectMeta{ 850 UID: "12345678", 851 Name: "foo", 852 Namespace: "foo-ns", 853 }, 854 Spec: v1.PodSpec{ 855 Containers: []v1.Container{ 856 { 857 Name: "foo1", 858 Image: "busybox", 859 }, 860 { 861 Name: "foo2", 862 Image: "busybox", 863 }, 864 { 865 Name: "foo3", 866 Image: "busybox", 867 }, 868 }, 869 }, 870 Status: v1.PodStatus{ 871 ContainerStatuses: []v1.ContainerStatus{ 872 { 873 ContainerID: "://id1", 874 Name: "foo1", 875 Image: "busybox", 876 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}, 877 }, 878 { 879 ContainerID: "://id2", 880 Name: "foo2", 881 Image: "busybox", 882 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}, 883 }, 884 { 885 ContainerID: "://id3", 886 Name: "foo3", 887 Image: "busybox", 888 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}, 889 }, 890 }, 891 }, 892 } 893 status := &kubecontainer.PodStatus{ 894 ID: pod.UID, 895 Name: pod.Name, 896 Namespace: pod.Namespace, 897 SandboxStatuses: []*runtimeapi.PodSandboxStatus{ 898 { 899 Id: "sandboxID", 900 State: runtimeapi.PodSandboxState_SANDBOX_READY, 901 Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)}, 902 Network: &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"}, 903 }, 904 }, 905 ContainerStatuses: []*kubecontainer.Status{ 906 { 907 ID: kubecontainer.ContainerID{ID: "id1"}, 908 Name: "foo1", State: kubecontainer.ContainerStateRunning, 909 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[0]), 910 }, 911 { 912 ID: kubecontainer.ContainerID{ID: "id2"}, 913 Name: "foo2", State: kubecontainer.ContainerStateRunning, 914 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[1]), 915 }, 916 { 917 ID: kubecontainer.ContainerID{ID: "id3"}, 918 Name: "foo3", State: kubecontainer.ContainerStateRunning, 919 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[2]), 920 }, 921 }, 922 } 923 return pod, status 924 } 925 926 func TestComputePodActions(t *testing.T) { 927 _, _, m, err := createTestRuntimeManager() 928 require.NoError(t, err) 929 930 // Creating a pair reference pod and status for the test cases to refer 931 // the specific fields. 932 basePod, baseStatus := makeBasePodAndStatus() 933 noAction := podActions{ 934 SandboxID: baseStatus.SandboxStatuses[0].Id, 935 ContainersToStart: []int{}, 936 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 937 } 938 939 for desc, test := range map[string]struct { 940 mutatePodFn func(*v1.Pod) 941 mutateStatusFn func(*kubecontainer.PodStatus) 942 actions podActions 943 resetStatusFn func(*kubecontainer.PodStatus) 944 }{ 945 "everything is good; do nothing": { 946 actions: noAction, 947 }, 948 "start pod sandbox and all containers for a new pod": { 949 mutateStatusFn: func(status *kubecontainer.PodStatus) { 950 // No container or sandbox exists. 951 status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{} 952 status.ContainerStatuses = []*kubecontainer.Status{} 953 }, 954 actions: podActions{ 955 KillPod: true, 956 CreateSandbox: true, 957 Attempt: uint32(0), 958 ContainersToStart: []int{0, 1, 2}, 959 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 960 }, 961 }, 962 "restart exited containers if RestartPolicy == Always": { 963 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 964 mutateStatusFn: func(status *kubecontainer.PodStatus) { 965 // The first container completed, restart it, 966 status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited 967 status.ContainerStatuses[0].ExitCode = 0 968 969 // The second container exited with failure, restart it, 970 status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited 971 status.ContainerStatuses[1].ExitCode = 111 972 }, 973 actions: podActions{ 974 SandboxID: baseStatus.SandboxStatuses[0].Id, 975 ContainersToStart: []int{0, 1}, 976 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 977 }, 978 }, 979 "restart failed containers if RestartPolicy == OnFailure": { 980 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 981 mutateStatusFn: func(status *kubecontainer.PodStatus) { 982 // The first container completed, don't restart it, 983 status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited 984 status.ContainerStatuses[0].ExitCode = 0 985 986 // The second container exited with failure, restart it, 987 status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited 988 status.ContainerStatuses[1].ExitCode = 111 989 }, 990 actions: podActions{ 991 SandboxID: baseStatus.SandboxStatuses[0].Id, 992 ContainersToStart: []int{1}, 993 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 994 }, 995 }, 996 "don't restart containers if RestartPolicy == Never": { 997 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 998 mutateStatusFn: func(status *kubecontainer.PodStatus) { 999 // Don't restart any containers. 1000 status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited 1001 status.ContainerStatuses[0].ExitCode = 0 1002 status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited 1003 status.ContainerStatuses[1].ExitCode = 111 1004 }, 1005 actions: noAction, 1006 }, 1007 "Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": { 1008 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1009 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1010 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1011 }, 1012 actions: podActions{ 1013 KillPod: true, 1014 CreateSandbox: true, 1015 SandboxID: baseStatus.SandboxStatuses[0].Id, 1016 Attempt: uint32(1), 1017 ContainersToStart: []int{0, 1, 2}, 1018 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 1019 }, 1020 }, 1021 "Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": { 1022 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1023 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1024 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1025 status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited 1026 status.ContainerStatuses[1].ExitCode = 0 1027 }, 1028 actions: podActions{ 1029 KillPod: true, 1030 CreateSandbox: true, 1031 SandboxID: baseStatus.SandboxStatuses[0].Id, 1032 Attempt: uint32(1), 1033 ContainersToStart: []int{0, 2}, 1034 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 1035 }, 1036 }, 1037 "Kill pod and recreate all containers if the PodSandbox does not have an IP": { 1038 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1039 status.SandboxStatuses[0].Network.Ip = "" 1040 }, 1041 actions: podActions{ 1042 KillPod: true, 1043 CreateSandbox: true, 1044 SandboxID: baseStatus.SandboxStatuses[0].Id, 1045 Attempt: uint32(1), 1046 ContainersToStart: []int{0, 1, 2}, 1047 ContainersToKill: getKillMap(basePod, baseStatus, []int{}), 1048 }, 1049 }, 1050 "Kill and recreate the container if the container's spec changed": { 1051 mutatePodFn: func(pod *v1.Pod) { 1052 pod.Spec.RestartPolicy = v1.RestartPolicyAlways 1053 }, 1054 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1055 status.ContainerStatuses[1].Hash = uint64(432423432) 1056 }, 1057 actions: podActions{ 1058 SandboxID: baseStatus.SandboxStatuses[0].Id, 1059 ContainersToKill: getKillMap(basePod, baseStatus, []int{1}), 1060 ContainersToStart: []int{1}, 1061 }, 1062 }, 1063 "Kill and recreate the container if the liveness check has failed": { 1064 mutatePodFn: func(pod *v1.Pod) { 1065 pod.Spec.RestartPolicy = v1.RestartPolicyAlways 1066 }, 1067 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1068 m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod) 1069 }, 1070 actions: podActions{ 1071 SandboxID: baseStatus.SandboxStatuses[0].Id, 1072 ContainersToKill: getKillMap(basePod, baseStatus, []int{1}), 1073 ContainersToStart: []int{1}, 1074 }, 1075 resetStatusFn: func(status *kubecontainer.PodStatus) { 1076 m.livenessManager.Remove(status.ContainerStatuses[1].ID) 1077 }, 1078 }, 1079 "Kill and recreate the container if the startup check has failed": { 1080 mutatePodFn: func(pod *v1.Pod) { 1081 pod.Spec.RestartPolicy = v1.RestartPolicyAlways 1082 }, 1083 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1084 m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod) 1085 }, 1086 actions: podActions{ 1087 SandboxID: baseStatus.SandboxStatuses[0].Id, 1088 ContainersToKill: getKillMap(basePod, baseStatus, []int{1}), 1089 ContainersToStart: []int{1}, 1090 }, 1091 resetStatusFn: func(status *kubecontainer.PodStatus) { 1092 m.startupManager.Remove(status.ContainerStatuses[1].ID) 1093 }, 1094 }, 1095 "Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": { 1096 mutatePodFn: func(pod *v1.Pod) { 1097 pod.Spec.RestartPolicy = v1.RestartPolicyNever 1098 }, 1099 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1100 // no ready sandbox 1101 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1102 status.SandboxStatuses[0].Metadata.Attempt = uint32(1) 1103 // all containers exited 1104 for i := range status.ContainerStatuses { 1105 status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited 1106 status.ContainerStatuses[i].ExitCode = 0 1107 } 1108 }, 1109 actions: podActions{ 1110 SandboxID: baseStatus.SandboxStatuses[0].Id, 1111 Attempt: uint32(2), 1112 CreateSandbox: false, 1113 KillPod: true, 1114 ContainersToStart: []int{}, 1115 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1116 }, 1117 }, 1118 "Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=OnFailure and all containers succeeded": { 1119 mutatePodFn: func(pod *v1.Pod) { 1120 pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure 1121 }, 1122 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1123 // no ready sandbox 1124 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1125 status.SandboxStatuses[0].Metadata.Attempt = uint32(1) 1126 // all containers succeeded 1127 for i := range status.ContainerStatuses { 1128 status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited 1129 status.ContainerStatuses[i].ExitCode = 0 1130 } 1131 }, 1132 actions: podActions{ 1133 SandboxID: baseStatus.SandboxStatuses[0].Id, 1134 Attempt: uint32(2), 1135 CreateSandbox: false, 1136 KillPod: true, 1137 ContainersToStart: []int{}, 1138 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1139 }, 1140 }, 1141 "Verify we create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and no containers have ever been created": { 1142 mutatePodFn: func(pod *v1.Pod) { 1143 pod.Spec.RestartPolicy = v1.RestartPolicyNever 1144 }, 1145 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1146 // no ready sandbox 1147 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1148 status.SandboxStatuses[0].Metadata.Attempt = uint32(2) 1149 // no visible containers 1150 status.ContainerStatuses = []*kubecontainer.Status{} 1151 }, 1152 actions: podActions{ 1153 SandboxID: baseStatus.SandboxStatuses[0].Id, 1154 Attempt: uint32(3), 1155 CreateSandbox: true, 1156 KillPod: true, 1157 ContainersToStart: []int{0, 1, 2}, 1158 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1159 }, 1160 }, 1161 "Kill and recreate the container if the container is in unknown state": { 1162 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1163 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1164 status.ContainerStatuses[1].State = kubecontainer.ContainerStateUnknown 1165 }, 1166 actions: podActions{ 1167 SandboxID: baseStatus.SandboxStatuses[0].Id, 1168 ContainersToKill: getKillMap(basePod, baseStatus, []int{1}), 1169 ContainersToStart: []int{1}, 1170 }, 1171 }, 1172 } { 1173 pod, status := makeBasePodAndStatus() 1174 if test.mutatePodFn != nil { 1175 test.mutatePodFn(pod) 1176 } 1177 if test.mutateStatusFn != nil { 1178 test.mutateStatusFn(status) 1179 } 1180 ctx := context.Background() 1181 actions := m.computePodActions(ctx, pod, status) 1182 verifyActions(t, &test.actions, &actions, desc) 1183 if test.resetStatusFn != nil { 1184 test.resetStatusFn(status) 1185 } 1186 } 1187 } 1188 1189 func getKillMap(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo { 1190 m := map[kubecontainer.ContainerID]containerToKillInfo{} 1191 for _, i := range cIndexes { 1192 m[status.ContainerStatuses[i].ID] = containerToKillInfo{ 1193 container: &pod.Spec.Containers[i], 1194 name: pod.Spec.Containers[i].Name, 1195 } 1196 } 1197 return m 1198 } 1199 1200 func getKillMapWithInitContainers(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo { 1201 m := map[kubecontainer.ContainerID]containerToKillInfo{} 1202 for _, i := range cIndexes { 1203 m[status.ContainerStatuses[i].ID] = containerToKillInfo{ 1204 container: &pod.Spec.InitContainers[i], 1205 name: pod.Spec.InitContainers[i].Name, 1206 } 1207 } 1208 return m 1209 } 1210 1211 func verifyActions(t *testing.T, expected, actual *podActions, desc string) { 1212 if actual.ContainersToKill != nil { 1213 // Clear the message and reason fields since we don't need to verify them. 1214 for k, info := range actual.ContainersToKill { 1215 info.message = "" 1216 info.reason = "" 1217 actual.ContainersToKill[k] = info 1218 } 1219 } 1220 assert.Equal(t, expected, actual, desc) 1221 } 1222 1223 func TestComputePodActionsWithInitContainers(t *testing.T) { 1224 t.Run("sidecar containers disabled", func(t *testing.T) { 1225 testComputePodActionsWithInitContainers(t, false) 1226 }) 1227 t.Run("sidecar containers enabled", func(t *testing.T) { 1228 testComputePodActionsWithInitContainers(t, true) 1229 }) 1230 } 1231 1232 func testComputePodActionsWithInitContainers(t *testing.T, sidecarContainersEnabled bool) { 1233 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)() 1234 _, _, m, err := createTestRuntimeManager() 1235 require.NoError(t, err) 1236 1237 // Creating a pair reference pod and status for the test cases to refer 1238 // the specific fields. 1239 basePod, baseStatus := makeBasePodAndStatusWithInitContainers() 1240 noAction := podActions{ 1241 SandboxID: baseStatus.SandboxStatuses[0].Id, 1242 ContainersToStart: []int{}, 1243 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1244 } 1245 1246 for desc, test := range map[string]struct { 1247 mutatePodFn func(*v1.Pod) 1248 mutateStatusFn func(*kubecontainer.PodStatus) 1249 actions podActions 1250 }{ 1251 "initialization completed; start all containers": { 1252 actions: podActions{ 1253 SandboxID: baseStatus.SandboxStatuses[0].Id, 1254 ContainersToStart: []int{0, 1, 2}, 1255 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1256 }, 1257 }, 1258 "no init containers have been started; start the first one": { 1259 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1260 status.ContainerStatuses = nil 1261 }, 1262 actions: podActions{ 1263 SandboxID: baseStatus.SandboxStatuses[0].Id, 1264 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 1265 InitContainersToStart: []int{0}, 1266 ContainersToStart: []int{}, 1267 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1268 }, 1269 }, 1270 "initialization in progress; do nothing": { 1271 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1272 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1273 status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning 1274 }, 1275 actions: noAction, 1276 }, 1277 "Kill pod and restart the first init container if the pod sandbox is dead": { 1278 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1279 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1280 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1281 }, 1282 actions: podActions{ 1283 KillPod: true, 1284 CreateSandbox: true, 1285 SandboxID: baseStatus.SandboxStatuses[0].Id, 1286 Attempt: uint32(1), 1287 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 1288 InitContainersToStart: []int{0}, 1289 ContainersToStart: []int{}, 1290 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1291 }, 1292 }, 1293 "initialization failed; restart the last init container if RestartPolicy == Always": { 1294 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1295 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1296 status.ContainerStatuses[2].ExitCode = 137 1297 }, 1298 actions: podActions{ 1299 SandboxID: baseStatus.SandboxStatuses[0].Id, 1300 NextInitContainerToStart: &basePod.Spec.InitContainers[2], 1301 InitContainersToStart: []int{2}, 1302 ContainersToStart: []int{}, 1303 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1304 }, 1305 }, 1306 "initialization failed; restart the last init container if RestartPolicy == OnFailure": { 1307 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1308 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1309 status.ContainerStatuses[2].ExitCode = 137 1310 }, 1311 actions: podActions{ 1312 SandboxID: baseStatus.SandboxStatuses[0].Id, 1313 NextInitContainerToStart: &basePod.Spec.InitContainers[2], 1314 InitContainersToStart: []int{2}, 1315 ContainersToStart: []int{}, 1316 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1317 }, 1318 }, 1319 "initialization failed; kill pod if RestartPolicy == Never": { 1320 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1321 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1322 status.ContainerStatuses[2].ExitCode = 137 1323 }, 1324 actions: podActions{ 1325 KillPod: true, 1326 SandboxID: baseStatus.SandboxStatuses[0].Id, 1327 ContainersToStart: []int{}, 1328 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1329 }, 1330 }, 1331 "init container state unknown; kill and recreate the last init container if RestartPolicy == Always": { 1332 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1333 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1334 status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown 1335 }, 1336 actions: podActions{ 1337 SandboxID: baseStatus.SandboxStatuses[0].Id, 1338 NextInitContainerToStart: &basePod.Spec.InitContainers[2], 1339 InitContainersToStart: []int{2}, 1340 ContainersToStart: []int{}, 1341 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}), 1342 }, 1343 }, 1344 "init container state unknown; kill and recreate the last init container if RestartPolicy == OnFailure": { 1345 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1346 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1347 status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown 1348 }, 1349 actions: podActions{ 1350 SandboxID: baseStatus.SandboxStatuses[0].Id, 1351 NextInitContainerToStart: &basePod.Spec.InitContainers[2], 1352 InitContainersToStart: []int{2}, 1353 ContainersToStart: []int{}, 1354 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}), 1355 }, 1356 }, 1357 "init container state unknown; kill pod if RestartPolicy == Never": { 1358 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1359 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1360 status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown 1361 }, 1362 actions: podActions{ 1363 KillPod: true, 1364 SandboxID: baseStatus.SandboxStatuses[0].Id, 1365 ContainersToStart: []int{}, 1366 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1367 }, 1368 }, 1369 "Pod sandbox not ready, init container failed, but RestartPolicy == Never; kill pod only": { 1370 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1371 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1372 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1373 }, 1374 actions: podActions{ 1375 KillPod: true, 1376 CreateSandbox: false, 1377 SandboxID: baseStatus.SandboxStatuses[0].Id, 1378 Attempt: uint32(1), 1379 ContainersToStart: []int{}, 1380 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1381 }, 1382 }, 1383 "Pod sandbox not ready, and RestartPolicy == Never, but no visible init containers; create a new pod sandbox": { 1384 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1385 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1386 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1387 status.ContainerStatuses = []*kubecontainer.Status{} 1388 }, 1389 actions: podActions{ 1390 KillPod: true, 1391 CreateSandbox: true, 1392 SandboxID: baseStatus.SandboxStatuses[0].Id, 1393 Attempt: uint32(1), 1394 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 1395 InitContainersToStart: []int{0}, 1396 ContainersToStart: []int{}, 1397 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1398 }, 1399 }, 1400 "Pod sandbox not ready, init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": { 1401 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1402 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1403 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1404 status.ContainerStatuses[2].ExitCode = 137 1405 }, 1406 actions: podActions{ 1407 KillPod: true, 1408 CreateSandbox: true, 1409 SandboxID: baseStatus.SandboxStatuses[0].Id, 1410 Attempt: uint32(1), 1411 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 1412 InitContainersToStart: []int{0}, 1413 ContainersToStart: []int{}, 1414 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1415 }, 1416 }, 1417 "some of the init container statuses are missing but the last init container is running, don't restart preceding ones": { 1418 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1419 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1420 status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning 1421 status.ContainerStatuses = status.ContainerStatuses[2:] 1422 }, 1423 actions: podActions{ 1424 KillPod: false, 1425 SandboxID: baseStatus.SandboxStatuses[0].Id, 1426 ContainersToStart: []int{}, 1427 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1428 }, 1429 }, 1430 } { 1431 pod, status := makeBasePodAndStatusWithInitContainers() 1432 if test.mutatePodFn != nil { 1433 test.mutatePodFn(pod) 1434 } 1435 if test.mutateStatusFn != nil { 1436 test.mutateStatusFn(status) 1437 } 1438 ctx := context.Background() 1439 actions := m.computePodActions(ctx, pod, status) 1440 if !sidecarContainersEnabled { 1441 // If sidecar containers are disabled, we should not see any 1442 // InitContainersToStart in the actions. 1443 test.actions.InitContainersToStart = nil 1444 } else { 1445 // If sidecar containers are enabled, we should not see any 1446 // NextInitContainerToStart in the actions. 1447 test.actions.NextInitContainerToStart = nil 1448 } 1449 verifyActions(t, &test.actions, &actions, desc) 1450 } 1451 } 1452 1453 func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus) { 1454 pod, status := makeBasePodAndStatus() 1455 pod.Spec.InitContainers = []v1.Container{ 1456 { 1457 Name: "init1", 1458 Image: "bar-image", 1459 }, 1460 { 1461 Name: "init2", 1462 Image: "bar-image", 1463 }, 1464 { 1465 Name: "init3", 1466 Image: "bar-image", 1467 }, 1468 } 1469 // Replace the original statuses of the containers with those for the init 1470 // containers. 1471 status.ContainerStatuses = []*kubecontainer.Status{ 1472 { 1473 ID: kubecontainer.ContainerID{ID: "initid1"}, 1474 Name: "init1", State: kubecontainer.ContainerStateExited, 1475 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1476 }, 1477 { 1478 ID: kubecontainer.ContainerID{ID: "initid2"}, 1479 Name: "init2", State: kubecontainer.ContainerStateExited, 1480 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1481 }, 1482 { 1483 ID: kubecontainer.ContainerID{ID: "initid3"}, 1484 Name: "init3", State: kubecontainer.ContainerStateExited, 1485 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1486 }, 1487 } 1488 return pod, status 1489 } 1490 1491 func TestComputePodActionsWithRestartableInitContainers(t *testing.T) { 1492 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)() 1493 _, _, m, err := createTestRuntimeManager() 1494 require.NoError(t, err) 1495 1496 // Creating a pair reference pod and status for the test cases to refer 1497 // the specific fields. 1498 basePod, baseStatus := makeBasePodAndStatusWithRestartableInitContainers() 1499 noAction := podActions{ 1500 SandboxID: baseStatus.SandboxStatuses[0].Id, 1501 ContainersToStart: []int{}, 1502 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1503 } 1504 1505 for desc, test := range map[string]struct { 1506 mutatePodFn func(*v1.Pod) 1507 mutateStatusFn func(*v1.Pod, *kubecontainer.PodStatus) 1508 actions podActions 1509 resetStatusFn func(*kubecontainer.PodStatus) 1510 }{ 1511 "initialization completed; start all containers": { 1512 actions: podActions{ 1513 SandboxID: baseStatus.SandboxStatuses[0].Id, 1514 ContainersToStart: []int{0, 1, 2}, 1515 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1516 }, 1517 }, 1518 "no init containers have been started; start the first one": { 1519 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1520 status.ContainerStatuses = nil 1521 }, 1522 actions: podActions{ 1523 SandboxID: baseStatus.SandboxStatuses[0].Id, 1524 InitContainersToStart: []int{0}, 1525 ContainersToStart: []int{}, 1526 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1527 }, 1528 }, 1529 "initialization in progress; do nothing": { 1530 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1531 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1532 status.ContainerStatuses[2].State = kubecontainer.ContainerStateCreated 1533 }, 1534 actions: noAction, 1535 }, 1536 "restartable init container has started; start the next": { 1537 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1538 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1539 status.ContainerStatuses = status.ContainerStatuses[:1] 1540 }, 1541 actions: podActions{ 1542 SandboxID: baseStatus.SandboxStatuses[0].Id, 1543 InitContainersToStart: []int{1}, 1544 ContainersToStart: []int{}, 1545 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1546 }, 1547 }, 1548 "livenessProbe has not been run; start the nothing": { 1549 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1550 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1551 m.livenessManager.Remove(status.ContainerStatuses[1].ID) 1552 status.ContainerStatuses = status.ContainerStatuses[:2] 1553 }, 1554 actions: podActions{ 1555 SandboxID: baseStatus.SandboxStatuses[0].Id, 1556 InitContainersToStart: []int{2}, 1557 ContainersToStart: []int{}, 1558 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1559 }, 1560 }, 1561 "livenessProbe in progress; start the next": { 1562 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1563 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1564 m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod) 1565 status.ContainerStatuses = status.ContainerStatuses[:2] 1566 }, 1567 actions: podActions{ 1568 SandboxID: baseStatus.SandboxStatuses[0].Id, 1569 InitContainersToStart: []int{2}, 1570 ContainersToStart: []int{}, 1571 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1572 }, 1573 resetStatusFn: func(status *kubecontainer.PodStatus) { 1574 m.livenessManager.Remove(status.ContainerStatuses[1].ID) 1575 }, 1576 }, 1577 "livenessProbe has completed; start the next": { 1578 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1579 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1580 status.ContainerStatuses = status.ContainerStatuses[:2] 1581 }, 1582 actions: podActions{ 1583 SandboxID: baseStatus.SandboxStatuses[0].Id, 1584 InitContainersToStart: []int{2}, 1585 ContainersToStart: []int{}, 1586 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1587 }, 1588 }, 1589 "kill and recreate the restartable init container if the liveness check has failed": { 1590 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1591 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1592 m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod) 1593 }, 1594 actions: podActions{ 1595 SandboxID: baseStatus.SandboxStatuses[0].Id, 1596 InitContainersToStart: []int{2}, 1597 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}), 1598 ContainersToStart: []int{0, 1, 2}, 1599 }, 1600 resetStatusFn: func(status *kubecontainer.PodStatus) { 1601 m.livenessManager.Remove(status.ContainerStatuses[2].ID) 1602 }, 1603 }, 1604 "startupProbe has not been run; do nothing": { 1605 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1606 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1607 m.startupManager.Remove(status.ContainerStatuses[1].ID) 1608 status.ContainerStatuses = status.ContainerStatuses[:2] 1609 }, 1610 actions: noAction, 1611 }, 1612 "startupProbe in progress; do nothing": { 1613 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1614 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1615 m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod) 1616 status.ContainerStatuses = status.ContainerStatuses[:2] 1617 }, 1618 actions: noAction, 1619 resetStatusFn: func(status *kubecontainer.PodStatus) { 1620 m.startupManager.Remove(status.ContainerStatuses[1].ID) 1621 }, 1622 }, 1623 "startupProbe has completed; start the next": { 1624 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1625 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1626 status.ContainerStatuses = status.ContainerStatuses[:2] 1627 }, 1628 actions: podActions{ 1629 SandboxID: baseStatus.SandboxStatuses[0].Id, 1630 InitContainersToStart: []int{2}, 1631 ContainersToStart: []int{}, 1632 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1633 }, 1634 }, 1635 "kill and recreate the restartable init container if the startup check has failed": { 1636 mutatePodFn: func(pod *v1.Pod) { 1637 pod.Spec.RestartPolicy = v1.RestartPolicyAlways 1638 pod.Spec.InitContainers[2].StartupProbe = &v1.Probe{} 1639 }, 1640 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1641 m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod) 1642 }, 1643 actions: podActions{ 1644 SandboxID: baseStatus.SandboxStatuses[0].Id, 1645 InitContainersToStart: []int{2}, 1646 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}), 1647 ContainersToStart: []int{}, 1648 }, 1649 resetStatusFn: func(status *kubecontainer.PodStatus) { 1650 m.startupManager.Remove(status.ContainerStatuses[2].ID) 1651 }, 1652 }, 1653 "restart terminated restartable init container and next init container": { 1654 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1655 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1656 status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited 1657 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1658 }, 1659 actions: podActions{ 1660 SandboxID: baseStatus.SandboxStatuses[0].Id, 1661 InitContainersToStart: []int{0, 2}, 1662 ContainersToStart: []int{}, 1663 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1664 }, 1665 }, 1666 "restart terminated restartable init container and regular containers": { 1667 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1668 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1669 status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited 1670 }, 1671 actions: podActions{ 1672 SandboxID: baseStatus.SandboxStatuses[0].Id, 1673 InitContainersToStart: []int{0}, 1674 ContainersToStart: []int{0, 1, 2}, 1675 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1676 }, 1677 }, 1678 "Pod sandbox not ready, restartable init container failed, but RestartPolicy == Never; kill pod only": { 1679 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1680 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1681 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1682 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1683 status.ContainerStatuses[2].ExitCode = 137 1684 }, 1685 actions: podActions{ 1686 KillPod: true, 1687 CreateSandbox: false, 1688 SandboxID: baseStatus.SandboxStatuses[0].Id, 1689 Attempt: uint32(1), 1690 ContainersToStart: []int{}, 1691 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1692 }, 1693 }, 1694 "Pod sandbox not ready, and RestartPolicy == Never, but no visible restartable init containers; create a new pod sandbox": { 1695 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1696 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1697 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1698 status.ContainerStatuses = []*kubecontainer.Status{} 1699 }, 1700 actions: podActions{ 1701 KillPod: true, 1702 CreateSandbox: true, 1703 SandboxID: baseStatus.SandboxStatuses[0].Id, 1704 Attempt: uint32(1), 1705 InitContainersToStart: []int{0}, 1706 ContainersToStart: []int{}, 1707 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1708 }, 1709 }, 1710 "Pod sandbox not ready, restartable init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": { 1711 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1712 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1713 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1714 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1715 status.ContainerStatuses[2].ExitCode = 137 1716 }, 1717 actions: podActions{ 1718 KillPod: true, 1719 CreateSandbox: true, 1720 SandboxID: baseStatus.SandboxStatuses[0].Id, 1721 Attempt: uint32(1), 1722 InitContainersToStart: []int{0}, 1723 ContainersToStart: []int{}, 1724 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1725 }, 1726 }, 1727 "Pod sandbox not ready, restartable init container failed, and RestartPolicy == Always; create a new pod sandbox": { 1728 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1729 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1730 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1731 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1732 status.ContainerStatuses[2].ExitCode = 137 1733 }, 1734 actions: podActions{ 1735 KillPod: true, 1736 CreateSandbox: true, 1737 SandboxID: baseStatus.SandboxStatuses[0].Id, 1738 Attempt: uint32(1), 1739 InitContainersToStart: []int{0}, 1740 ContainersToStart: []int{}, 1741 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1742 }, 1743 }, 1744 "initialization failed; restart the last restartable init container even if pod's RestartPolicy == Never": { 1745 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1746 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1747 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1748 status.ContainerStatuses[2].ExitCode = 137 1749 }, 1750 actions: podActions{ 1751 SandboxID: baseStatus.SandboxStatuses[0].Id, 1752 InitContainersToStart: []int{2}, 1753 ContainersToStart: []int{}, 1754 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1755 }, 1756 }, 1757 "restartable init container state unknown; kill and recreate the last restartable init container even if pod's RestartPolicy == Never": { 1758 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1759 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1760 status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown 1761 }, 1762 actions: podActions{ 1763 SandboxID: baseStatus.SandboxStatuses[0].Id, 1764 InitContainersToStart: []int{2}, 1765 ContainersToStart: []int{}, 1766 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}), 1767 }, 1768 }, 1769 "restart restartable init container if regular containers are running even if pod's RestartPolicy == Never": { 1770 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1771 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1772 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited 1773 status.ContainerStatuses[2].ExitCode = 137 1774 // all main containers are running 1775 for i := 1; i <= 3; i++ { 1776 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{ 1777 ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)}, 1778 Name: fmt.Sprintf("foo%d", i), 1779 State: kubecontainer.ContainerStateRunning, 1780 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]), 1781 }) 1782 } 1783 }, 1784 actions: podActions{ 1785 SandboxID: baseStatus.SandboxStatuses[0].Id, 1786 InitContainersToStart: []int{2}, 1787 ContainersToStart: []int{}, 1788 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1789 }, 1790 }, 1791 "kill the pod if all main containers succeeded if pod's RestartPolicy == Never": { 1792 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 1793 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1794 // all main containers succeeded 1795 for i := 1; i <= 3; i++ { 1796 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{ 1797 ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)}, 1798 Name: fmt.Sprintf("foo%d", i), 1799 State: kubecontainer.ContainerStateExited, 1800 ExitCode: 0, 1801 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]), 1802 }) 1803 } 1804 }, 1805 actions: podActions{ 1806 KillPod: true, 1807 SandboxID: baseStatus.SandboxStatuses[0].Id, 1808 ContainersToStart: []int{}, 1809 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1810 }, 1811 }, 1812 "some of the init container statuses are missing but the last init container is running, restart restartable init and regular containers": { 1813 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1814 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) { 1815 status.ContainerStatuses = status.ContainerStatuses[2:] 1816 }, 1817 actions: podActions{ 1818 SandboxID: baseStatus.SandboxStatuses[0].Id, 1819 InitContainersToStart: []int{0, 1}, 1820 ContainersToStart: []int{0, 1, 2}, 1821 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1822 }, 1823 }, 1824 } { 1825 pod, status := makeBasePodAndStatusWithRestartableInitContainers() 1826 m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod) 1827 m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod) 1828 m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod) 1829 m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod) 1830 if test.mutatePodFn != nil { 1831 test.mutatePodFn(pod) 1832 } 1833 if test.mutateStatusFn != nil { 1834 test.mutateStatusFn(pod, status) 1835 } 1836 ctx := context.Background() 1837 actions := m.computePodActions(ctx, pod, status) 1838 verifyActions(t, &test.actions, &actions, desc) 1839 if test.resetStatusFn != nil { 1840 test.resetStatusFn(status) 1841 } 1842 } 1843 } 1844 1845 func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontainer.PodStatus) { 1846 pod, status := makeBasePodAndStatus() 1847 pod.Spec.InitContainers = []v1.Container{ 1848 { 1849 Name: "restartable-init-1", 1850 Image: "bar-image", 1851 RestartPolicy: &containerRestartPolicyAlways, 1852 }, 1853 { 1854 Name: "restartable-init-2", 1855 Image: "bar-image", 1856 RestartPolicy: &containerRestartPolicyAlways, 1857 LivenessProbe: &v1.Probe{}, 1858 StartupProbe: &v1.Probe{}, 1859 }, 1860 { 1861 Name: "restartable-init-3", 1862 Image: "bar-image", 1863 RestartPolicy: &containerRestartPolicyAlways, 1864 LivenessProbe: &v1.Probe{}, 1865 StartupProbe: &v1.Probe{}, 1866 }, 1867 } 1868 // Replace the original statuses of the containers with those for the init 1869 // containers. 1870 status.ContainerStatuses = []*kubecontainer.Status{ 1871 { 1872 ID: kubecontainer.ContainerID{ID: "initid1"}, 1873 Name: "restartable-init-1", State: kubecontainer.ContainerStateRunning, 1874 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1875 }, 1876 { 1877 ID: kubecontainer.ContainerID{ID: "initid2"}, 1878 Name: "restartable-init-2", State: kubecontainer.ContainerStateRunning, 1879 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1880 }, 1881 { 1882 ID: kubecontainer.ContainerID{ID: "initid3"}, 1883 Name: "restartable-init-3", State: kubecontainer.ContainerStateRunning, 1884 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 1885 }, 1886 } 1887 return pod, status 1888 } 1889 1890 func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) { 1891 // Make sure existing test cases pass with feature enabled 1892 TestComputePodActions(t) 1893 TestComputePodActionsWithInitContainers(t) 1894 1895 t.Run("sidecar containers disabled", func(t *testing.T) { 1896 testComputePodActionsWithInitAndEphemeralContainers(t, false) 1897 }) 1898 t.Run("sidecar containers enabled", func(t *testing.T) { 1899 testComputePodActionsWithInitAndEphemeralContainers(t, true) 1900 }) 1901 } 1902 1903 func testComputePodActionsWithInitAndEphemeralContainers(t *testing.T, sidecarContainersEnabled bool) { 1904 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)() 1905 _, _, m, err := createTestRuntimeManager() 1906 require.NoError(t, err) 1907 1908 basePod, baseStatus := makeBasePodAndStatusWithInitAndEphemeralContainers() 1909 noAction := podActions{ 1910 SandboxID: baseStatus.SandboxStatuses[0].Id, 1911 ContainersToStart: []int{}, 1912 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1913 } 1914 1915 for desc, test := range map[string]struct { 1916 mutatePodFn func(*v1.Pod) 1917 mutateStatusFn func(*kubecontainer.PodStatus) 1918 actions podActions 1919 }{ 1920 "steady state; do nothing; ignore ephemeral container": { 1921 actions: noAction, 1922 }, 1923 "No ephemeral containers running; start one": { 1924 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1925 status.ContainerStatuses = status.ContainerStatuses[:4] 1926 }, 1927 actions: podActions{ 1928 SandboxID: baseStatus.SandboxStatuses[0].Id, 1929 ContainersToStart: []int{}, 1930 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1931 EphemeralContainersToStart: []int{0}, 1932 }, 1933 }, 1934 "Start second ephemeral container": { 1935 mutatePodFn: func(pod *v1.Pod) { 1936 pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, v1.EphemeralContainer{ 1937 EphemeralContainerCommon: v1.EphemeralContainerCommon{ 1938 Name: "debug2", 1939 Image: "busybox", 1940 }, 1941 }) 1942 }, 1943 actions: podActions{ 1944 SandboxID: baseStatus.SandboxStatuses[0].Id, 1945 ContainersToStart: []int{}, 1946 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1947 EphemeralContainersToStart: []int{1}, 1948 }, 1949 }, 1950 "Ephemeral container exited; do not restart": { 1951 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1952 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1953 status.ContainerStatuses[4].State = kubecontainer.ContainerStateExited 1954 }, 1955 actions: podActions{ 1956 SandboxID: baseStatus.SandboxStatuses[0].Id, 1957 ContainersToStart: []int{}, 1958 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1959 }, 1960 }, 1961 "initialization in progress; start ephemeral container": { 1962 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1963 status.ContainerStatuses[3].State = kubecontainer.ContainerStateRunning 1964 status.ContainerStatuses = status.ContainerStatuses[:4] 1965 }, 1966 actions: podActions{ 1967 SandboxID: baseStatus.SandboxStatuses[0].Id, 1968 ContainersToStart: []int{}, 1969 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 1970 EphemeralContainersToStart: []int{0}, 1971 }, 1972 }, 1973 "Create a new pod sandbox if the pod sandbox is dead, init container failed and RestartPolicy == OnFailure": { 1974 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure }, 1975 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1976 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1977 status.ContainerStatuses = status.ContainerStatuses[3:] 1978 status.ContainerStatuses[0].ExitCode = 137 1979 }, 1980 actions: podActions{ 1981 KillPod: true, 1982 CreateSandbox: true, 1983 SandboxID: baseStatus.SandboxStatuses[0].Id, 1984 Attempt: uint32(1), 1985 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 1986 InitContainersToStart: []int{0}, 1987 ContainersToStart: []int{}, 1988 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 1989 }, 1990 }, 1991 "Kill pod and do not restart ephemeral container if the pod sandbox is dead": { 1992 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways }, 1993 mutateStatusFn: func(status *kubecontainer.PodStatus) { 1994 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY 1995 }, 1996 actions: podActions{ 1997 KillPod: true, 1998 CreateSandbox: true, 1999 SandboxID: baseStatus.SandboxStatuses[0].Id, 2000 Attempt: uint32(1), 2001 NextInitContainerToStart: &basePod.Spec.InitContainers[0], 2002 InitContainersToStart: []int{0}, 2003 ContainersToStart: []int{}, 2004 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}), 2005 }, 2006 }, 2007 "Kill pod if all containers exited except ephemeral container": { 2008 mutatePodFn: func(pod *v1.Pod) { 2009 pod.Spec.RestartPolicy = v1.RestartPolicyNever 2010 }, 2011 mutateStatusFn: func(status *kubecontainer.PodStatus) { 2012 // all regular containers exited 2013 for i := 0; i < 3; i++ { 2014 status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited 2015 status.ContainerStatuses[i].ExitCode = 0 2016 } 2017 }, 2018 actions: podActions{ 2019 SandboxID: baseStatus.SandboxStatuses[0].Id, 2020 CreateSandbox: false, 2021 KillPod: true, 2022 ContainersToStart: []int{}, 2023 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{}, 2024 }, 2025 }, 2026 "Ephemeral container is in unknown state; leave it alone": { 2027 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever }, 2028 mutateStatusFn: func(status *kubecontainer.PodStatus) { 2029 status.ContainerStatuses[4].State = kubecontainer.ContainerStateUnknown 2030 }, 2031 actions: noAction, 2032 }, 2033 } { 2034 pod, status := makeBasePodAndStatusWithInitAndEphemeralContainers() 2035 if test.mutatePodFn != nil { 2036 test.mutatePodFn(pod) 2037 } 2038 if test.mutateStatusFn != nil { 2039 test.mutateStatusFn(status) 2040 } 2041 ctx := context.Background() 2042 actions := m.computePodActions(ctx, pod, status) 2043 if !sidecarContainersEnabled { 2044 // If sidecar containers are disabled, we should not see any 2045 // InitContainersToStart in the actions. 2046 test.actions.InitContainersToStart = nil 2047 } else { 2048 // If sidecar containers are enabled, we should not see any 2049 // NextInitContainerToStart in the actions. 2050 test.actions.NextInitContainerToStart = nil 2051 } 2052 verifyActions(t, &test.actions, &actions, desc) 2053 } 2054 } 2055 2056 func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) { 2057 ctx := context.Background() 2058 fakeRuntime, _, m, err := createTestRuntimeManager() 2059 assert.NoError(t, err) 2060 fakeRuntime.ErrorOnSandboxCreate = true 2061 2062 containers := []v1.Container{ 2063 { 2064 Name: "foo1", 2065 Image: "busybox", 2066 ImagePullPolicy: v1.PullIfNotPresent, 2067 }, 2068 } 2069 pod := &v1.Pod{ 2070 ObjectMeta: metav1.ObjectMeta{ 2071 UID: "12345678", 2072 Name: "foo", 2073 Namespace: "new", 2074 }, 2075 Spec: v1.PodSpec{ 2076 Containers: containers, 2077 }, 2078 } 2079 2080 backOff := flowcontrol.NewBackOff(time.Second, time.Minute) 2081 m.podStateProvider.(*fakePodStateProvider).removed = map[types.UID]struct{}{pod.UID: {}} 2082 2083 // GetPodStatus and the following SyncPod will not return errors in the 2084 // case where the pod has been deleted. We are not adding any pods into 2085 // the fakePodProvider so they are 'deleted'. 2086 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace) 2087 assert.NoError(t, err) 2088 result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff) 2089 // This will return an error if the pod has _not_ been deleted. 2090 assert.NoError(t, result.Error()) 2091 } 2092 2093 func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontainer.PodStatus) { 2094 pod, status := makeBasePodAndStatus() 2095 pod.Spec.InitContainers = []v1.Container{ 2096 { 2097 Name: "init1", 2098 Image: "bar-image", 2099 }, 2100 } 2101 pod.Spec.EphemeralContainers = []v1.EphemeralContainer{ 2102 { 2103 EphemeralContainerCommon: v1.EphemeralContainerCommon{ 2104 Name: "debug", 2105 Image: "busybox", 2106 }, 2107 }, 2108 } 2109 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{ 2110 ID: kubecontainer.ContainerID{ID: "initid1"}, 2111 Name: "init1", State: kubecontainer.ContainerStateExited, 2112 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]), 2113 }, &kubecontainer.Status{ 2114 ID: kubecontainer.ContainerID{ID: "debug1"}, 2115 Name: "debug", State: kubecontainer.ContainerStateRunning, 2116 Hash: kubecontainer.HashContainer((*v1.Container)(&pod.Spec.EphemeralContainers[0].EphemeralContainerCommon)), 2117 }) 2118 return pod, status 2119 } 2120 2121 func TestComputePodActionsForPodResize(t *testing.T) { 2122 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)() 2123 fakeRuntime, _, m, err := createTestRuntimeManager() 2124 m.machineInfo.MemoryCapacity = 17179860387 // 16GB 2125 assert.NoError(t, err) 2126 2127 cpu100m := resource.MustParse("100m") 2128 cpu200m := resource.MustParse("200m") 2129 mem100M := resource.MustParse("100Mi") 2130 mem200M := resource.MustParse("200Mi") 2131 cpuPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired} 2132 memPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired} 2133 cpuPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer} 2134 memPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer} 2135 2136 for desc, test := range map[string]struct { 2137 podResizePolicyFn func(*v1.Pod) 2138 mutatePodFn func(*v1.Pod) 2139 getExpectedPodActionsFn func(*v1.Pod, *kubecontainer.PodStatus) *podActions 2140 }{ 2141 "Update container CPU and memory resources": { 2142 mutatePodFn: func(pod *v1.Pod) { 2143 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{ 2144 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2145 } 2146 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found { 2147 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2148 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}, 2149 } 2150 } 2151 }, 2152 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2153 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name) 2154 pa := podActions{ 2155 SandboxID: podStatus.SandboxStatuses[0].Id, 2156 ContainersToStart: []int{}, 2157 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2158 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ 2159 v1.ResourceMemory: { 2160 { 2161 apiContainerIdx: 1, 2162 kubeContainerID: kcs.ID, 2163 desiredContainerResources: containerResources{ 2164 memoryLimit: mem100M.Value(), 2165 cpuLimit: cpu100m.MilliValue(), 2166 }, 2167 currentContainerResources: &containerResources{ 2168 memoryLimit: mem200M.Value(), 2169 cpuLimit: cpu200m.MilliValue(), 2170 }, 2171 }, 2172 }, 2173 v1.ResourceCPU: { 2174 { 2175 apiContainerIdx: 1, 2176 kubeContainerID: kcs.ID, 2177 desiredContainerResources: containerResources{ 2178 memoryLimit: mem100M.Value(), 2179 cpuLimit: cpu100m.MilliValue(), 2180 }, 2181 currentContainerResources: &containerResources{ 2182 memoryLimit: mem200M.Value(), 2183 cpuLimit: cpu200m.MilliValue(), 2184 }, 2185 }, 2186 }, 2187 }, 2188 } 2189 return &pa 2190 }, 2191 }, 2192 "Update container CPU resources": { 2193 mutatePodFn: func(pod *v1.Pod) { 2194 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{ 2195 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2196 } 2197 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found { 2198 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2199 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M}, 2200 } 2201 } 2202 }, 2203 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2204 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name) 2205 pa := podActions{ 2206 SandboxID: podStatus.SandboxStatuses[0].Id, 2207 ContainersToStart: []int{}, 2208 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2209 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ 2210 v1.ResourceCPU: { 2211 { 2212 apiContainerIdx: 1, 2213 kubeContainerID: kcs.ID, 2214 desiredContainerResources: containerResources{ 2215 memoryLimit: mem100M.Value(), 2216 cpuLimit: cpu100m.MilliValue(), 2217 }, 2218 currentContainerResources: &containerResources{ 2219 memoryLimit: mem100M.Value(), 2220 cpuLimit: cpu200m.MilliValue(), 2221 }, 2222 }, 2223 }, 2224 }, 2225 } 2226 return &pa 2227 }, 2228 }, 2229 "Update container memory resources": { 2230 mutatePodFn: func(pod *v1.Pod) { 2231 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{ 2232 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}, 2233 } 2234 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found { 2235 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2236 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M}, 2237 } 2238 } 2239 }, 2240 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2241 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name) 2242 pa := podActions{ 2243 SandboxID: podStatus.SandboxStatuses[0].Id, 2244 ContainersToStart: []int{}, 2245 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2246 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ 2247 v1.ResourceMemory: { 2248 { 2249 apiContainerIdx: 2, 2250 kubeContainerID: kcs.ID, 2251 desiredContainerResources: containerResources{ 2252 memoryLimit: mem200M.Value(), 2253 cpuLimit: cpu200m.MilliValue(), 2254 }, 2255 currentContainerResources: &containerResources{ 2256 memoryLimit: mem100M.Value(), 2257 cpuLimit: cpu200m.MilliValue(), 2258 }, 2259 }, 2260 }, 2261 }, 2262 } 2263 return &pa 2264 }, 2265 }, 2266 "Nothing when spec.Resources and status.Resources are equal": { 2267 mutatePodFn: func(pod *v1.Pod) { 2268 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{ 2269 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m}, 2270 } 2271 pod.Status.ContainerStatuses[1].Resources = &v1.ResourceRequirements{ 2272 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m}, 2273 } 2274 }, 2275 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2276 pa := podActions{ 2277 SandboxID: podStatus.SandboxStatuses[0].Id, 2278 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2279 ContainersToStart: []int{}, 2280 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{}, 2281 } 2282 return &pa 2283 }, 2284 }, 2285 "Update container CPU and memory resources with Restart policy for CPU": { 2286 podResizePolicyFn: func(pod *v1.Pod) { 2287 pod.Spec.Containers[0].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired} 2288 }, 2289 mutatePodFn: func(pod *v1.Pod) { 2290 pod.Spec.Containers[0].Resources = v1.ResourceRequirements{ 2291 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}, 2292 } 2293 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[0].Name); found { 2294 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2295 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2296 } 2297 } 2298 }, 2299 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2300 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[0].Name) 2301 killMap := make(map[kubecontainer.ContainerID]containerToKillInfo) 2302 killMap[kcs.ID] = containerToKillInfo{ 2303 container: &pod.Spec.Containers[0], 2304 name: pod.Spec.Containers[0].Name, 2305 } 2306 pa := podActions{ 2307 SandboxID: podStatus.SandboxStatuses[0].Id, 2308 ContainersToStart: []int{0}, 2309 ContainersToKill: killMap, 2310 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{}, 2311 UpdatePodResources: true, 2312 } 2313 return &pa 2314 }, 2315 }, 2316 "Update container CPU and memory resources with Restart policy for memory": { 2317 podResizePolicyFn: func(pod *v1.Pod) { 2318 pod.Spec.Containers[2].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired} 2319 }, 2320 mutatePodFn: func(pod *v1.Pod) { 2321 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{ 2322 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}, 2323 } 2324 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found { 2325 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2326 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2327 } 2328 } 2329 }, 2330 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2331 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name) 2332 killMap := make(map[kubecontainer.ContainerID]containerToKillInfo) 2333 killMap[kcs.ID] = containerToKillInfo{ 2334 container: &pod.Spec.Containers[2], 2335 name: pod.Spec.Containers[2].Name, 2336 } 2337 pa := podActions{ 2338 SandboxID: podStatus.SandboxStatuses[0].Id, 2339 ContainersToStart: []int{2}, 2340 ContainersToKill: killMap, 2341 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{}, 2342 UpdatePodResources: true, 2343 } 2344 return &pa 2345 }, 2346 }, 2347 "Update container memory resources with Restart policy for CPU": { 2348 podResizePolicyFn: func(pod *v1.Pod) { 2349 pod.Spec.Containers[1].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired} 2350 }, 2351 mutatePodFn: func(pod *v1.Pod) { 2352 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{ 2353 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem200M}, 2354 } 2355 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found { 2356 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2357 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2358 } 2359 } 2360 }, 2361 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2362 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name) 2363 pa := podActions{ 2364 SandboxID: podStatus.SandboxStatuses[0].Id, 2365 ContainersToStart: []int{}, 2366 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2367 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ 2368 v1.ResourceMemory: { 2369 { 2370 apiContainerIdx: 1, 2371 kubeContainerID: kcs.ID, 2372 desiredContainerResources: containerResources{ 2373 memoryLimit: mem200M.Value(), 2374 cpuLimit: cpu100m.MilliValue(), 2375 }, 2376 currentContainerResources: &containerResources{ 2377 memoryLimit: mem100M.Value(), 2378 cpuLimit: cpu100m.MilliValue(), 2379 }, 2380 }, 2381 }, 2382 }, 2383 } 2384 return &pa 2385 }, 2386 }, 2387 "Update container CPU resources with Restart policy for memory": { 2388 podResizePolicyFn: func(pod *v1.Pod) { 2389 pod.Spec.Containers[2].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired} 2390 }, 2391 mutatePodFn: func(pod *v1.Pod) { 2392 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{ 2393 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M}, 2394 } 2395 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found { 2396 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{ 2397 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}, 2398 } 2399 } 2400 }, 2401 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions { 2402 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name) 2403 pa := podActions{ 2404 SandboxID: podStatus.SandboxStatuses[0].Id, 2405 ContainersToStart: []int{}, 2406 ContainersToKill: getKillMap(pod, podStatus, []int{}), 2407 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{ 2408 v1.ResourceCPU: { 2409 { 2410 apiContainerIdx: 2, 2411 kubeContainerID: kcs.ID, 2412 desiredContainerResources: containerResources{ 2413 memoryLimit: mem100M.Value(), 2414 cpuLimit: cpu200m.MilliValue(), 2415 }, 2416 currentContainerResources: &containerResources{ 2417 memoryLimit: mem100M.Value(), 2418 cpuLimit: cpu100m.MilliValue(), 2419 }, 2420 }, 2421 }, 2422 }, 2423 } 2424 return &pa 2425 }, 2426 }, 2427 } { 2428 pod, kps := makeBasePodAndStatus() 2429 for idx := range pod.Spec.Containers { 2430 // default resize policy when pod resize feature is enabled 2431 pod.Spec.Containers[idx].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired} 2432 } 2433 if test.podResizePolicyFn != nil { 2434 test.podResizePolicyFn(pod) 2435 } 2436 for idx := range pod.Spec.Containers { 2437 // compute hash 2438 if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil { 2439 kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx]) 2440 kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx]) 2441 } 2442 } 2443 makeAndSetFakePod(t, m, fakeRuntime, pod) 2444 ctx := context.Background() 2445 status, _ := m.GetPodStatus(ctx, kps.ID, pod.Name, pod.Namespace) 2446 for idx := range pod.Spec.Containers { 2447 if rcs := status.FindContainerStatusByName(pod.Spec.Containers[idx].Name); rcs != nil { 2448 if csIdx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[idx].Name); found { 2449 pod.Status.ContainerStatuses[csIdx].ContainerID = rcs.ID.String() 2450 } 2451 } 2452 } 2453 for idx := range pod.Spec.Containers { 2454 if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil { 2455 kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx]) 2456 kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx]) 2457 } 2458 } 2459 if test.mutatePodFn != nil { 2460 test.mutatePodFn(pod) 2461 } 2462 expectedActions := test.getExpectedPodActionsFn(pod, status) 2463 actions := m.computePodActions(ctx, pod, status) 2464 verifyActions(t, expectedActions, &actions, desc) 2465 } 2466 } 2467 2468 func TestUpdatePodContainerResources(t *testing.T) { 2469 // TODO: remove this check on this PR merges: https://github.com/kubernetes/kubernetes/pull/112599 2470 if goruntime.GOOS == "windows" { 2471 t.Skip("Updating Pod Container Resources is not supported on Windows.") 2472 } 2473 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)() 2474 fakeRuntime, _, m, err := createTestRuntimeManager() 2475 m.machineInfo.MemoryCapacity = 17179860387 // 16GB 2476 assert.NoError(t, err) 2477 2478 cpu100m := resource.MustParse("100m") 2479 cpu150m := resource.MustParse("150m") 2480 cpu200m := resource.MustParse("200m") 2481 cpu250m := resource.MustParse("250m") 2482 cpu300m := resource.MustParse("300m") 2483 cpu350m := resource.MustParse("350m") 2484 mem100M := resource.MustParse("100Mi") 2485 mem150M := resource.MustParse("150Mi") 2486 mem200M := resource.MustParse("200Mi") 2487 mem250M := resource.MustParse("250Mi") 2488 mem300M := resource.MustParse("300Mi") 2489 mem350M := resource.MustParse("350Mi") 2490 res100m100Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M} 2491 res150m100Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem100M} 2492 res100m150Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem150M} 2493 res150m150Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem150M} 2494 res200m200Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M} 2495 res250m200Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem200M} 2496 res200m250Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem250M} 2497 res250m250Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem250M} 2498 res300m300Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem300M} 2499 res350m300Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem300M} 2500 res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M} 2501 res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M} 2502 2503 pod, _ := makeBasePodAndStatus() 2504 makeAndSetFakePod(t, m, fakeRuntime, pod) 2505 2506 for dsc, tc := range map[string]struct { 2507 resourceName v1.ResourceName 2508 apiSpecResources []v1.ResourceRequirements 2509 apiStatusResources []v1.ResourceRequirements 2510 requiresRestart []bool 2511 invokeUpdateResources bool 2512 expectedCurrentLimits []v1.ResourceList 2513 expectedCurrentRequests []v1.ResourceList 2514 }{ 2515 "Guaranteed QoS Pod - CPU & memory resize requested, update CPU": { 2516 resourceName: v1.ResourceCPU, 2517 apiSpecResources: []v1.ResourceRequirements{ 2518 {Limits: res150m150Mi, Requests: res150m150Mi}, 2519 {Limits: res250m250Mi, Requests: res250m250Mi}, 2520 {Limits: res350m350Mi, Requests: res350m350Mi}, 2521 }, 2522 apiStatusResources: []v1.ResourceRequirements{ 2523 {Limits: res100m100Mi, Requests: res100m100Mi}, 2524 {Limits: res200m200Mi, Requests: res200m200Mi}, 2525 {Limits: res300m300Mi, Requests: res300m300Mi}, 2526 }, 2527 requiresRestart: []bool{false, false, false}, 2528 invokeUpdateResources: true, 2529 expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi}, 2530 expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi}, 2531 }, 2532 "Guaranteed QoS Pod - CPU & memory resize requested, update memory": { 2533 resourceName: v1.ResourceMemory, 2534 apiSpecResources: []v1.ResourceRequirements{ 2535 {Limits: res150m150Mi, Requests: res150m150Mi}, 2536 {Limits: res250m250Mi, Requests: res250m250Mi}, 2537 {Limits: res350m350Mi, Requests: res350m350Mi}, 2538 }, 2539 apiStatusResources: []v1.ResourceRequirements{ 2540 {Limits: res100m100Mi, Requests: res100m100Mi}, 2541 {Limits: res200m200Mi, Requests: res200m200Mi}, 2542 {Limits: res300m300Mi, Requests: res300m300Mi}, 2543 }, 2544 requiresRestart: []bool{false, false, false}, 2545 invokeUpdateResources: true, 2546 expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, 2547 expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi}, 2548 }, 2549 } { 2550 var containersToUpdate []containerToUpdateInfo 2551 for idx := range pod.Spec.Containers { 2552 // default resize policy when pod resize feature is enabled 2553 pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx] 2554 pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx] 2555 cInfo := containerToUpdateInfo{ 2556 apiContainerIdx: idx, 2557 kubeContainerID: kubecontainer.ContainerID{}, 2558 desiredContainerResources: containerResources{ 2559 memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(), 2560 memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(), 2561 cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(), 2562 cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(), 2563 }, 2564 currentContainerResources: &containerResources{ 2565 memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(), 2566 memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(), 2567 cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(), 2568 cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(), 2569 }, 2570 } 2571 containersToUpdate = append(containersToUpdate, cInfo) 2572 } 2573 fakeRuntime.Called = []string{} 2574 err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate) 2575 assert.NoError(t, err, dsc) 2576 2577 if tc.invokeUpdateResources { 2578 assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc) 2579 } 2580 for idx := range pod.Spec.Containers { 2581 assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc) 2582 assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc) 2583 assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc) 2584 assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc) 2585 } 2586 } 2587 }