k8s.io/kubernetes@v1.29.3/pkg/kubelet/status/status_manager_test.go

/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package status

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/stretchr/testify/assert"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/pkg/kubelet/util"
)

type mutablePodManager interface {
	AddPod(*v1.Pod)
	UpdatePod(*v1.Pod)
	RemovePod(*v1.Pod)
}

// getTestPod generates a new instance of the test pod with the same initial values.
func getTestPod() *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
	}
}

// After adding reconciliation, if the status in the pod manager differs from the
// cached status, a reconciliation is triggered, which would break the older unit
// tests. To keep those tests simple, testSyncBatch() makes the statuses in the pod
// manager match the cached ones before calling syncBatch(true), so no reconciliation
// occurs.
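// Here syncBatch(true) performs a full pass over every status in m.podStatuses,
// while consumeUpdates (below) drains podStatusChannel and performs incremental
// syncBatch(false) passes, returning the number of statuses that were synced.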
func (m *manager) testSyncBatch() {
	for uid, status := range m.podStatuses {
		pod, ok := m.podManager.GetPodByUID(uid)
		if ok {
			pod.Status = status.status
		}
		pod, ok = m.podManager.GetMirrorPodByPod(pod)
		if ok {
			pod.Status = status.status
		}
	}
	m.syncBatch(true)
}

func newTestManager(kubeClient clientset.Interface) *manager {
	podManager := kubepod.NewBasicPodManager()
	podManager.(mutablePodManager).AddPod(getTestPod())
	podStartupLatencyTracker := util.NewPodStartupLatencyTracker()
	testRootDir := ""
	if tempDir, err := os.MkdirTemp("", "kubelet_test."); err != nil {
		return nil
	} else {
		testRootDir = tempDir
	}
	return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker, testRootDir).(*manager)
}

func generateRandomMessage() string {
	return strconv.Itoa(rand.Int())
}

func getRandomPodStatus() v1.PodStatus {
	return v1.PodStatus{
		Message: generateRandomMessage(),
	}
}

func verifyActions(t *testing.T, manager *manager, expectedActions []core.Action) {
	t.Helper()
	manager.consumeUpdates()
	actions := manager.kubeClient.(*fake.Clientset).Actions()
	defer manager.kubeClient.(*fake.Clientset).ClearActions()
	if len(actions) != len(expectedActions) {
		t.Fatalf("unexpected actions: %s", cmp.Diff(expectedActions, actions))
	}
	for i := 0; i < len(actions); i++ {
		e := expectedActions[i]
		a := actions[i]
		if !a.Matches(e.GetVerb(), e.GetResource().Resource) || a.GetSubresource() != e.GetSubresource() {
			t.Errorf("unexpected actions: %s", cmp.Diff(expectedActions, actions))
		}
	}
}

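// verifyUpdates drains all queued status updates through consumeUpdates and fails
// the test if the number of updates that actually synced differs from expectedUpdates.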
func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
	t.Helper()
	// Consume all updates in the channel.
	numUpdates := manager.consumeUpdates()
	if numUpdates != expectedUpdates {
		t.Errorf("unexpected number of updates %d, expected %d", numUpdates, expectedUpdates)
	}
}

func (m *manager) consumeUpdates() int {
	updates := 0
	for {
		select {
		case <-m.podStatusChannel:
			updates += m.syncBatch(false)
		default:
			return updates
		}
	}
}

func TestNewStatus(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 1)

	status := expectPodStatus(t, syncer, testPod)
	if status.StartTime.IsZero() {
		t.Errorf("SetPodStatus did not set a proper start time value")
	}
}

func TestNewStatusPreservesPodStartTime(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: v1.PodStatus{},
	}
	now := metav1.Now()
	startTime := metav1.NewTime(now.Time.Add(-1 * time.Minute))
	pod.Status.StartTime = &startTime
	syncer.SetPodStatus(pod, getRandomPodStatus())

	status := expectPodStatus(t, syncer, pod)
	if !status.StartTime.Time.Equal(startTime.Time) {
		t.Errorf("Unexpected start time, expected %v, actual %v", startTime, status.StartTime)
	}
}

func getReadyPodStatus() v1.PodStatus {
	return v1.PodStatus{
		Conditions: []v1.PodCondition{
			{
				Type:   v1.PodReady,
				Status: v1.ConditionTrue,
			},
		},
	}
}

func TestNewStatusSetsReadyTransitionTime(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	podStatus := getReadyPodStatus()
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: v1.PodStatus{},
	}
	syncer.SetPodStatus(pod, podStatus)
	verifyUpdates(t, syncer, 1)
	status := expectPodStatus(t, syncer, pod)
	readyCondition := podutil.GetPodReadyCondition(status)
	if readyCondition.LastTransitionTime.IsZero() {
		t.Errorf("Unexpected: last transition time not set")
	}
}

func TestChangedStatus(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 1)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 1)
}

func TestChangedStatusKeepsStartTime(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	now := metav1.Now()
	firstStatus := getRandomPodStatus()
	firstStatus.StartTime = &now
	syncer.SetPodStatus(testPod, firstStatus)
	verifyUpdates(t, syncer, 1)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyUpdates(t, syncer, 1)
	finalStatus := expectPodStatus(t, syncer, testPod)
	if finalStatus.StartTime.IsZero() {
		t.Errorf("StartTime should not be zero")
	}
	expected := now.Rfc3339Copy()
	if !finalStatus.StartTime.Equal(&expected) {
		t.Errorf("Expected %v, but got %v", expected, finalStatus.StartTime)
	}
}

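// Flipping the Ready condition counts as a change: it must produce an update and move
// LastTransitionTime forward (compare TestUnchangedStatusPreservesLastTransitionTime
// below for the no-op case).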
func TestChangedStatusUpdatesLastTransitionTime(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	podStatus := getReadyPodStatus()
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: v1.PodStatus{},
	}

	syncer.SetPodStatus(pod, podStatus)
	verifyUpdates(t, syncer, 1)
	oldStatus := expectPodStatus(t, syncer, pod)
	anotherStatus := getReadyPodStatus()
	anotherStatus.Conditions[0].Status = v1.ConditionFalse
	syncer.SetPodStatus(pod, anotherStatus)
	verifyUpdates(t, syncer, 1)
	newStatus := expectPodStatus(t, syncer, pod)

	oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
	newReadyCondition := podutil.GetPodReadyCondition(newStatus)
	if newReadyCondition.LastTransitionTime.IsZero() {
		t.Errorf("Unexpected: last transition time not set")
	}
	if newReadyCondition.LastTransitionTime.Before(&oldReadyCondition.LastTransitionTime) {
		t.Errorf("Unexpected: new transition time %s, is before old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime)
	}
}

func TestUnchangedStatus(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	podStatus := getRandomPodStatus()
	syncer.SetPodStatus(testPod, podStatus)
	syncer.SetPodStatus(testPod, podStatus)
	verifyUpdates(t, syncer, 1)
}

func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	podStatus := getReadyPodStatus()
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Status: v1.PodStatus{},
	}
	syncer.SetPodStatus(pod, podStatus)
	verifyUpdates(t, syncer, 1)
	oldStatus := expectPodStatus(t, syncer, pod)
	anotherStatus := getReadyPodStatus()
	syncer.SetPodStatus(pod, anotherStatus)
	// No update.
	verifyUpdates(t, syncer, 0)
	newStatus := expectPodStatus(t, syncer, pod)

	oldReadyCondition := podutil.GetPodReadyCondition(oldStatus)
	newReadyCondition := podutil.GetPodReadyCondition(newStatus)
	if newReadyCondition.LastTransitionTime.IsZero() {
		t.Errorf("Unexpected: last transition time not set")
	}
	if !oldReadyCondition.LastTransitionTime.Equal(&newReadyCondition.LastTransitionTime) {
		t.Errorf("Unexpected: new transition time %s, is not equal to old transition time %s", newReadyCondition.LastTransitionTime, oldReadyCondition.LastTransitionTime)
	}
}

func TestSyncPodIgnoresNotFound(t *testing.T) {
	client := fake.Clientset{}
	syncer := newTestManager(&client)
	client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod")
	})
	syncer.SetPodStatus(getTestPod(), getRandomPodStatus())
	verifyActions(t, syncer, []core.Action{getAction()})
}

func TestSyncPod(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	syncer.kubeClient = fake.NewSimpleClientset(testPod)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	verifyActions(t, syncer, []core.Action{getAction(), patchAction()})
}

func TestSyncPodChecksMismatchedUID(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	pod := getTestPod()
	pod.UID = "first"
	syncer.podManager.(mutablePodManager).AddPod(pod)
	differentPod := getTestPod()
	differentPod.UID = "second"
	syncer.podManager.(mutablePodManager).AddPod(differentPod)
	syncer.kubeClient = fake.NewSimpleClientset(pod)
	syncer.SetPodStatus(differentPod, getRandomPodStatus())
	verifyActions(t, syncer, []core.Action{getAction()})
}

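// TestSyncPodNoDeadlock drives the status manager through each API outcome (pod not
// found, recreated under a new UID, present, terminating, terminated, and an API
// error) by swapping the ret/err values returned by a single fake reactor between
// steps, verifying that each step completes without deadlocking.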
func TestSyncPodNoDeadlock(t *testing.T) {
	client := &fake.Clientset{}
	m := newTestManager(client)
	pod := getTestPod()

	// Setup fake client.
	var ret *v1.Pod
	var err error
	client.AddReactor("*", "pods", func(action core.Action) (bool, runtime.Object, error) {
		switch action := action.(type) {
		case core.GetAction:
			assert.Equal(t, pod.Name, action.GetName(), "Unexpected GetAction: %+v", action)
		case core.UpdateAction:
			assert.Equal(t, pod.Name, action.GetObject().(*v1.Pod).Name, "Unexpected UpdateAction: %+v", action)
		default:
			assert.Fail(t, "Unexpected Action: %+v", action)
		}
		return true, ret, err
	})

	pod.Status.ContainerStatuses = []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}}

	t.Logf("Pod not found.")
	ret = nil
	err = errors.NewNotFound(api.Resource("pods"), pod.Name)
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})

	t.Logf("Pod was recreated.")
	ret = getTestPod()
	ret.UID = "other_pod"
	err = nil
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})

	t.Logf("Pod not deleted (success case).")
	ret = getTestPod()
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), patchAction()})

	t.Logf("Pod is terminated, but still running.")
	pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), patchAction()})

	t.Logf("Pod is terminated successfully.")
	pod.Status.ContainerStatuses[0].State.Running = nil
	pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{}
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction(), patchAction()})

	t.Logf("Error case.")
	ret = nil
	err = fmt.Errorf("intentional test error")
	m.SetPodStatus(pod, getRandomPodStatus())
	verifyActions(t, m, []core.Action{getAction()})
}

func TestStaleUpdates(t *testing.T) {
	pod := getTestPod()
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)

	status := v1.PodStatus{Message: "initial status"}
	m.SetPodStatus(pod, status)
	status.Message = "first version bump"
	m.SetPodStatus(pod, status)
	status.Message = "second version bump"
	m.SetPodStatus(pod, status)

	t.Logf("sync batch before syncPods pushes latest status, resulting in one update during the batch")
	m.syncBatch(true)
	verifyUpdates(t, m, 0)
	verifyActions(t, m, []core.Action{getAction(), patchAction()})
	t.Logf("Nothing left in the channel to sync")
	verifyActions(t, m, []core.Action{})

	t.Log("Unchanged status should not send an update")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 0)

	t.Log("... even if it's stale as long as nothing changes")
	mirrorPodUID := kubetypes.MirrorPodUID(pod.UID)
	m.apiStatusVersions[mirrorPodUID] = m.apiStatusVersions[mirrorPodUID] - 1

	m.SetPodStatus(pod, status)
	m.syncBatch(true)
	verifyActions(t, m, []core.Action{getAction()})

	t.Logf("Nothing stuck in the pipe.")
	verifyUpdates(t, m, 0)
}

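// normalizeStatus sorts container statuses during normalization, so the shuffled
// permutations produced below must all compare equal under isPodStatusByKubeletEqual.
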
// shuffle returns a new shuffled list of container statuses.
func shuffle(statuses []v1.ContainerStatus) []v1.ContainerStatus {
	numStatuses := len(statuses)
	randIndexes := rand.Perm(numStatuses)
	shuffled := make([]v1.ContainerStatus, numStatuses)
	for i := 0; i < numStatuses; i++ {
		shuffled[i] = statuses[randIndexes[i]]
	}
	return shuffled
}

func TestStatusEquality(t *testing.T) {
	pod := v1.Pod{
		Spec: v1.PodSpec{},
	}
	containerStatus := []v1.ContainerStatus{}
	for i := 0; i < 10; i++ {
		s := v1.ContainerStatus{
			Name: fmt.Sprintf("container%d", i),
		}
		containerStatus = append(containerStatus, s)
	}
	podStatus := v1.PodStatus{
		ContainerStatuses: containerStatus,
	}
	for i := 0; i < 10; i++ {
		oldPodStatus := v1.PodStatus{
			ContainerStatuses: shuffle(podStatus.ContainerStatuses),
		}
		normalizeStatus(&pod, &oldPodStatus)
		normalizeStatus(&pod, &podStatus)
		if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) {
			t.Fatalf("Order of container statuses should not affect normalized equality.")
		}
	}

	oldPodStatus := podStatus
	podStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{
		Type:   v1.PodConditionType("www.example.com/feature"),
		Status: v1.ConditionTrue,
	})

	oldPodStatus.Conditions = append(podStatus.Conditions, v1.PodCondition{
		Type:   v1.PodConditionType("www.example.com/feature"),
		Status: v1.ConditionFalse,
	})

	normalizeStatus(&pod, &oldPodStatus)
	normalizeStatus(&pod, &podStatus)
	if !isPodStatusByKubeletEqual(&oldPodStatus, &podStatus) {
		t.Fatalf("Differences in pod condition not owned by kubelet should not affect normalized equality.")
	}
}

func TestStatusNormalizationEnforcesMaxBytes(t *testing.T) {
	pod := v1.Pod{
		Spec: v1.PodSpec{},
	}
	containerStatus := []v1.ContainerStatus{}
	for i := 0; i < 48; i++ {
		s := v1.ContainerStatus{
			Name: fmt.Sprintf("container%d", i),
			LastTerminationState: v1.ContainerState{
				Terminated: &v1.ContainerStateTerminated{
					Message: strings.Repeat("abcdefgh", 24+i%3),
				},
			},
		}
		containerStatus = append(containerStatus, s)
	}
	podStatus := v1.PodStatus{
		InitContainerStatuses: containerStatus[:24],
		ContainerStatuses:     containerStatus[24:],
	}
	result := normalizeStatus(&pod, &podStatus)
	count := 0
	for _, s := range result.InitContainerStatuses {
		l := len(s.LastTerminationState.Terminated.Message)
		if l < 192 || l > 256 {
			t.Errorf("container message had length %d", l)
		}
		count += l
	}
	if count > kubecontainer.MaxPodTerminationMessageLogLength {
		t.Errorf("message length not truncated")
	}
}

func TestStaticPod(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)

	t.Logf("Create the static pod")
	m.podManager.(mutablePodManager).AddPod(staticPod)
	assert.True(t, kubetypes.IsStaticPod(staticPod), "SetUp error: staticPod")

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)
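
	// The static pod's status is cached under the static pod's own UID; until a mirror
	// pod exists there is nothing to patch in the API, so the syncBatch below is a no-op.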
	t.Logf("Should be able to get the static pod status from the status manager")
	retrievedStatus := expectPodStatus(t, m, staticPod)
	normalizeStatus(staticPod, &status)
	assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)

	t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.")
	m.syncBatch(true)
	assert.Equal(t, len(m.kubeClient.(*fake.Clientset).Actions()), 0, "Expected no updates after syncBatch, got %+v", m.kubeClient.(*fake.Clientset).Actions())

	t.Logf("Create the mirror pod")
	m.podManager.(mutablePodManager).AddPod(mirrorPod)
	assert.True(t, kubetypes.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), kubetypes.ResolvedPodUID(staticPod.UID))

	t.Logf("Should be able to get the mirror pod status from the status manager")
	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
	assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)

	t.Logf("Should sync pod because the corresponding mirror pod is created")
	assert.Equal(t, m.syncBatch(true), 1)
	verifyActions(t, m, []core.Action{getAction(), patchAction()})

	t.Logf("syncBatch should not sync any pods because nothing is changed.")
	m.testSyncBatch()
	verifyActions(t, m, []core.Action{})

	t.Logf("Change mirror pod identity.")
	m.podManager.(mutablePodManager).RemovePod(mirrorPod)
	mirrorPod.UID = "new-mirror-pod"
	mirrorPod.Status = v1.PodStatus{}
	m.podManager.(mutablePodManager).AddPod(mirrorPod)

	t.Logf("Should not update to mirror pod, because UID has changed.")
	assert.Equal(t, m.syncBatch(true), 1)
	verifyActions(t, m, []core.Action{getAction()})
}

func TestTerminatePod(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	testPod.Spec.InitContainers = []v1.Container{
		{Name: "init-test-1"},
		{Name: "init-test-2"},
		{Name: "init-test-3"},
	}
	testPod.Spec.Containers = []v1.Container{
		{Name: "test-1"},
		{Name: "test-2"},
		{Name: "test-3"},
	}
	t.Logf("update the pod's status to Failed. TerminatePod should preserve this status update.")
	firstStatus := getRandomPodStatus()
	firstStatus.Phase = v1.PodFailed
	firstStatus.InitContainerStatuses = []v1.ContainerStatus{
		{Name: "init-test-1"},
		{Name: "init-test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
		{Name: "init-test-3", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 3}}},
		// TODO: If the last init container had failed, the pod would not have been
		// able to start any containers. Maybe we have to separate this test case
		// into two cases, one for init containers and one for containers.
	}
	firstStatus.ContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1"},
		{Name: "test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "Test", ExitCode: 2}}},
		{Name: "test-3", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "Test", ExitCode: 0}}},
	}
	syncer.SetPodStatus(testPod, firstStatus)

	t.Logf("set the testPod to a pod with Phase running, to simulate a stale pod")
	testPod.Status = getRandomPodStatus()
	testPod.Status.Phase = v1.PodRunning
	testPod.Status.InitContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1"},
		{Name: "init-test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
		{Name: "init-test-3", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
	}
	testPod.Status.ContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
		{Name: "test-2", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
		{Name: "test-3", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
	}

	syncer.TerminatePod(testPod)

	t.Logf("we expect the container statuses to have changed to terminated")
	newStatus := expectPodStatus(t, syncer, testPod)
	for i := range newStatus.ContainerStatuses {
		assert.False(t, newStatus.ContainerStatuses[i].State.Terminated == nil, "expected containers to be terminated")
	}
	for i := range newStatus.InitContainerStatuses {
		assert.False(t, newStatus.InitContainerStatuses[i].State.Terminated == nil, "expected init containers to be terminated")
	}

	expectUnknownState := v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "ContainerStatusUnknown", Message: "The container could not be located when the pod was terminated", ExitCode: 137}}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[0].State, expectUnknownState) {
		t.Errorf("terminated container state not defaulted: %s", cmp.Diff(newStatus.InitContainerStatuses[0].State, expectUnknownState))
	}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[1].State, firstStatus.InitContainerStatuses[1].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[2].State, firstStatus.InitContainerStatuses[2].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[0].State, expectUnknownState) {
		t.Errorf("terminated container state not defaulted: %s", cmp.Diff(newStatus.ContainerStatuses[0].State, expectUnknownState))
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[1].State, firstStatus.ContainerStatuses[1].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[2].State, firstStatus.ContainerStatuses[2].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}

	t.Logf("we expect the previous status update to be preserved.")
	assert.Equal(t, newStatus.Phase, firstStatus.Phase)
	assert.Equal(t, newStatus.Message, firstStatus.Message)
}

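// TestTerminatePodWaiting mirrors TestTerminatePod, but leaves the last init container
// and the last app container in a Waiting state: the waiting app container is defaulted
// to the ContainerStatusUnknown terminated state (exit code 137), while the waiting
// init container's recorded state is preserved as-is.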
func TestTerminatePodWaiting(t *testing.T) {
	syncer := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	testPod.Spec.InitContainers = []v1.Container{
		{Name: "init-test-1"},
		{Name: "init-test-2"},
		{Name: "init-test-3"},
	}
	testPod.Spec.Containers = []v1.Container{
		{Name: "test-1"},
		{Name: "test-2"},
		{Name: "test-3"},
	}
	t.Logf("update the pod's status to Failed. TerminatePod should preserve this status update.")
	firstStatus := getRandomPodStatus()
	firstStatus.Phase = v1.PodFailed
	firstStatus.InitContainerStatuses = []v1.ContainerStatus{
		{Name: "init-test-1"},
		{Name: "init-test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
		{Name: "init-test-3", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "InitTest"}}},
		// TODO: If the last init container had been in a waiting state, it would
		// not have been able to start any containers. Maybe we have to separate
		// this test case into two cases, one for init containers and one for
		// containers.
	}
	firstStatus.ContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1"},
		{Name: "test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "Test", ExitCode: 2}}},
		{Name: "test-3", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{Reason: "Test"}}},
	}
	syncer.SetPodStatus(testPod, firstStatus)

	t.Logf("set the testPod to a pod with Phase running, to simulate a stale pod")
	testPod.Status = getRandomPodStatus()
	testPod.Status.Phase = v1.PodRunning
	testPod.Status.InitContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1"},
		{Name: "init-test-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
		{Name: "init-test-3", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "InitTest", ExitCode: 0}}},
	}
	testPod.Status.ContainerStatuses = []v1.ContainerStatus{
		{Name: "test-1", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
		{Name: "test-2", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
		{Name: "test-3", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
	}

	syncer.TerminatePod(testPod)

	t.Logf("we expect the container statuses to have changed to terminated")
	newStatus := expectPodStatus(t, syncer, testPod)
	for _, container := range newStatus.ContainerStatuses {
		assert.False(t, container.State.Terminated == nil, "expected containers to be terminated")
	}
	for _, container := range newStatus.InitContainerStatuses[:2] {
		assert.False(t, container.State.Terminated == nil, "expected init containers to be terminated")
	}
	for _, container := range newStatus.InitContainerStatuses[2:] {
		assert.False(t, container.State.Waiting == nil, "expected init containers to be waiting")
	}

	expectUnknownState := v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "ContainerStatusUnknown", Message: "The container could not be located when the pod was terminated", ExitCode: 137}}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[0].State, expectUnknownState) {
		t.Errorf("terminated container state not defaulted: %s", cmp.Diff(newStatus.InitContainerStatuses[0].State, expectUnknownState))
	}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[1].State, firstStatus.InitContainerStatuses[1].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}
	if !reflect.DeepEqual(newStatus.InitContainerStatuses[2].State, firstStatus.InitContainerStatuses[2].State) {
		t.Errorf("waiting init container state not preserved: %s", cmp.Diff(newStatus.InitContainerStatuses[2].State, firstStatus.InitContainerStatuses[2].State))
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[0].State, expectUnknownState) {
		t.Errorf("terminated container state not defaulted: %s", cmp.Diff(newStatus.ContainerStatuses[0].State, expectUnknownState))
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[1].State, firstStatus.ContainerStatuses[1].State) {
		t.Errorf("existing terminated container state not preserved: %#v", newStatus.ContainerStatuses)
	}
	if !reflect.DeepEqual(newStatus.ContainerStatuses[2].State, expectUnknownState) {
		t.Errorf("waiting container state not defaulted: %s", cmp.Diff(newStatus.ContainerStatuses[2].State, expectUnknownState))
	}

	t.Logf("we expect the previous status update to be preserved.")
	assert.Equal(t, newStatus.Phase, firstStatus.Phase)
	assert.Equal(t, newStatus.Message, firstStatus.Message)
}

func TestTerminatePod_DefaultUnknownStatus(t *testing.T) {
	newPod := func(initContainers, containers int, fns ...func(*v1.Pod)) *v1.Pod {
		pod := getTestPod()
		for i := 0; i < initContainers; i++ {
			pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
				Name: fmt.Sprintf("init-%d", i),
			})
		}
		for i := 0; i < containers; i++ {
			pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
				Name: fmt.Sprintf("%d", i),
			})
		}
		pod.Status.StartTime = &metav1.Time{Time: time.Unix(1, 0).UTC()}
		for _, fn := range fns {
			fn(pod)
		}
		return pod
	}
	expectTerminatedUnknown := func(t *testing.T, state v1.ContainerState) {
		t.Helper()
		if state.Terminated == nil || state.Running != nil || state.Waiting != nil {
			t.Fatalf("unexpected state: %#v", state)
		}
		if state.Terminated.ExitCode != 137 || state.Terminated.Reason != "ContainerStatusUnknown" || len(state.Terminated.Message) == 0 {
			t.Fatalf("unexpected terminated state: %#v", state.Terminated)
		}
	}
	expectTerminated := func(t *testing.T, state v1.ContainerState, exitCode int32) {
		t.Helper()
		if state.Terminated == nil || state.Running != nil || state.Waiting != nil {
			t.Fatalf("unexpected state: %#v", state)
		}
		if state.Terminated.ExitCode != exitCode {
			t.Fatalf("unexpected terminated state: %#v", state.Terminated)
		}
	}
	expectWaiting := func(t *testing.T, state v1.ContainerState) {
		t.Helper()
		if state.Terminated != nil || state.Running != nil || state.Waiting == nil {
			t.Fatalf("unexpected state: %#v", state)
		}
	}

	testCases := []struct {
		name     string
		pod      *v1.Pod
		updateFn func(*v1.Pod)
		expectFn func(t *testing.T, status v1.PodStatus)
	}{
		{pod: newPod(0, 1, func(pod *v1.Pod) { pod.Status.Phase = v1.PodFailed })},
		{
			pod: newPod(0, 1, func(pod *v1.Pod) {
				pod.Status.Phase = v1.PodRunning
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				status.Phase = v1.PodFailed
			},
		},
		{
			pod: newPod(0, 1, func(pod *v1.Pod) {
				pod.Status.Phase = v1.PodRunning
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "Test", ExitCode: 2}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				status.Phase = v1.PodFailed
			},
		},
		{
			name: "last termination state set",
			pod: newPod(0, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{
						Name:                 "0",
						LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{Reason: "Test", ExitCode: 2}},
						State:                v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
					},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				container := status.ContainerStatuses[0]
				if container.LastTerminationState.Terminated.ExitCode != 2 {
					t.Fatalf("unexpected last state: %#v", container.LastTerminationState)
				}
				expectTerminatedUnknown(t, container.State)
			},
		},
		{
			name: "no previous state",
			pod: newPod(0, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "uninitialized pod defaults the first init container",
			pod: newPod(1, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodPending
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectWaiting(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "uninitialized pod defaults only the first init container",
			pod: newPod(2, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodPending
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-1", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectWaiting(t, status.InitContainerStatuses[1].State)
				expectWaiting(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "uninitialized pod defaults gaps",
			pod: newPod(4, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodPending
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-1", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}}},
					{Name: "init-3", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.InitContainerStatuses[1].State)
				expectTerminated(t, status.InitContainerStatuses[2].State, 1)
				expectWaiting(t, status.InitContainerStatuses[3].State)
				expectWaiting(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "failed last container is uninitialized",
			pod: newPod(3, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodPending
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-1", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.InitContainerStatuses[1].State)
				expectTerminated(t, status.InitContainerStatuses[2].State, 1)
				expectWaiting(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "successful last container is initialized",
			pod: newPod(3, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-1", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.InitContainerStatuses[1].State)
				expectTerminated(t, status.InitContainerStatuses[2].State, 0)
				expectTerminatedUnknown(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "successful last previous container is initialized, and container state is overwritten",
			pod: newPod(3, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{Name: "init-1", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
					{
						Name:                 "init-2",
						LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}},
						State:                v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
					},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.InitContainerStatuses[1].State)
				expectTerminatedUnknown(t, status.InitContainerStatuses[2].State)
				expectTerminatedUnknown(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "running container proves initialization",
			pod: newPod(1, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.ContainerStatuses[0].State)
			},
		},
		{
			name: "evidence of terminated container proves initialization",
			pod: newPod(1, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminated(t, status.ContainerStatuses[0].State, 0)
			},
		},
		{
			name: "evidence of previously terminated container proves initialization",
			pod: newPod(1, 1, func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
				pod.Status.Phase = v1.PodRunning
				pod.Status.InitContainerStatuses = []v1.ContainerStatus{
					{Name: "init-0", State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}}},
				}
				pod.Status.ContainerStatuses = []v1.ContainerStatus{
					{Name: "0", LastTerminationState: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
				}
			}),
			expectFn: func(t *testing.T, status v1.PodStatus) {
				expectTerminatedUnknown(t, status.InitContainerStatuses[0].State)
				expectTerminatedUnknown(t, status.ContainerStatuses[0].State)
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			podManager := kubepod.NewBasicPodManager()
			podStartupLatencyTracker := util.NewPodStartupLatencyTracker()
			syncer := NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker, "").(*manager)

			original := tc.pod.DeepCopy()
			syncer.SetPodStatus(original, original.Status)

			copied := tc.pod.DeepCopy()
			if tc.updateFn != nil {
				tc.updateFn(copied)
			}
			expected := copied.DeepCopy()

			syncer.TerminatePod(copied)
			status := expectPodStatus(t, syncer, tc.pod.DeepCopy())
			if tc.expectFn != nil {
				tc.expectFn(t, status)
				return
			}
			if !reflect.DeepEqual(expected.Status, status) {
				diff := cmp.Diff(expected.Status, status)
				if len(diff) == 0 {
					t.Fatalf("diff returned no results for failed DeepEqual: %#v != %#v", expected.Status, status)
				}
				t.Fatalf("unexpected status: %s", diff)
			}
		})
	}
}

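// TestTerminatePod_EnsurePodPhaseIsTerminal verifies that TerminatePod always leaves
// the pod in a terminal phase: Succeeded is kept, and every non-terminal phase
// (including unrecognized values) is forced to Failed.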
func TestTerminatePod_EnsurePodPhaseIsTerminal(t *testing.T) {
	testCases := map[string]struct {
		enablePodDisruptionConditions bool
		status                        v1.PodStatus
		wantStatus                    v1.PodStatus
	}{
		"Pending pod": {
			status: v1.PodStatus{
				Phase: v1.PodPending,
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodFailed,
			},
		},
		"Running pod": {
			status: v1.PodStatus{
				Phase: v1.PodRunning,
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodFailed,
			},
		},
		"Succeeded pod": {
			status: v1.PodStatus{
				Phase: v1.PodSucceeded,
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodSucceeded,
			},
		},
		"Failed pod": {
			status: v1.PodStatus{
				Phase: v1.PodFailed,
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodFailed,
			},
		},
		"Unknown pod": {
			status: v1.PodStatus{
				Phase: v1.PodUnknown,
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodFailed,
			},
		},
		"Unknown phase pod": {
			status: v1.PodStatus{
				Phase: v1.PodPhase("SomeUnknownPhase"),
			},
			wantStatus: v1.PodStatus{
				Phase: v1.PodFailed,
			},
		},
	}
	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			podManager := kubepod.NewBasicPodManager()
			podStartupLatencyTracker := util.NewPodStartupLatencyTracker()
			syncer := NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}, podStartupLatencyTracker, "").(*manager)

			pod := getTestPod()
			pod.Status = tc.status
			syncer.TerminatePod(pod)
			gotStatus := expectPodStatus(t, syncer, pod.DeepCopy())
			if diff := cmp.Diff(tc.wantStatus, gotStatus, cmpopts.IgnoreFields(v1.PodStatus{}, "StartTime")); diff != "" {
				t.Fatalf("unexpected status: %s", diff)
			}
		})
	}
}

func TestSetContainerReadiness(t *testing.T) {
	cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"}
	cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"}
	containerStatuses := []v1.ContainerStatus{
		{
			Name:        "c1",
			ContainerID: cID1.String(),
			Ready:       false,
		}, {
			Name:        "c2",
			ContainerID: cID2.String(),
			Ready:       false,
		},
	}
	status := v1.PodStatus{
		ContainerStatuses: containerStatuses,
		Conditions: []v1.PodCondition{{
			Type:   v1.PodReady,
			Status: v1.ConditionFalse,
		}},
	}
	pod := getTestPod()
	pod.Spec.Containers = []v1.Container{{Name: "c1"}, {Name: "c2"}}

	// Verify expected readiness of containers & pod.
	verifyReadiness := func(step string, status *v1.PodStatus, c1Ready, c2Ready, podReady bool) {
		for _, c := range status.ContainerStatuses {
			switch c.ContainerID {
			case cID1.String():
				if c.Ready != c1Ready {
					t.Errorf("[%s] Expected readiness of c1 to be %v but was %v", step, c1Ready, c.Ready)
				}
			case cID2.String():
				if c.Ready != c2Ready {
					t.Errorf("[%s] Expected readiness of c2 to be %v but was %v", step, c2Ready, c.Ready)
				}
			default:
				t.Fatalf("[%s] Unexpected container: %+v", step, c)
			}
		}
		if status.Conditions[0].Type != v1.PodReady {
			t.Fatalf("[%s] Unexpected condition: %+v", step, status.Conditions[0])
		} else if ready := (status.Conditions[0].Status == v1.ConditionTrue); ready != podReady {
			t.Errorf("[%s] Expected readiness of pod to be %v but was %v", step, podReady, ready)
		}
	}

	m := newTestManager(&fake.Clientset{})
	// Add test pod because the container spec has been changed.
	m.podManager.(mutablePodManager).AddPod(pod)

	t.Log("Setting readiness before status should fail.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 0)
	if status, ok := m.GetPodStatus(pod.UID); ok {
		t.Errorf("Unexpected PodStatus: %+v", status)
	}

	t.Log("Setting initial status.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("initial", &status, false, false, false)

	t.Log("Setting unchanged readiness should do nothing.")
	m.SetContainerReadiness(pod.UID, cID1, false)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("unchanged", &status, false, false, false)

	t.Log("Setting container readiness should generate update but not pod readiness.")
	m.SetContainerReadiness(pod.UID, cID1, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("c1 ready", &status, true, false, false)

	t.Log("Setting both containers to ready should update pod readiness.")
	m.SetContainerReadiness(pod.UID, cID2, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("all ready", &status, true, true, true)

	t.Log("Setting non-existent container readiness should fail.")
	m.SetContainerReadiness(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyReadiness("ignore non-existent", &status, true, true, true)
}

func TestSetContainerStartup(t *testing.T) {
	cID1 := kubecontainer.ContainerID{Type: "test", ID: "1"}
	cID2 := kubecontainer.ContainerID{Type: "test", ID: "2"}
	containerStatuses := []v1.ContainerStatus{
		{
			Name:        "c1",
			ContainerID: cID1.String(),
			Ready:       false,
		}, {
			Name:        "c2",
			ContainerID: cID2.String(),
			Ready:       false,
		},
	}
	status := v1.PodStatus{
		ContainerStatuses: containerStatuses,
		Conditions: []v1.PodCondition{{
			Type:   v1.PodReady,
			Status: v1.ConditionFalse,
		}},
	}
	pod := getTestPod()
	pod.Spec.Containers = []v1.Container{{Name: "c1"}, {Name: "c2"}}

	// Verify expected startup of containers & pod.
	verifyStartup := func(step string, status *v1.PodStatus, c1Started, c2Started, podStarted bool) {
		for _, c := range status.ContainerStatuses {
			switch c.ContainerID {
			case cID1.String():
				if (c.Started != nil && *c.Started) != c1Started {
					t.Errorf("[%s] Expected startup of c1 to be %v but was %v", step, c1Started, c.Started)
				}
			case cID2.String():
				if (c.Started != nil && *c.Started) != c2Started {
					t.Errorf("[%s] Expected startup of c2 to be %v but was %v", step, c2Started, c.Started)
				}
			default:
				t.Fatalf("[%s] Unexpected container: %+v", step, c)
			}
		}
	}

	m := newTestManager(&fake.Clientset{})
	// Add test pod because the container spec has been changed.
	m.podManager.(mutablePodManager).AddPod(pod)

	t.Log("Setting startup before status should fail.")
	m.SetContainerStartup(pod.UID, cID1, true)
	verifyUpdates(t, m, 0)
	if status, ok := m.GetPodStatus(pod.UID); ok {
		t.Errorf("Unexpected PodStatus: %+v", status)
	}

	t.Log("Setting initial status.")
	m.SetPodStatus(pod, status)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyStartup("initial", &status, false, false, false)

	t.Log("Setting unchanged startup should do nothing.")
	m.SetContainerStartup(pod.UID, cID1, false)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyStartup("unchanged", &status, false, false, false)

	t.Log("Setting container startup should generate update but not pod startup.")
	m.SetContainerStartup(pod.UID, cID1, true)
	verifyUpdates(t, m, 1) // Started = nil to false
	status = expectPodStatus(t, m, pod)
	verifyStartup("c1 ready", &status, true, false, false)

	t.Log("Setting both containers to ready should update pod startup.")
	m.SetContainerStartup(pod.UID, cID2, true)
	verifyUpdates(t, m, 1)
	status = expectPodStatus(t, m, pod)
	verifyStartup("all ready", &status, true, true, true)

	t.Log("Setting non-existent container startup should fail.")
	m.SetContainerStartup(pod.UID, kubecontainer.ContainerID{Type: "test", ID: "foo"}, true)
	verifyUpdates(t, m, 0)
	status = expectPodStatus(t, m, pod)
	verifyStartup("ignore non-existent", &status, true, true, true)
}

func TestSyncBatchCleanupVersions(t *testing.T) {
	m := newTestManager(&fake.Clientset{})
	testPod := getTestPod()
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-uid"
	mirrorPod.Name = "mirror_pod"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}

	t.Logf("Orphaned pods should be removed.")
	m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)] = 100
	m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)] = 200
	m.syncBatch(true)
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)]; ok {
		t.Errorf("Should have cleared status for testPod")
	}
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)]; ok {
		t.Errorf("Should have cleared status for mirrorPod")
	}

	t.Logf("Non-orphaned pods should not be removed.")
	m.SetPodStatus(testPod, getRandomPodStatus())
	m.podManager.(mutablePodManager).AddPod(mirrorPod)
	staticPod := mirrorPod
	staticPod.UID = "static-uid"
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	m.podManager.(mutablePodManager).AddPod(staticPod)
	m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)] = 100
	m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)] = 200
	m.testSyncBatch()
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(testPod.UID)]; !ok {
		t.Errorf("Should not have cleared status for testPod")
	}
	if _, ok := m.apiStatusVersions[kubetypes.MirrorPodUID(mirrorPod.UID)]; !ok {
		t.Errorf("Should not have cleared status for mirrorPod")
	}
}

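// TestReconcilePodStatus exercises needsReconcile: a status matching the API copy
// (including timestamps that differ only by RFC3339 second-level truncation) must not
// trigger any API action, while a genuinely different status forces a get and a patch.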
func TestReconcilePodStatus(t *testing.T) {
	testPod := getTestPod()
	client := fake.NewSimpleClientset(testPod)
	syncer := newTestManager(client)
	syncer.SetPodStatus(testPod, getRandomPodStatus())
	t.Logf("Call syncBatch directly to test reconcile")
	syncer.syncBatch(true) // The apiStatusVersions should be set now
	client.ClearActions()

	podStatus, ok := syncer.GetPodStatus(testPod.UID)
	if !ok {
		t.Fatalf("Should find pod status for pod: %#v", testPod)
	}
	testPod.Status = podStatus

	t.Logf("If the pod status is the same, a reconciliation is not needed and syncBatch should do nothing")
	syncer.podManager.(mutablePodManager).UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Fatalf("Pod status is the same, a reconciliation is not needed")
	}
	syncer.SetPodStatus(testPod, podStatus)
	syncer.syncBatch(true)
	verifyActions(t, syncer, []core.Action{})

	// If the pod status is the same and only the timestamp is in RFC3339 format (lower
	// precision, without nanoseconds), a reconciliation is not needed and syncBatch
	// should do nothing. The StartTime should have been set in SetPodStatus().
	// This test exists because the related issue #15262/PR #15263 to move the
	// apiserver to RFC3339NANO was closed.
	t.Logf("syncBatch should do nothing, as a reconciliation is not required")
	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
	testPod.Status.StartTime = &normalizedStartTime
	syncer.podManager.(mutablePodManager).UpdatePod(testPod)
	if syncer.needsReconcile(testPod.UID, podStatus) {
		t.Fatalf("Pod status only differs in timestamp format, a reconciliation is not needed")
	}
	syncer.SetPodStatus(testPod, podStatus)
	syncer.syncBatch(true)
	verifyActions(t, syncer, []core.Action{})

	t.Logf("If the pod status is different, a reconciliation is needed, and syncBatch should trigger an update")
	changedPodStatus := getRandomPodStatus()
	syncer.podManager.(mutablePodManager).UpdatePod(testPod)
	if !syncer.needsReconcile(testPod.UID, changedPodStatus) {
		t.Fatalf("Pod status is different, a reconciliation is needed")
	}
	syncer.SetPodStatus(testPod, changedPodStatus)
	syncer.syncBatch(true)
	verifyActions(t, syncer, []core.Action{getAction(), patchAction()})
}

func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
	status, ok := m.GetPodStatus(pod.UID)
	if !ok {
		t.Fatalf("Expected PodStatus for %q not found", pod.UID)
	}
	return status
}

func TestDeletePodBeforeFinished(t *testing.T) {
	pod := getTestPod()
	t.Logf("Set the deletion timestamp.")
	pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)
	m.podManager.(mutablePodManager).AddPod(pod)
	status := getRandomPodStatus()
	status.Phase = v1.PodFailed
	m.SetPodStatus(pod, status)
	t.Logf("Expect not to see a delete action as the pod isn't finished yet (TerminatePod isn't called)")
	verifyActions(t, m, []core.Action{getAction(), patchAction()})
}

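// TestDeletePodFinished is the counterpart to TestDeletePodBeforeFinished: once
// TerminatePod has marked the pod as finished, the status manager is allowed to delete
// the API pod, so a delete action is expected after the usual get and patch.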
func TestDeletePodFinished(t *testing.T) {
	pod := getTestPod()
	t.Logf("Set the deletion timestamp.")
	pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	client := fake.NewSimpleClientset(pod)
	m := newTestManager(client)
	m.podManager.(mutablePodManager).AddPod(pod)
	status := getRandomPodStatus()
	status.Phase = v1.PodFailed
	m.TerminatePod(pod)
	t.Logf("Expect to see a delete action as the pod is finished (TerminatePod called)")
	verifyActions(t, m, []core.Action{getAction(), patchAction(), deleteAction()})
}

func TestDoNotDeleteMirrorPods(t *testing.T) {
	staticPod := getTestPod()
	staticPod.Annotations = map[string]string{kubetypes.ConfigSourceAnnotationKey: "file"}
	mirrorPod := getTestPod()
	mirrorPod.UID = "mirror-12345678"
	mirrorPod.Annotations = map[string]string{
		kubetypes.ConfigSourceAnnotationKey: "api",
		kubetypes.ConfigMirrorAnnotationKey: "mirror",
	}
	t.Logf("Set the deletion timestamp.")
	mirrorPod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
	client := fake.NewSimpleClientset(mirrorPod)
	m := newTestManager(client)
	m.podManager.(mutablePodManager).AddPod(staticPod)
	m.podManager.(mutablePodManager).AddPod(mirrorPod)
	t.Logf("Verify setup.")
	assert.True(t, kubetypes.IsStaticPod(staticPod), "SetUp error: staticPod")
	assert.True(t, kubetypes.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), kubetypes.ResolvedPodUID(staticPod.UID))

	status := getRandomPodStatus()
	now := metav1.Now()
	status.StartTime = &now
	m.SetPodStatus(staticPod, status)

	t.Logf("Expect not to see a delete action.")
	verifyActions(t, m, []core.Action{getAction(), patchAction()})
}

func TestUpdateLastTransitionTime(t *testing.T) {
	// On Windows, time.Now() is not as precise, which means that 2 consecutive calls may
	// return the same timestamp. This test expects the old timestamp to be updated with a
	// newer one, so we set the old timestamp to one second in the past.
	// See: https://github.com/golang/go/issues/8687
	old := metav1.NewTime(time.Now().Add(-time.Second))
	for desc, test := range map[string]struct {
		condition    *v1.PodCondition
		oldCondition *v1.PodCondition
		expectUpdate bool
	}{
		"should do nothing if no corresponding condition": {
			expectUpdate: false,
		},
		"should update last transition time if no old condition": {
			condition: &v1.PodCondition{
				Type:   "test-type",
				Status: v1.ConditionTrue,
			},
			oldCondition: nil,
			expectUpdate: true,
		},
		"should update last transition time if condition is changed": {
			condition: &v1.PodCondition{
				Type:   "test-type",
				Status: v1.ConditionTrue,
			},
			oldCondition: &v1.PodCondition{
				Type:               "test-type",
				Status:             v1.ConditionFalse,
				LastTransitionTime: old,
			},
			expectUpdate: true,
		},
		"should keep last transition time if condition is not changed": {
			condition: &v1.PodCondition{
				Type:   "test-type",
				Status: v1.ConditionFalse,
			},
			oldCondition: &v1.PodCondition{
				Type:               "test-type",
				Status:             v1.ConditionFalse,
				LastTransitionTime: old,
			},
			expectUpdate: false,
		},
	} {
		t.Logf("TestCase %q", desc)
		status := &v1.PodStatus{}
		oldStatus := &v1.PodStatus{}
		if test.condition != nil {
			status.Conditions = []v1.PodCondition{*test.condition}
		}
		if test.oldCondition != nil {
			oldStatus.Conditions = []v1.PodCondition{*test.oldCondition}
		}
		updateLastTransitionTime(status, oldStatus, "test-type")
		if test.expectUpdate {
			assert.True(t, status.Conditions[0].LastTransitionTime.After(old.Time))
		} else if test.condition != nil {
			assert.Equal(t, old, status.Conditions[0].LastTransitionTime)
		}
	}
}

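// Action factories matched by verifyActions. Only the verb, resource, and subresource
// of each action are compared, so these stubs carry no object payloads.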

// getAction, patchAction, and deleteAction build the expected fake-client
// actions for these tests: a get of the pod, a patch of its status
// subresource, and a delete of the pod.
func getAction() core.GetAction {
	return core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}}
}

func patchAction() core.PatchAction {
	return core.PatchActionImpl{ActionImpl: core.ActionImpl{Verb: "patch", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
}

func deleteAction() core.DeleteAction {
	return core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}}
}

func TestMergePodStatus(t *testing.T) {
	useCases := []struct {
		desc                          string
		enablePodDisruptionConditions bool
		hasRunningContainers          bool
		oldPodStatus                  func(input v1.PodStatus) v1.PodStatus
		newPodStatus                  func(input v1.PodStatus) v1.PodStatus
		expectPodStatus               v1.PodStatus
	}{
		{
			"no change",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus { return input },
			func(input v1.PodStatus) v1.PodStatus { return input },
			getPodStatus(),
		},
		{
			"add DisruptionTarget condition when transitioning into failed phase; PodDisruptionConditions enabled",
			true,
			false,
			func(input v1.PodStatus) v1.PodStatus { return input },
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodFailed,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "TerminationByKubelet",
					},
					{
						Type:   v1.PodReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.ContainersReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
				},
				Message: "Message",
			},
		},
		{
			"don't add DisruptionTarget condition when transitioning into failed phase, but there might still be running containers; PodDisruptionConditions enabled",
			true,
			true,
			func(input v1.PodStatus) v1.PodStatus { return input },
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"preserve DisruptionTarget condition; PodDisruptionConditions enabled",
			true,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "TerminationByKubelet",
					},
				},
				Message: "Message",
			},
		},
		{
			"preserve DisruptionTarget condition; PodDisruptionConditions disabled",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "TerminationByKubelet",
					},
				},
				Message: "Message",
			},
		},
		{
			"override DisruptionTarget condition; PodDisruptionConditions enabled",
			true,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "EvictedByEvictionAPI",
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodFailed,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
					{
						Type:   v1.ContainersReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "TerminationByKubelet",
					},
				},
				Message: "Message",
			},
		},
		{
			"don't override DisruptionTarget condition when remaining in running phase; PodDisruptionConditions enabled",
			true,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "EvictedByEvictionAPI",
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "EvictedByEvictionAPI",
					},
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"don't override DisruptionTarget condition when transitioning to failed phase but there might still be running containers; PodDisruptionConditions enabled",
			true,
			true,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "EvictedByEvictionAPI",
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.DisruptionTarget,
					Status: v1.ConditionTrue,
					Reason: "TerminationByKubelet",
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.DisruptionTarget,
						Status: v1.ConditionTrue,
						Reason: "EvictedByEvictionAPI",
					},
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"readiness changes",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus { return input },
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions[0].Status = v1.ConditionFalse
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionFalse,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"additional pod condition",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.PodConditionType("example.com/feature"),
					Status: v1.ConditionTrue,
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus { return input },
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodConditionType("example.com/feature"),
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"additional pod condition and readiness changes",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.PodConditionType("example.com/feature"),
					Status: v1.ConditionTrue,
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions[0].Status = v1.ConditionFalse
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionFalse,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodConditionType("example.com/feature"),
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"additional pod condition changes",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.PodConditionType("example.com/feature"),
					Status: v1.ConditionTrue,
				})
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Conditions = append(input.Conditions, v1.PodCondition{
					Type:   v1.PodConditionType("example.com/feature"),
					Status: v1.ConditionFalse,
				})
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodConditionType("example.com/feature"),
						Status: v1.ConditionTrue,
					},
				},
				Message: "Message",
			},
		},
		{
			"phase is transitioning to failed and no containers running",
			false,
			false,
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodRunning
				input.Reason = "Unknown"
				input.Message = "Message"
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Reason = "Evicted"
				input.Message = "Was Evicted"
				return input
			},
			v1.PodStatus{
				Phase: v1.PodFailed,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
					{
						Type:   v1.ContainersReady,
						Status: v1.ConditionFalse,
						Reason: "PodFailed",
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Reason:  "Evicted",
				Message: "Was Evicted",
			},
		},
		{
			"phase is transitioning to failed and containers running",
			false,
			true,
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodRunning
				input.Reason = "Unknown"
				input.Message = "Message"
				return input
			},
			func(input v1.PodStatus) v1.PodStatus {
				input.Phase = v1.PodFailed
				input.Reason = "Evicted"
				input.Message = "Was Evicted"
				return input
			},
			v1.PodStatus{
				Phase: v1.PodRunning,
				Conditions: []v1.PodCondition{
					{
						Type:   v1.PodReady,
						Status: v1.ConditionTrue,
					},
					{
						Type:   v1.PodScheduled,
						Status: v1.ConditionTrue,
					},
				},
				Reason:  "Unknown",
				Message: "Message",
			},
		},
	}

	for _, tc := range useCases {
		t.Run(tc.desc, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)()
			output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
			if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
				t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output))
			}
		})
	}
}

// statusEqual compares two PodStatuses while ignoring their Conditions, which
// conditionsEqual handles separately.
func statusEqual(left, right v1.PodStatus) bool {
	left.Conditions = nil
	right.Conditions = nil
	return reflect.DeepEqual(left, right)
}

// conditionsEqual compares conditions by Type, Status, and Reason, ignoring
// both ordering and timestamps.
func conditionsEqual(left, right []v1.PodCondition) bool {
	if len(left) != len(right) {
		return false
	}

	for _, l := range left {
		found := false
		for _, r := range right {
			if l.Type == r.Type {
				found = true
				if l.Status != r.Status || l.Reason != r.Reason {
					return false
				}
			}
		}
		if !found {
			return false
		}
	}
	return true
}

// getPodStatus returns the baseline status that each TestMergePodStatus case
// mutates through its oldPodStatus and newPodStatus functions.
func getPodStatus() v1.PodStatus {
	return v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:   v1.PodReady,
				Status: v1.ConditionTrue,
			},
			{
				Type:   v1.PodScheduled,
				Status: v1.ConditionTrue,
			},
		},
		Message: "Message",
	}
}
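
// A minimal sketch (hypothetical helper, not exercised above) of the
// hold-back behavior asserted by the "phase is transitioning to failed and
// containers running" case: with couldHaveRunningContainers set to true,
// mergePodStatus keeps the pod in the Running phase rather than accepting the
// new Failed phase.
func exampleMergeHoldsBackFailedPhase() v1.PodStatus {
	oldStatus := getPodStatus() // baseline Running status
	newStatus := getPodStatus()
	newStatus.Phase = v1.PodFailed
	// The merged status reports Running until all containers have stopped.
	return mergePodStatus(oldStatus, newStatus, true)
}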