k8s.io/kubernetes@v1.29.3/pkg/controller/daemon/update_test.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package daemon

import (
	"context"
	"reflect"
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/klog/v2/ktesting"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/daemon/util"
	testingclock "k8s.io/utils/clock/testing"
)

func TestDaemonSetUpdatesPods(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	maxUnavailable := 2
	addNodes(manager.nodeStore, 0, 5, nil)
	manager.dsStore.Add(ds)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
	markPodsReady(podControl.podStore)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt32(int32(maxUnavailable))
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	addNodes(manager.nodeStore, 0, 5, nil)
	manager.dsStore.Add(ds)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
	markPodsReady(podControl.podStore)

	// surge is the controlling amount
	maxSurge := 2
	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
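	// With MaxSurge=2 and MaxUnavailable=0 (newUpdateSurge pins
	// MaxUnavailable to zero), the rollout creates new pods alongside the
	// old ones and deletes an old pod only once the new pod on its node is
	// ready.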
	manager.dsStore.Update(ds)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 0, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 5%maxSurge, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5%maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}

func TestDaemonSetUpdatesPodsNotMatchTaintsWithMaxSurge(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)

	ds := newDaemonSet("foo")
	maxSurge := 1
	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
	tolerations := []v1.Toleration{
		{Key: "node-role.kubernetes.io/control-plane", Operator: v1.TolerationOpExists},
	}
	setDaemonSetToleration(ds, tolerations)
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	err = manager.dsStore.Add(ds)
	if err != nil {
		t.Fatal(err)
	}

	// Add five nodes and taint one of them
	addNodes(manager.nodeStore, 0, 5, nil)
	taints := []v1.Taint{
		{Key: "node-role.kubernetes.io/control-plane", Effect: v1.TaintEffectNoSchedule},
	}
	node := newNode("node-0", nil)
	setNodeTaint(node, taints)
	err = manager.nodeStore.Update(node)
	if err != nil {
		t.Fatal(err)
	}

	// Create DaemonSet with toleration
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
	markPodsReady(podControl.podStore)

	// RollingUpdate DaemonSet without toleration
	ds.Spec.Template.Spec.Tolerations = nil
	err = manager.dsStore.Update(ds)
	if err != nil {
		t.Fatal(err)
	}

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, 1, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxSurge, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	markPodsReady(podControl.podStore)

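	// Every untainted node now runs an updated pod; the final sync only has
	// to delete the one remaining old pod. Node-0 gets no replacement
	// because the updated template no longer tolerates its taint.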
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxSurge, 0)
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}

func TestDaemonSetUpdatesWhenNewPodIsNotReady(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	maxUnavailable := 3
	addNodes(manager.nodeStore, 0, 5, nil)
	err = manager.dsStore.Add(ds)
	if err != nil {
		t.Fatal(err)
	}
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
	markPodsReady(podControl.podStore)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt32(int32(maxUnavailable))
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	err = manager.dsStore.Update(ds)
	if err != nil {
		t.Fatal(err)
	}

	// new pods are not ready, so numUnavailable == maxUnavailable
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, maxUnavailable, 0)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, maxUnavailable, 0, 0)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	maxUnavailable := 3
	addNodes(manager.nodeStore, 0, 5, nil)
	err = manager.dsStore.Add(ds)
	if err != nil {
		t.Fatal(err)
	}
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt32(int32(maxUnavailable))
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	err = manager.dsStore.Update(ds)
	if err != nil {
		t.Fatal(err)
	}

	// all old pods are unavailable, so they should be removed
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	clearExpectations(t, manager, ds, podControl)
}

func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	addNodes(manager.nodeStore, 0, 5, nil)
	manager.dsStore.Add(ds)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	maxSurge := 3
	ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
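	// Unlike the previous tests, markPodsReady was never called, so none of
	// the old pods count as available; the first sync below may therefore
	// create replacements on every node at once rather than in MaxSurge
	// batches.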
	manager.dsStore.Update(ds)

	// all old pods are unavailable, so they should be surged
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(100, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	// waiting for pods to go ready, old pods are deleted
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(200, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 5, 0)

	setPodReadiness(t, manager, true, 5, func(_ *v1.Pod) bool { return true })
	ds.Spec.MinReadySeconds = 15
	ds.Spec.Template.Spec.Containers[0].Image = "foo3/bar3"
	manager.dsStore.Update(ds)

	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(300, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)

	hash, err := currentDSHash(manager, ds)
	if err != nil {
		t.Fatal(err)
	}
	currentPods := podsByNodeMatchingHash(manager, hash)
	// mark two updated pods as ready at time 300
	setPodReadiness(t, manager, true, 2, func(pod *v1.Pod) bool {
		return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
	})
	// mark one of the old pods that is on a node without an updated pod as unready
	setPodReadiness(t, manager, false, 1, func(pod *v1.Pod) bool {
		nodeName, err := util.GetTargetNodeName(pod)
		if err != nil {
			t.Fatal(err)
		}
		return pod.Labels[apps.ControllerRevisionHashLabelKey] != hash && len(currentPods[nodeName]) == 0
	})

	// the new pods should still be considered waiting to hit min readiness, so one pod should be created to replace
	// the deleted old pod
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(310, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)

	// the new pods are now considered available, so delete the old pods
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(320, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 1, 3, 0)

	// mark all updated pods as ready at time 320
	currentPods = podsByNodeMatchingHash(manager, hash)
	setPodReadiness(t, manager, true, 3, func(pod *v1.Pod) bool {
		return pod.Labels[apps.ControllerRevisionHashLabelKey] == hash
	})

	// the new pods are now considered available, so delete the old pods
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(340, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 2, 0)

	// controller has completed upgrade
	manager.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(350, 0))
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
}

func podsByNodeMatchingHash(dsc *daemonSetsController, hash string) map[string][]string {
	byNode := make(map[string][]string)
	for _, obj := range dsc.podStore.List() {
		pod := obj.(*v1.Pod)
		if pod.Labels[apps.ControllerRevisionHashLabelKey] != hash {
			continue
		}
		nodeName, err := util.GetTargetNodeName(pod)
		if err != nil {
			panic(err)
		}
		byNode[nodeName] = append(byNode[nodeName], pod.Name)
	}
	return byNode
}
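
// setPodReadiness flips the PodReady condition to the requested state on up
// to count pods accepted by fn, backdating LastTransitionTime to the
// controller's fake clock so that MinReadySeconds calculations see a
// deterministic transition time. It fails the test if fewer than count pods
// could be updated.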
func setPodReadiness(t *testing.T, dsc *daemonSetsController, ready bool, count int, fn func(*v1.Pod) bool) {
	t.Helper()
	logger, _ := ktesting.NewTestContext(t)
	for _, obj := range dsc.podStore.List() {
		if count <= 0 {
			break
		}
		pod := obj.(*v1.Pod)
		if pod.DeletionTimestamp != nil {
			continue
		}
		if podutil.IsPodReady(pod) == ready {
			continue
		}
		if !fn(pod) {
			continue
		}
		condition := v1.PodCondition{Type: v1.PodReady}
		if ready {
			condition.Status = v1.ConditionTrue
		} else {
			condition.Status = v1.ConditionFalse
		}
		if !podutil.UpdatePodCondition(&pod.Status, &condition) {
			t.Fatal("failed to update pod")
		}
		// TODO: workaround UpdatePodCondition calling time.Now() directly
		setCondition := podutil.GetPodReadyCondition(pod.Status)
		setCondition.LastTransitionTime.Time = dsc.failedPodsBackoff.Clock.Now()
		logger.Info("marked pod ready", "pod", pod.Name, "ready", ready)
		count--
	}
	if count > 0 {
		t.Fatalf("could not mark %d pods ready=%t", count, ready)
	}
}

func currentDSHash(dsc *daemonSetsController, ds *apps.DaemonSet) (string, error) {
	// Construct histories of the DaemonSet, and get the hash of current history
	cur, _, err := dsc.constructHistory(context.TODO(), ds)
	if err != nil {
		return "", err
	}
	return cur.Labels[apps.DefaultDaemonSetUniqueLabelKey], nil
}

func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ds := newDaemonSet("foo")
	manager, podControl, _, err := newTestController(ctx, ds)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}
	maxUnavailable := 3
	addNodes(manager.nodeStore, 0, 5, nil)
	manager.dsStore.Add(ds)
	expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)

	ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
	intStr := intstr.FromInt32(int32(maxUnavailable))
	ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
	manager.dsStore.Update(ds)

	// template is not changed, so no pod should be removed
	clearExpectations(t, manager, ds, podControl)
	expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	clearExpectations(t, manager, ds, podControl)
}

func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
	zero := intstr.FromInt32(0)
	return apps.DaemonSetUpdateStrategy{
		Type: apps.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &apps.RollingUpdateDaemonSet{
			MaxUnavailable: &zero,
			MaxSurge:       &value,
		},
	}
}

func newUpdateUnavailable(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
	return apps.DaemonSetUpdateStrategy{
		Type: apps.RollingUpdateDaemonSetStrategyType,
		RollingUpdate: &apps.RollingUpdateDaemonSet{
			MaxUnavailable: &value,
		},
	}
}

func TestGetUnavailableNumbers(t *testing.T) {
	cases := []struct {
		name                   string
		ManagerFunc            func(ctx context.Context) *daemonSetsController
		ds                     *apps.DaemonSet
		nodeToPods             map[string][]*v1.Pod
		maxSurge               int
		maxUnavailable         int
		desiredNumberScheduled int
		emptyNodes             int
		Err                    error
	}{
		{
			name: "No nodes",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				return manager
			},
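			// An empty cluster: with no nodes there are no desired pods, so
			// every derived count should come back zero.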
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
				return ds
			}(),
			nodeToPods:     make(map[string][]*v1.Pod),
			maxUnavailable: 0,
			emptyNodes:     0,
		},
		{
			name: "Two nodes with ready pods",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(1))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				markPodReady(pod1)
				mapping["node-0"] = []*v1.Pod{pod0}
				mapping["node-1"] = []*v1.Pod{pod1}
				return mapping
			}(),
			maxUnavailable:         1,
			desiredNumberScheduled: 2,
			emptyNodes:             0,
		},
		{
			name: "Two nodes, one node without pods",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				mapping["node-0"] = []*v1.Pod{pod0}
				return mapping
			}(),
			maxUnavailable:         1,
			desiredNumberScheduled: 2,
			emptyNodes:             1,
		},
		{
			name: "Two nodes, one node without pods, surge",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(0))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				mapping["node-0"] = []*v1.Pod{pod0}
				return mapping
			}(),
			maxUnavailable:         1,
			desiredNumberScheduled: 2,
			emptyNodes:             1,
		},
		{
			name: "Two nodes with pods, MaxUnavailable in percents",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				markPodReady(pod1)
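				// 50% of desiredNumberScheduled (2) resolves to a
				// maxUnavailable of 1.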
				mapping["node-0"] = []*v1.Pod{pod0}
				mapping["node-1"] = []*v1.Pod{pod1}
				return mapping
			}(),
			maxUnavailable:         1,
			desiredNumberScheduled: 2,
			emptyNodes:             0,
		},
		{
			name: "Two nodes with pods, MaxUnavailable in percents, surge",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("50%"))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				markPodReady(pod1)
				mapping["node-0"] = []*v1.Pod{pod0}
				mapping["node-1"] = []*v1.Pod{pod1}
				return mapping
			}(),
			maxSurge:               1,
			maxUnavailable:         0,
			desiredNumberScheduled: 2,
			emptyNodes:             0,
		},
		{
			name: "Two nodes with pods, MaxUnavailable is 100%, surge",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 2, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromString("100%"))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
				markPodReady(pod0)
				markPodReady(pod1)
				mapping["node-0"] = []*v1.Pod{pod0}
				mapping["node-1"] = []*v1.Pod{pod1}
				return mapping
			}(),
			maxSurge:               2,
			maxUnavailable:         0,
			desiredNumberScheduled: 2,
			emptyNodes:             0,
		},
		{
			name: "Two nodes with pods, MaxUnavailable in percents, pod terminating",
			ManagerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				addNodes(manager.nodeStore, 0, 3, nil)
				return manager
			},
			ds: func() *apps.DaemonSet {
				ds := newDaemonSet("x")
				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromString("50%"))
				return ds
			}(),
			nodeToPods: func() map[string][]*v1.Pod {
				mapping := make(map[string][]*v1.Pod)
				pod0 := newPod("pod-0", "node-0", simpleDaemonSetLabel, nil)
				pod1 := newPod("pod-1", "node-1", simpleDaemonSetLabel, nil)
				now := metav1.Now()
				markPodReady(pod0)
				markPodReady(pod1)
				pod1.DeletionTimestamp = &now
				mapping["node-0"] = []*v1.Pod{pod0}
				mapping["node-1"] = []*v1.Pod{pod1}
				return mapping
			}(),
			maxUnavailable:         2,
			desiredNumberScheduled: 3,
			emptyNodes:             1,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			manager := c.ManagerFunc(ctx)
			manager.dsStore.Add(c.ds)
			nodeList, err := manager.nodeLister.List(labels.Everything())
			if err != nil {
				t.Fatalf("error listing nodes: %v", err)
			}
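			// updatedDesiredNodeCounts resolves the strategy's intstr fields
			// (absolute counts or percentages) against the current node list
			// into concrete surge/unavailable pod counts.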
			maxSurge, maxUnavailable, desiredNumberScheduled, err := manager.updatedDesiredNodeCounts(ctx, c.ds, nodeList, c.nodeToPods)
			if err != nil {
				if c.Err == nil {
					t.Fatalf("Unexpected error: %v", err)
				}
				if c.Err != err {
					t.Fatalf("Expected error: %v but got: %v", c.Err, err)
				}
				return
			}
			if maxSurge != c.maxSurge || maxUnavailable != c.maxUnavailable || desiredNumberScheduled != c.desiredNumberScheduled {
				t.Errorf("Wrong values. maxSurge: %d, expected %d, maxUnavailable: %d, expected: %d, desiredNumberScheduled: %d, expected: %d", maxSurge, c.maxSurge, maxUnavailable, c.maxUnavailable, desiredNumberScheduled, c.desiredNumberScheduled)
			}
			var emptyNodes int
			for _, pods := range c.nodeToPods {
				if len(pods) == 0 {
					emptyNodes++
				}
			}
			if emptyNodes != c.emptyNodes {
				t.Errorf("expected numEmpty to be %d, was %d", c.emptyNodes, emptyNodes)
			}
		})
	}
}

func TestControlledHistories(t *testing.T) {
	ds1 := newDaemonSet("ds1")
	crOfDs1 := newControllerRevision(ds1.GetName()+"-x1", ds1.GetNamespace(), ds1.Spec.Template.Labels,
		[]metav1.OwnerReference{*metav1.NewControllerRef(ds1, controllerKind)})
	orphanCrInSameNsWithDs1 := newControllerRevision(ds1.GetName()+"-x2", ds1.GetNamespace(), ds1.Spec.Template.Labels, nil)
	orphanCrNotInSameNsWithDs1 := newControllerRevision(ds1.GetName()+"-x3", ds1.GetNamespace()+"-other", ds1.Spec.Template.Labels, nil)
	cases := []struct {
		name                      string
		managerFunc               func(ctx context.Context) *daemonSetsController
		historyCRAll              []*apps.ControllerRevision
		expectControllerRevisions []*apps.ControllerRevision
	}{
		{
			name: "controller revision in the same namespace",
			managerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx, ds1, crOfDs1, orphanCrInSameNsWithDs1)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				manager.dsStore.Add(ds1)
				return manager
			},
			historyCRAll:              []*apps.ControllerRevision{crOfDs1, orphanCrInSameNsWithDs1},
			expectControllerRevisions: []*apps.ControllerRevision{crOfDs1, orphanCrInSameNsWithDs1},
		},
		{
			name: "Skip adopting the controller revision in namespace other than the one in which DS lives",
			managerFunc: func(ctx context.Context) *daemonSetsController {
				manager, _, _, err := newTestController(ctx, ds1, orphanCrNotInSameNsWithDs1)
				if err != nil {
					t.Fatalf("error creating DaemonSets controller: %v", err)
				}
				manager.dsStore.Add(ds1)
				return manager
			},
			historyCRAll:              []*apps.ControllerRevision{orphanCrNotInSameNsWithDs1},
			expectControllerRevisions: []*apps.ControllerRevision{},
		},
	}
	for _, c := range cases {
		_, ctx := ktesting.NewTestContext(t)
		manager := c.managerFunc(ctx)
		for _, h := range c.historyCRAll {
			manager.historyStore.Add(h)
		}
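		// controlledHistories should return owned revisions, adopt orphaned
		// revisions in the DaemonSet's own namespace, and ignore revisions
		// that live in other namespaces.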
		crList, err := manager.controlledHistories(context.TODO(), ds1)
		if err != nil {
			t.Fatalf("Test case: %s. Unexpected error: %v", c.name, err)
		}
		if len(crList) != len(c.expectControllerRevisions) {
			t.Errorf("Test case: %s, expected controllerrevision count %d but got: %d",
				c.name, len(c.expectControllerRevisions), len(crList))
		} else {
			// check that the controller revisions match
			for _, cr := range crList {
				found := false
				for _, expectCr := range c.expectControllerRevisions {
					if reflect.DeepEqual(cr, expectCr) {
						found = true
						break
					}
				}
				if !found {
					t.Errorf("Test case: %s, controllerrevision %v not expected",
						c.name, cr)
				}
			}
			t.Logf("Test case: %s done", c.name)
		}
	}
}