k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/integration/replicationcontroller/replicationcontroller_test.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package replicationcontroller

import (
	"context"
	"fmt"
	"reflect"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/retry"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/replication"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/ktesting"
)

const (
	interval = 100 * time.Millisecond
	timeout  = 60 * time.Second
)

func labelMap() map[string]string {
	return map[string]string{"foo": "bar"}
}

func newRC(name, namespace string, replicas int) *v1.ReplicationController {
	replicasCopy := int32(replicas)
	return &v1.ReplicationController{
		TypeMeta: metav1.TypeMeta{
			Kind:       "ReplicationController",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Spec: v1.ReplicationControllerSpec{
			Selector: labelMap(),
			Replicas: &replicasCopy,
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: labelMap(),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "fake-name",
							Image: "fakeimage",
						},
					},
				},
			},
		},
	}
}

func newMatchingPod(podName, namespace string) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      podName,
			Namespace: namespace,
			Labels:    labelMap(),
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "fake-name",
					Image: "fakeimage",
				},
			},
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
		},
	}
}

func rmSetup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *replication.ReplicationManager, informers.SharedInformerFactory, clientset.Interface) {
	tCtx := ktesting.Init(t)
	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

	config := restclient.CopyConfig(server.ClientConfig)
	clientSet, err := clientset.NewForConfig(config)
	if err != nil {
		t.Fatalf("Failed to create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "rc-informers")), resyncPeriod)

	rm := replication.NewReplicationManager(
		tCtx,
		informers.Core().V1().Pods(),
		informers.Core().V1().ReplicationControllers(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replication-controller")),
		replication.BurstReplicas,
	)
	newTeardown := func() {
		tCtx.Cancel("tearing down controller")
		server.TearDownFn()
	}

	return tCtx, newTeardown, rm, informers, clientSet
}

// Run RC controller and informers
func runControllerAndInformers(t *testing.T, rm *replication.ReplicationManager, informers informers.SharedInformerFactory, podNum int) func() {
	ctx, cancelFn := context.WithCancel(context.Background())
	informers.Start(ctx.Done())
	waitToObservePods(t, informers.Core().V1().Pods().Informer(), podNum)
	go rm.Run(ctx, 5)
	return cancelFn
}

// wait for the podInformer to observe the pods. Call this function before
// running the RC controller to prevent the rc manager from creating new pods
// rather than adopting the existing ones.
func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podNum int) {
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		return len(objects) == podNum, nil
	}); err != nil {
		t.Fatalf("Error encountered when waiting for podInformer to observe the pods: %v", err)
	}
}

func createRCsPods(t *testing.T, clientSet clientset.Interface, rcs []*v1.ReplicationController, pods []*v1.Pod) ([]*v1.ReplicationController, []*v1.Pod) {
	var createdRCs []*v1.ReplicationController
	var createdPods []*v1.Pod
	for _, rc := range rcs {
		createdRC, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(context.TODO(), rc, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create replication controller %s: %v", rc.Name, err)
		}
		createdRCs = append(createdRCs, createdRC)
	}
	for _, pod := range pods {
		createdPod, err := clientSet.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create pod %s: %v", pod.Name, err)
		}
		createdPods = append(createdPods, createdPod)
	}

	return createdRCs, createdPods
}

// Verify .Status.Replicas is equal to .Spec.Replicas
func waitRCStable(t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
	rcClient := clientSet.CoreV1().ReplicationControllers(rc.Namespace)
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return newRC.Status.Replicas == *rc.Spec.Replicas, nil
	}); err != nil {
		t.Fatalf("Failed to verify .Status.Replicas is equal to .Spec.Replicas for rc %s: %v", rc.Name, err)
	}
}

// Update .Spec.Replicas to replicas and verify .Status.Replicas is changed
// accordingly
func scaleRC(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, replicas int32) {
	rcClient := c.CoreV1().ReplicationControllers(rc.Namespace)
	rc = updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
		*rc.Spec.Replicas = replicas
	})
	waitRCStable(t, c, rc)
}

func updatePod(t *testing.T, podClient typedv1.PodInterface, podName string, updateFunc func(*v1.Pod)) *v1.Pod {
	var pod *v1.Pod
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newPod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updateFunc(newPod)
		pod, err = podClient.Update(context.TODO(), newPod, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("Failed to update pod %s: %v", podName, err)
	}
	return pod
}

func updatePodStatus(t *testing.T, podClient typedv1.PodInterface, pod *v1.Pod, updateStatusFunc func(*v1.Pod)) {
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updateStatusFunc(newPod)
		_, err = podClient.UpdateStatus(context.TODO(), newPod, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("Failed to update status of pod %s: %v", pod.Name, err)
	}
}

func getPods(t *testing.T, podClient typedv1.PodInterface, labelMap map[string]string) *v1.PodList {
	podSelector := labels.Set(labelMap).AsSelector()
	options := metav1.ListOptions{LabelSelector: podSelector.String()}
	pods, err := podClient.List(context.TODO(), options)
	if err != nil {
		t.Fatalf("Failed obtaining a list of pods that match the pod labels %v: %v", labelMap, err)
	}
	return pods
}

func updateRC(t *testing.T, rcClient typedv1.ReplicationControllerInterface, rcName string, updateFunc func(*v1.ReplicationController)) *v1.ReplicationController {
	var rc *v1.ReplicationController
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		newRC, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updateFunc(newRC)
		rc, err = rcClient.Update(context.TODO(), newRC, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("Failed to update rc %s: %v", rcName, err)
	}
	return rc
}

// Verify ControllerRef of a RC pod that has incorrect attributes is automatically patched by the RC
func testPodControllerRefPatch(t *testing.T, c clientset.Interface, pod *v1.Pod, ownerReference *metav1.OwnerReference, rc *v1.ReplicationController, expectedOwnerReferenceNum int) {
	ns := rc.Namespace
	podClient := c.CoreV1().Pods(ns)
	updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
		pod.OwnerReferences = []metav1.OwnerReference{*ownerReference}
	})

	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return metav1.GetControllerOf(newPod) != nil, nil
	}); err != nil {
		t.Fatalf("Failed to verify ControllerRef for the pod %s is not nil: %v", pod.Name, err)
	}

	newPod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain pod %s: %v", pod.Name, err)
	}
	controllerRef := metav1.GetControllerOf(newPod)
	if controllerRef.UID != rc.UID {
		t.Fatalf("RC owner of the pod %s has a different UID: Expected %v, got %v", newPod.Name, rc.UID, controllerRef.UID)
	}
	ownerReferenceNum := len(newPod.GetOwnerReferences())
	if ownerReferenceNum != expectedOwnerReferenceNum {
		t.Fatalf("Unexpected number of owner references for pod %s: Expected %d, got %d", newPod.Name, expectedOwnerReferenceNum, ownerReferenceNum)
	}
}

func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1.PodList, conditionStatus v1.ConditionStatus, lastTransitionTime time.Time) {
	replicas := int32(len(pods.Items))
	var readyPods int32
	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		readyPods = 0
		for i := range pods.Items {
			pod := &pods.Items[i]
			if podutil.IsPodReady(pod) {
				readyPods++
				continue
			}
			pod.Status.Phase = v1.PodRunning
			_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
			if condition != nil {
				condition.Status = conditionStatus
				condition.LastTransitionTime = metav1.Time{Time: lastTransitionTime}
			} else {
				condition = &v1.PodCondition{
					Type:               v1.PodReady,
					Status:             conditionStatus,
					LastTransitionTime: metav1.Time{Time: lastTransitionTime},
				}
				pod.Status.Conditions = append(pod.Status.Conditions, *condition)
			}
			_, err := clientSet.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
			if err != nil {
				// When status fails to be updated, we continue to next pod
				continue
			}
			readyPods++
		}
		return readyPods >= replicas, nil
	})
	if err != nil {
		t.Fatalf("failed to mark all ReplicationController pods to ready: %v", err)
	}
}

func testScalingUsingScaleSubresource(t *testing.T, c clientset.Interface, rc *v1.ReplicationController, replicas int32) {
	ns := rc.Namespace
	rcClient := c.CoreV1().ReplicationControllers(ns)
	newRC, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err)
	}
	scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(context.TODO(), rc.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain scale subresource for rc %s: %v", rc.Name, err)
	}
	if scale.Spec.Replicas != *newRC.Spec.Replicas {
		t.Fatalf("Scale subresource for rc %s does not match .Spec.Replicas: expected %d, got %d", rc.Name, *newRC.Spec.Replicas, scale.Spec.Replicas)
	}

	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		scale, err := c.CoreV1().ReplicationControllers(ns).GetScale(context.TODO(), rc.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		scale.Spec.Replicas = replicas
		_, err = c.CoreV1().ReplicationControllers(ns).UpdateScale(context.TODO(), rc.Name, scale, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("Failed to set .Spec.Replicas of scale subresource for rc %s: %v", rc.Name, err)
	}

	newRC, err = rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to obtain rc %s: %v", rc.Name, err)
	}
	if *newRC.Spec.Replicas != replicas {
		t.Fatalf(".Spec.Replicas of rc %s does not match its scale subresource: expected %d, got %d", rc.Name, replicas, *newRC.Spec.Replicas)
	}
}

func TestAdoption(t *testing.T) {
	boolPtr := func(b bool) *bool { return &b }
	testCases := []struct {
		name                    string
		existingOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
		expectedOwnerReferences func(rc *v1.ReplicationController) []metav1.OwnerReference
	}{
		{
			"pod refers rc as an owner, not a controller",
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"}}
			},
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
			},
		},
		{
			"pod doesn't have owner references",
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{}
			},
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true), BlockOwnerDeletion: boolPtr(true)}}
			},
		},
		{
			"pod refers rc as a controller",
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
			},
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)}}
			},
		},
		{
			"pod refers other rc as the controller, refers the rc as an owner",
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{
					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
					{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
				}
			},
			func(rc *v1.ReplicationController) []metav1.OwnerReference {
				return []metav1.OwnerReference{
					{UID: "1", Name: "anotherRC", APIVersion: "v1", Kind: "ReplicationController", Controller: boolPtr(true)},
					{UID: rc.UID, Name: rc.Name, APIVersion: "v1", Kind: "ReplicationController"},
				}
			},
		},
	}
	for i, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tCtx, closeFn, rm, informers, clientSet := rmSetup(t)
			defer closeFn()
			ns := framework.CreateNamespaceOrDie(clientSet, fmt.Sprintf("rc-adoption-%d", i), t)
			defer framework.DeleteNamespaceOrDie(clientSet, ns, t)

			rcClient := clientSet.CoreV1().ReplicationControllers(ns.Name)
			podClient := clientSet.CoreV1().Pods(ns.Name)
			const rcName = "rc"
			rc, err := rcClient.Create(tCtx, newRC(rcName, ns.Name, 1), metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("Failed to create replication controllers: %v", err)
			}
			podName := fmt.Sprintf("pod%d", i)
			pod := newMatchingPod(podName, ns.Name)
			pod.OwnerReferences = tc.existingOwnerReferences(rc)
			_, err = podClient.Create(tCtx, pod, metav1.CreateOptions{})
			if err != nil {
				t.Fatalf("Failed to create Pod: %v", err)
			}

			stopControllers := runControllerAndInformers(t, rm, informers, 1)
			defer stopControllers()
			if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
				updatedPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
				if err != nil {
					return false, err
				}
				e, a := tc.expectedOwnerReferences(rc), updatedPod.OwnerReferences
				if reflect.DeepEqual(e, a) {
					return true, nil
				}

				t.Logf("ownerReferences don't match, expect %v, got %v", e, a)
				return false, nil
			}); err != nil {
				t.Fatalf("test %q failed: %v", tc.name, err)
			}
		})
	}
}

func TestSpecReplicasChange(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 2)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// Update .Spec.Replicas and verify .Status.Replicas is changed accordingly
	scaleRC(t, c, rc, 3)
	scaleRC(t, c, rc, 0)
	scaleRC(t, c, rc, 2)

	// Add a template annotation change to test RC's status does update
	// without .Spec.Replicas change
	rcClient := c.CoreV1().ReplicationControllers(ns.Name)
	var oldGeneration int64
	newRC := updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
		oldGeneration = rc.Generation
		rc.Spec.Template.Annotations = map[string]string{"test": "annotation"}
	})
	savedGeneration := newRC.Generation
	if savedGeneration == oldGeneration {
		t.Fatalf("Failed to verify .Generation has incremented for rc %s", rc.Name)
	}

	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return newRC.Status.ObservedGeneration >= savedGeneration, nil
	}); err != nil {
		t.Fatalf("Failed to verify .Status.ObservedGeneration has incremented for rc %s: %v", rc.Name, err)
	}
}

func TestLogarithmicScaleDown(t *testing.T) {
	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.LogarithmicScaleDown, true)
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 2)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// get list of pods in the cluster
	pods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err)
	}

	// Wait 10 seconds and scale up, the new pod should be in a new logarithmic rank from the first 2
	time.Sleep(10 * time.Second)
	scaleRC(t, c, rc, 3)

	// scale back down, and confirm that the pods left in the namespace are the original ones
	// (meaning the 3rd one was deleted)
	scaleRC(t, c, rc, 2)

	newPods, err := c.CoreV1().Pods(ns.Name).List(tCtx, metav1.ListOptions{})
	if err != nil {
		t.Fatalf("failed to get pods in namespace %s: %+v", ns.Name, err)
	}

	if !apiequality.Semantic.DeepEqual(pods.Items, newPods.Items) {
		t.Fatalf("expected pods %+v, got %+v", pods.Items, newPods.Items)
	}
}

func TestDeletingAndFailedPods(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 2)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// Verify RC creates 2 pods
	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 2 {
		t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
	}

	// Set first pod as deleting pod
	// Set finalizers for the pod to simulate pending deletion status
	deletingPod := &pods.Items[0]
	updatePod(t, podClient, deletingPod.Name, func(pod *v1.Pod) {
		pod.Finalizers = []string{"fake.example.com/blockDeletion"}
	})
	if err := c.CoreV1().Pods(ns.Name).Delete(tCtx, deletingPod.Name, metav1.DeleteOptions{}); err != nil {
		t.Fatalf("Error deleting pod %s: %v", deletingPod.Name, err)
	}

	// Set second pod as failed pod
	failedPod := &pods.Items[1]
	updatePodStatus(t, podClient, failedPod, func(pod *v1.Pod) {
		pod.Status.Phase = v1.PodFailed
	})

	// Poll until 2 new pods have been created to replace deleting and failed pods
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		pods = getPods(t, podClient, labelMap())
		return len(pods.Items) == 4, nil
	}); err != nil {
		t.Fatalf("Failed to verify 2 new pods have been created (expected 4 pods): %v", err)
	}

	// Verify deleting and failed pods are among the four pods
	foundDeletingPod := false
	foundFailedPod := false
	for _, pod := range pods.Items {
		if pod.UID == deletingPod.UID {
			foundDeletingPod = true
		}
		if pod.UID == failedPod.UID {
			foundFailedPod = true
		}
	}
	// Verify deleting pod exists
	if !foundDeletingPod {
		t.Fatalf("expected deleting pod %s to exist, but it was not found", deletingPod.Name)
	}
	// Verify failed pod exists
	if !foundFailedPod {
		t.Fatalf("expected failed pod %s to exist, but it was not found", failedPod.Name)
	}
}

func TestOverlappingRCs(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-overlapping-rcs", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	// Create 2 RCs with identical selectors
	for i := 0; i < 2; i++ {
		// One RC has 1 replica, and another has 2 replicas
		rc := newRC(fmt.Sprintf("rc-%d", i+1), ns.Name, i+1)
		rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
		waitRCStable(t, c, rcs[0])
	}

	// Expect 3 total Pods to be created
	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 3 {
		t.Errorf("len(pods) = %d, want 3", len(pods.Items))
	}

	// Expect both RCs have .status.replicas = .spec.replicas
	for i := 0; i < 2; i++ {
		newRC, err := c.CoreV1().ReplicationControllers(ns.Name).Get(tCtx, fmt.Sprintf("rc-%d", i+1), metav1.GetOptions{})
		if err != nil {
			t.Fatalf("failed to obtain rc rc-%d: %v", i+1, err)
		}
		if newRC.Status.Replicas != *newRC.Spec.Replicas {
			t.Fatalf(".Status.Replicas %d is not equal to .Spec.Replicas %d", newRC.Status.Replicas, *newRC.Spec.Replicas)
		}
	}
}

func TestPodOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-pod-orphaning-and-adoption-when-labels-change", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 1)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// Orphaning: RC should remove OwnerReference from a pod when the pod's labels change to not match its labels
	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 1 {
		t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
	}
	pod := &pods.Items[0]

	// Start by verifying ControllerRef for the pod is not nil
	if metav1.GetControllerOf(pod) == nil {
		t.Fatalf("ControllerRef of pod %s is nil", pod.Name)
	}
	newLabelMap := map[string]string{"new-foo": "new-bar"}
	updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
		pod.Labels = newLabelMap
	})
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		pod = newPod
		return metav1.GetControllerOf(newPod) == nil, nil
	}); err != nil {
		t.Fatalf("Failed to verify ControllerRef for the pod %s is nil: %v", pod.Name, err)
	}

	// Adoption: RC should add ControllerRef to a pod when the pod's labels change to match its labels
	updatePod(t, podClient, pod.Name, func(pod *v1.Pod) {
		pod.Labels = labelMap()
	})
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newPod, err := podClient.Get(tCtx, pod.Name, metav1.GetOptions{})
		if err != nil {
			// If the pod is not found, it means the RC picks the pod for deletion (it is extra)
			// Verify there is only one pod in namespace and it has ControllerRef to the RC
			if !apierrors.IsNotFound(err) {
				return false, err
			}

			pods := getPods(t, podClient, labelMap())
			if len(pods.Items) != 1 {
				return false, fmt.Errorf("Expected 1 pod in current namespace, got %d", len(pods.Items))
			}
			// Set the pod accordingly
			pod = &pods.Items[0]
			return true, nil
		}
		// Always update the pod so that we can save a GET call to API server later
		pod = newPod
		// If the pod is found, verify the pod has a ControllerRef
		return metav1.GetControllerOf(newPod) != nil, nil
	}); err != nil {
		t.Fatalf("Failed to verify ControllerRef for pod %s is not nil: %v", pod.Name, err)
	}
	// Verify the pod has a ControllerRef to the RC
	// Do nothing if the pod is nil (i.e., has been picked for deletion)
	if pod != nil {
		controllerRef := metav1.GetControllerOf(pod)
		if controllerRef.UID != rc.UID {
			t.Fatalf("RC owner of the pod %s has a different UID: Expected %v, got %v", pod.Name, rc.UID, controllerRef.UID)
		}
	}
}

func TestGeneralPodAdoption(t *testing.T) {
	_, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-general-pod-adoption", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 1)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 1 {
		t.Fatalf("len(pods) = %d, want 1", len(pods.Items))
	}
	pod := &pods.Items[0]
	var falseVar = false

	// When the only OwnerReference of the pod points to another type of API object such as statefulset
	// with Controller=false, the RC should add a second OwnerReference (ControllerRef) pointing to itself
	// with Controller=true
	ownerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "apps/v1beta1", Kind: "StatefulSet", Name: rc.Name, Controller: &falseVar}
	testPodControllerRefPatch(t, c, pod, &ownerReference, rc, 2)

	// When the only OwnerReference of the pod points to the RC, but Controller=false
	ownerReference = metav1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &falseVar}
	testPodControllerRefPatch(t, c, pod, &ownerReference, rc, 1)
}

func TestReadyAndAvailableReplicas(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-ready-and-available-replicas", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 3)
	rc.Spec.MinReadySeconds = 3600
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// First verify no pod is available
	if rc.Status.AvailableReplicas != 0 {
		t.Fatalf("Unexpected .Status.AvailableReplicas: Expected 0, saw %d", rc.Status.AvailableReplicas)
	}

	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 3 {
		t.Fatalf("len(pods) = %d, want 3", len(pods.Items))
	}

	// Separate 3 pods into their own list
	firstPodList := &v1.PodList{Items: pods.Items[:1]}
	secondPodList := &v1.PodList{Items: pods.Items[1:2]}
	thirdPodList := &v1.PodList{Items: pods.Items[2:]}
	// First pod: Running, but not Ready
	// by setting the Ready condition to false with LastTransitionTime to be now
	setPodsReadyCondition(t, c, firstPodList, v1.ConditionFalse, time.Now())
	// Second pod: Running and Ready, but not Available
	// by setting LastTransitionTime to now
	setPodsReadyCondition(t, c, secondPodList, v1.ConditionTrue, time.Now())
	// Third pod: Running, Ready, and Available
	// by setting LastTransitionTime to more than 3600 seconds ago
	setPodsReadyCondition(t, c, thirdPodList, v1.ConditionTrue, time.Now().Add(-120*time.Minute))

	rcClient := c.CoreV1().ReplicationControllers(ns.Name)
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Verify 3 pods exist, 2 pods are Ready, and 1 pod is Available
		return newRC.Status.Replicas == 3 && newRC.Status.ReadyReplicas == 2 && newRC.Status.AvailableReplicas == 1, nil
	}); err != nil {
		t.Fatalf("Failed to verify number of Replicas, ReadyReplicas and AvailableReplicas of rc %s to be as expected: %v", rc.Name, err)
	}
}

func TestRCScaleSubresource(t *testing.T) {
	_, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-rc-scale-subresource", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	rc := newRC("rc", ns.Name, 1)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// Use scale subresource to scale up .Spec.Replicas to 3
	testScalingUsingScaleSubresource(t, c, rc, 3)
	// Use the scale subresource to scale down .Spec.Replicas to 0
	testScalingUsingScaleSubresource(t, c, rc, 0)
}

func TestExtraPodsAdoptionAndDeletion(t *testing.T) {
	_, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-extra-pods-adoption-and-deletion", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)

	rc := newRC("rc", ns.Name, 2)
	// Create 3 pods, RC should adopt only 2 of them
	podList := []*v1.Pod{}
	for i := 0; i < 3; i++ {
		pod := newMatchingPod(fmt.Sprintf("pod-%d", i+1), ns.Name)
		pod.Labels = labelMap()
		podList = append(podList, pod)
	}
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, podList)
	rc = rcs[0]
	stopControllers := runControllerAndInformers(t, rm, informers, 3)
	defer stopControllers()
	waitRCStable(t, c, rc)

	// Verify the extra pod is deleted eventually by determining whether number of
	// all pods within namespace matches .spec.replicas of the RC (2 in this case)
	podClient := c.CoreV1().Pods(ns.Name)
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		// All pods have labelMap as their labels
		pods := getPods(t, podClient, labelMap())
		return int32(len(pods.Items)) == *rc.Spec.Replicas, nil
	}); err != nil {
		t.Fatalf("Failed to verify number of all pods within current namespace matches .spec.replicas of rc %s: %v", rc.Name, err)
	}
}

func TestFullyLabeledReplicas(t *testing.T) {
	tCtx, closeFn, rm, informers, c := rmSetup(t)
	defer closeFn()
	ns := framework.CreateNamespaceOrDie(c, "test-fully-labeled-replicas", t)
	defer framework.DeleteNamespaceOrDie(c, ns, t)
	stopControllers := runControllerAndInformers(t, rm, informers, 0)
	defer stopControllers()

	extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
	rc := newRC("rc", ns.Name, 2)
	rcs, _ := createRCsPods(t, c, []*v1.ReplicationController{rc}, []*v1.Pod{})
	rc = rcs[0]
	waitRCStable(t, c, rc)

	// Change RC's template labels to have extra labels, but not its selector
	rcClient := c.CoreV1().ReplicationControllers(ns.Name)
	updateRC(t, rcClient, rc.Name, func(rc *v1.ReplicationController) {
		rc.Spec.Template.Labels = extraLabelMap
	})

	// Set one of the pods to have extra labels
	podClient := c.CoreV1().Pods(ns.Name)
	pods := getPods(t, podClient, labelMap())
	if len(pods.Items) != 2 {
		t.Fatalf("len(pods) = %d, want 2", len(pods.Items))
	}
	fullyLabeledPod := &pods.Items[0]
	updatePod(t, podClient, fullyLabeledPod.Name, func(pod *v1.Pod) {
		pod.Labels = extraLabelMap
	})

	// Verify only one pod is fully labeled
	if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
		newRC, err := rcClient.Get(tCtx, rc.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return (newRC.Status.Replicas == 2 && newRC.Status.FullyLabeledReplicas == 1), nil
	}); err != nil {
		t.Fatalf("Failed to verify only one pod is fully labeled: %v", err)
	}
}