k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/apps/replica_set.go (about) 1 /* 2 Copyright 2016 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package apps 18 19 import ( 20 "context" 21 "encoding/json" 22 "fmt" 23 "strings" 24 "time" 25 26 appsv1 "k8s.io/api/apps/v1" 27 autoscalingv1 "k8s.io/api/autoscaling/v1" 28 "k8s.io/apimachinery/pkg/types" 29 "k8s.io/apimachinery/pkg/watch" 30 "k8s.io/client-go/tools/cache" 31 watchtools "k8s.io/client-go/tools/watch" 32 "k8s.io/client-go/util/retry" 33 34 v1 "k8s.io/api/core/v1" 35 apierrors "k8s.io/apimachinery/pkg/api/errors" 36 "k8s.io/apimachinery/pkg/api/resource" 37 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 38 "k8s.io/apimachinery/pkg/labels" 39 "k8s.io/apimachinery/pkg/runtime/schema" 40 "k8s.io/apimachinery/pkg/util/rand" 41 "k8s.io/apimachinery/pkg/util/uuid" 42 "k8s.io/apimachinery/pkg/util/wait" 43 "k8s.io/kubernetes/pkg/controller/replicaset" 44 "k8s.io/kubernetes/test/e2e/framework" 45 e2epod "k8s.io/kubernetes/test/e2e/framework/pod" 46 e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset" 47 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" 48 imageutils "k8s.io/kubernetes/test/utils/image" 49 admissionapi "k8s.io/pod-security-admission/api" 50 51 "github.com/onsi/ginkgo/v2" 52 "github.com/onsi/gomega" 53 ) 54 55 const ( 56 rsRetryTimeout = 2 * time.Minute 57 ) 58 59 func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string, args []string) *appsv1.ReplicaSet { 60 zero := int64(0) 61 return &appsv1.ReplicaSet{ 62 ObjectMeta: metav1.ObjectMeta{ 63 Name: rsName, 64 Labels: rsPodLabels, 65 }, 66 Spec: appsv1.ReplicaSetSpec{ 67 Selector: &metav1.LabelSelector{ 68 MatchLabels: rsPodLabels, 69 }, 70 Replicas: &replicas, 71 Template: v1.PodTemplateSpec{ 72 ObjectMeta: metav1.ObjectMeta{ 73 Labels: rsPodLabels, 74 }, 75 Spec: v1.PodSpec{ 76 TerminationGracePeriodSeconds: &zero, 77 Containers: []v1.Container{ 78 { 79 Name: imageName, 80 Image: image, 81 Args: args, 82 }, 83 }, 84 }, 85 }, 86 }, 87 } 88 } 89 90 func newPodQuota(name, number string) *v1.ResourceQuota { 91 return &v1.ResourceQuota{ 92 ObjectMeta: metav1.ObjectMeta{ 93 Name: name, 94 }, 95 Spec: v1.ResourceQuotaSpec{ 96 Hard: v1.ResourceList{ 97 v1.ResourcePods: resource.MustParse(number), 98 }, 99 }, 100 } 101 } 102 103 var _ = SIGDescribe("ReplicaSet", func() { 104 f := framework.NewDefaultFramework("replicaset") 105 f.NamespacePodSecurityLevel = admissionapi.LevelBaseline 106 107 /* 108 Release: v1.9 109 Testname: Replica Set, run basic image 110 Description: Create a ReplicaSet with a Pod and a single Container. Make sure that the Pod is running. Pod SHOULD send a valid response when queried. 111 */ 112 framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) { 113 testReplicaSetServeImageOrFail(ctx, f, "basic", imageutils.GetE2EImage(imageutils.Agnhost)) 114 }) 115 116 ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) { 117 // requires private images 118 e2eskipper.SkipUnlessProviderIs("gce", "gke") 119 privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate) 120 testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage()) 121 }) 122 123 ginkgo.It("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) { 124 testReplicaSetConditionCheck(ctx, f) 125 }) 126 127 /* 128 Release: v1.13 129 Testname: Replica Set, adopt matching pods and release non matching pods 130 Description: A Pod is created, then a Replica Set (RS) whose label selector will match the Pod. The RS MUST either adopt the Pod or delete and replace it with a new Pod. When the labels on one of the Pods owned by the RS change to no longer match the RS's label selector, the RS MUST release the Pod and update the Pod's owner references 131 */ 132 framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func(ctx context.Context) { 133 testRSAdoptMatchingAndReleaseNotMatching(ctx, f) 134 }) 135 136 /* 137 Release: v1.21 138 Testname: ReplicaSet, completes the scaling of a ReplicaSet subresource 139 Description: Create a ReplicaSet (RS) with a single Pod. The Pod MUST be verified 140 that it is running. The RS MUST get and verify the scale subresource count. 141 The RS MUST update and verify the scale subresource. The RS MUST patch and verify 142 a scale subresource. 143 */ 144 framework.ConformanceIt("Replicaset should have a working scale subresource", func(ctx context.Context) { 145 testRSScaleSubresources(ctx, f) 146 }) 147 148 /* 149 Release: v1.21 150 Testname: ReplicaSet, is created, Replaced and Patched 151 Description: Create a ReplicaSet (RS) with a single Pod. The Pod MUST be verified 152 that it is running. The RS MUST scale to two replicas and verify the scale count 153 The RS MUST be patched and verify that patch succeeded. 154 */ 155 framework.ConformanceIt("Replace and Patch tests", func(ctx context.Context) { 156 testRSLifeCycle(ctx, f) 157 }) 158 159 /* 160 Release: v1.22 161 Testname: ReplicaSet, list and delete a collection of ReplicaSets 162 Description: When a ReplicaSet is created it MUST succeed. It 163 MUST succeed when listing ReplicaSets via a label selector. It 164 MUST succeed when deleting the ReplicaSet via deleteCollection. 165 */ 166 framework.ConformanceIt("should list and delete a collection of ReplicaSets", func(ctx context.Context) { 167 listRSDeleteCollection(ctx, f) 168 169 }) 170 171 /* Release: v1.22 172 Testname: ReplicaSet, status sub-resource 173 Description: Create a ReplicaSet resource which MUST succeed. 174 Attempt to read, update and patch its status sub-resource; all 175 mutating sub-resource operations MUST be visible to subsequent reads. 176 */ 177 framework.ConformanceIt("should validate Replicaset Status endpoints", func(ctx context.Context) { 178 testRSStatus(ctx, f) 179 }) 180 }) 181 182 // A basic test to check the deployment of an image using a ReplicaSet. The 183 // image serves its hostname which is checked for each replica. 184 func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) { 185 name := "my-hostname-" + test + "-" + string(uuid.NewUUID()) 186 replicas := int32(1) 187 188 // Create a ReplicaSet for a service that serves its hostname. 189 // The source for the Docker container kubernetes/serve_hostname is 190 // in contrib/for-demos/serve_hostname 191 framework.Logf("Creating ReplicaSet %s", name) 192 newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) 193 newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} 194 _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, newRS, metav1.CreateOptions{}) 195 framework.ExpectNoError(err) 196 197 // Check that pods for the new RS were created. 198 // TODO: Maybe switch PodsCreated to just check owner references. 199 pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas) 200 framework.ExpectNoError(err) 201 202 // Wait for the pods to enter the running state. Waiting loops until the pods 203 // are running so non-running pods cause a timeout for this test. 204 framework.Logf("Ensuring a pod for ReplicaSet %q is running", name) 205 running := int32(0) 206 for _, pod := range pods.Items { 207 if pod.DeletionTimestamp != nil { 208 continue 209 } 210 err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name) 211 if err != nil { 212 updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) 213 if getErr == nil { 214 err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) 215 } else { 216 err = fmt.Errorf("pod %q never run: %w", pod.Name, err) 217 } 218 } 219 framework.ExpectNoError(err) 220 framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) 221 running++ 222 } 223 224 // Sanity check 225 gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running pods: %+v", pods.Items) 226 227 // Verify that something is listening. 228 framework.Logf("Trying to dial the pod") 229 framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods)) 230 } 231 232 // 1. Create a quota restricting pods in the current namespace to 2. 233 // 2. Create a replica set that wants to run 3 pods. 234 // 3. Check replica set conditions for a ReplicaFailure condition. 235 // 4. Scale down the replica set and observe the condition is gone. 236 func testReplicaSetConditionCheck(ctx context.Context, f *framework.Framework) { 237 c := f.ClientSet 238 namespace := f.Namespace.Name 239 name := "condition-test" 240 241 ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name)) 242 quota := newPodQuota(name, "2") 243 _, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{}) 244 framework.ExpectNoError(err) 245 246 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 247 quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{}) 248 if err != nil { 249 return false, err 250 } 251 quantity := resource.MustParse("2") 252 podQuota := quota.Status.Hard[v1.ResourcePods] 253 return (&podQuota).Cmp(quantity) == 0, nil 254 }) 255 if wait.Interrupted(err) { 256 err = fmt.Errorf("resource quota %q never synced", name) 257 } 258 framework.ExpectNoError(err) 259 260 ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name)) 261 rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil) 262 rs, err = c.AppsV1().ReplicaSets(namespace).Create(ctx, rs, metav1.CreateOptions{}) 263 framework.ExpectNoError(err) 264 265 ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name)) 266 generation := rs.Generation 267 conditions := rs.Status.Conditions 268 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 269 rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) 270 if err != nil { 271 return false, err 272 } 273 274 if generation > rs.Status.ObservedGeneration { 275 return false, nil 276 } 277 conditions = rs.Status.Conditions 278 279 cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure) 280 return cond != nil, nil 281 282 }) 283 if wait.Interrupted(err) { 284 err = fmt.Errorf("rs controller never added the failure condition for replica set %q: %#v", name, conditions) 285 } 286 framework.ExpectNoError(err) 287 288 ginkgo.By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name)) 289 rs, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, namespace, name, func(update *appsv1.ReplicaSet) { 290 x := int32(2) 291 update.Spec.Replicas = &x 292 }) 293 framework.ExpectNoError(err) 294 295 ginkgo.By(fmt.Sprintf("Checking replica set %q has no failure condition set", name)) 296 generation = rs.Generation 297 conditions = rs.Status.Conditions 298 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 299 rs, err = c.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{}) 300 if err != nil { 301 return false, err 302 } 303 304 if generation > rs.Status.ObservedGeneration { 305 return false, nil 306 } 307 conditions = rs.Status.Conditions 308 309 cond := replicaset.GetCondition(rs.Status, appsv1.ReplicaSetReplicaFailure) 310 return cond == nil, nil 311 }) 312 if wait.Interrupted(err) { 313 err = fmt.Errorf("rs controller never removed the failure condition for rs %q: %#v", name, conditions) 314 } 315 framework.ExpectNoError(err) 316 } 317 318 func testRSAdoptMatchingAndReleaseNotMatching(ctx context.Context, f *framework.Framework) { 319 name := "pod-adoption-release" 320 ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name)) 321 p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{ 322 ObjectMeta: metav1.ObjectMeta{ 323 Name: name, 324 Labels: map[string]string{ 325 "name": name, 326 }, 327 }, 328 Spec: v1.PodSpec{ 329 Containers: []v1.Container{ 330 { 331 Name: name, 332 Image: WebserverImage, 333 }, 334 }, 335 }, 336 }) 337 338 ginkgo.By("When a replicaset with a matching selector is created") 339 replicas := int32(1) 340 rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil) 341 rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}} 342 rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(ctx, rsSt, metav1.CreateOptions{}) 343 framework.ExpectNoError(err) 344 345 ginkgo.By("Then the orphan pod is adopted") 346 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 347 p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) 348 // The Pod p should either be adopted or deleted by the ReplicaSet 349 if apierrors.IsNotFound(err) { 350 return true, nil 351 } 352 framework.ExpectNoError(err) 353 for _, owner := range p2.OwnerReferences { 354 if *owner.Controller && owner.UID == rs.UID { 355 // pod adopted 356 return true, nil 357 } 358 } 359 // pod still not adopted 360 return false, nil 361 }) 362 framework.ExpectNoError(err) 363 364 ginkgo.By("When the matched label of one of its pods change") 365 pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rs.Name, replicas) 366 framework.ExpectNoError(err) 367 368 p = &pods.Items[0] 369 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 370 pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) 371 framework.ExpectNoError(err) 372 373 pod.Labels = map[string]string{"name": "not-matching-name"} 374 _, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{}) 375 if err != nil && apierrors.IsConflict(err) { 376 return false, nil 377 } 378 if err != nil { 379 return false, err 380 } 381 return true, nil 382 }) 383 framework.ExpectNoError(err) 384 385 ginkgo.By("Then the pod is released") 386 err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) { 387 p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{}) 388 framework.ExpectNoError(err) 389 for _, owner := range p2.OwnerReferences { 390 if *owner.Controller && owner.UID == rs.UID { 391 // pod still belonging to the replicaset 392 return false, nil 393 } 394 } 395 // pod already released 396 return true, nil 397 }) 398 framework.ExpectNoError(err) 399 } 400 401 func testRSScaleSubresources(ctx context.Context, f *framework.Framework) { 402 ns := f.Namespace.Name 403 c := f.ClientSet 404 405 // Create webserver pods. 406 rsPodLabels := map[string]string{ 407 "name": "sample-pod", 408 "pod": WebserverImageName, 409 } 410 411 rsName := "test-rs" 412 replicas := int32(1) 413 ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName)) 414 rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) 415 _, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) 416 framework.ExpectNoError(err) 417 418 // Verify that the required pods have come up. 419 err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) 420 framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err) 421 422 ginkgo.By("getting scale subresource") 423 scale, err := c.AppsV1().ReplicaSets(ns).GetScale(ctx, rsName, metav1.GetOptions{}) 424 if err != nil { 425 framework.Failf("Failed to get scale subresource: %v", err) 426 } 427 gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) 428 gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) 429 430 ginkgo.By("updating a scale subresource") 431 scale.ResourceVersion = "" // indicate the scale update should be unconditional 432 scale.Spec.Replicas = 2 433 scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(ctx, rsName, scale, metav1.UpdateOptions{}) 434 if err != nil { 435 framework.Failf("Failed to put scale subresource: %v", err) 436 } 437 gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) 438 439 ginkgo.By("verifying the replicaset Spec.Replicas was modified") 440 rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) 441 if err != nil { 442 framework.Failf("Failed to get statefulset resource: %v", err) 443 } 444 gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(2))) 445 446 ginkgo.By("Patch a scale subresource") 447 scale.ResourceVersion = "" // indicate the scale update should be unconditional 448 scale.Spec.Replicas = 4 // should be 2 after "UpdateScale" operation, now Patch to 4 449 rsScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{ 450 Spec: autoscalingv1.ScaleSpec{ 451 Replicas: scale.Spec.Replicas, 452 }, 453 }) 454 framework.ExpectNoError(err, "Could not Marshal JSON for patch payload") 455 456 _, err = c.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsScalePatchPayload), metav1.PatchOptions{}, "scale") 457 framework.ExpectNoError(err, "Failed to patch replicaset: %v", err) 458 459 rs, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{}) 460 framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err) 461 gomega.Expect(*(rs.Spec.Replicas)).To(gomega.Equal(int32(4)), "replicaset should have 4 replicas") 462 } 463 464 // ReplicaSet Replace and Patch tests 465 func testRSLifeCycle(ctx context.Context, f *framework.Framework) { 466 ns := f.Namespace.Name 467 c := f.ClientSet 468 zero := int64(0) 469 470 // Create webserver pods. 471 rsPodLabels := map[string]string{ 472 "name": "sample-pod", 473 "pod": WebserverImageName, 474 } 475 476 rsName := "test-rs" 477 label := "test-rs=patched" 478 labelMap := map[string]string{"test-rs": "patched"} 479 replicas := int32(1) 480 rsPatchReplicas := int32(3) 481 rsPatchImage := imageutils.GetE2EImage(imageutils.Pause) 482 483 w := &cache.ListWatch{ 484 WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 485 options.LabelSelector = label 486 return f.ClientSet.AppsV1().ReplicaSets(ns).Watch(ctx, options) 487 }, 488 } 489 rsList, err := f.ClientSet.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: label}) 490 framework.ExpectNoError(err, "failed to list rsList") 491 // Create a ReplicaSet 492 rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) 493 _, err = c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) 494 framework.ExpectNoError(err) 495 496 // Verify that the required pods have come up. 497 err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) 498 framework.ExpectNoError(err, "Failed to create pods: %s", err) 499 500 // Scale the ReplicaSet 501 ginkgo.By(fmt.Sprintf("Scaling up %q replicaset", rsName)) 502 _, err = e2ereplicaset.UpdateReplicaSetWithRetries(c, ns, rsName, func(update *appsv1.ReplicaSet) { 503 x := int32(2) 504 update.Spec.Replicas = &x 505 }) 506 framework.ExpectNoError(err, "ReplicaSet fail to scale to %q replicasets") 507 508 // Patch the PeplicaSet 509 ginkgo.By("patching the ReplicaSet") 510 rsPatch, err := json.Marshal(map[string]interface{}{ 511 "metadata": map[string]interface{}{ 512 "labels": labelMap, 513 }, 514 "spec": map[string]interface{}{ 515 "replicas": rsPatchReplicas, 516 "template": map[string]interface{}{ 517 "spec": map[string]interface{}{ 518 "terminationGracePeriodSeconds": &zero, 519 "containers": [1]map[string]interface{}{{ 520 "name": rsName, 521 "image": rsPatchImage, 522 }}, 523 }, 524 }, 525 }, 526 }) 527 framework.ExpectNoError(err, "failed to Marshal ReplicaSet JSON patch") 528 _, err = f.ClientSet.AppsV1().ReplicaSets(ns).Patch(ctx, rsName, types.StrategicMergePatchType, []byte(rsPatch), metav1.PatchOptions{}) 529 framework.ExpectNoError(err, "failed to patch ReplicaSet") 530 531 ctxUntil, cancel := context.WithTimeout(ctx, f.Timeouts.PodStart) 532 defer cancel() 533 _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { 534 if rset, ok := event.Object.(*appsv1.ReplicaSet); ok { 535 found := rset.ObjectMeta.Name == rsName && 536 rset.ObjectMeta.Labels["test-rs"] == "patched" && 537 rset.Status.ReadyReplicas == rsPatchReplicas && 538 rset.Status.AvailableReplicas == rsPatchReplicas && 539 rset.Spec.Template.Spec.Containers[0].Image == rsPatchImage && 540 *rset.Spec.Template.Spec.TerminationGracePeriodSeconds == zero 541 if !found { 542 framework.Logf("observed ReplicaSet %v in namespace %v with ReadyReplicas %v, AvailableReplicas %v", rset.ObjectMeta.Name, rset.ObjectMeta.Namespace, rset.Status.ReadyReplicas, 543 rset.Status.AvailableReplicas) 544 } else { 545 framework.Logf("observed Replicaset %v in namespace %v with ReadyReplicas %v found %v", rset.ObjectMeta.Name, rset.ObjectMeta.Namespace, rset.Status.ReadyReplicas, found) 546 } 547 return found, nil 548 } 549 return false, nil 550 }) 551 552 framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", rs.Name, ns, rsPatchReplicas) 553 } 554 555 // List and DeleteCollection operations 556 func listRSDeleteCollection(ctx context.Context, f *framework.Framework) { 557 558 ns := f.Namespace.Name 559 c := f.ClientSet 560 rsClient := f.ClientSet.AppsV1().ReplicaSets(ns) 561 one := int64(1) 562 rsName := "test-rs" 563 replicas := int32(3) 564 e2eValue := rand.String(5) 565 566 // Define ReplicaSet Labels 567 rsPodLabels := map[string]string{ 568 "name": "sample-pod", 569 "pod": WebserverImageName, 570 "e2e": e2eValue, 571 } 572 573 ginkgo.By("Create a ReplicaSet") 574 rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) 575 _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}) 576 framework.ExpectNoError(err) 577 578 ginkgo.By("Verify that the required pods have come up") 579 err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) 580 framework.ExpectNoError(err, "Failed to create pods: %s", err) 581 r, err := rsClient.Get(ctx, rsName, metav1.GetOptions{}) 582 framework.ExpectNoError(err, "failed to get ReplicaSets") 583 framework.Logf("Replica Status: %+v", r.Status) 584 585 ginkgo.By("Listing all ReplicaSets") 586 rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) 587 framework.ExpectNoError(err, "failed to list ReplicaSets") 588 gomega.Expect(rsList.Items).To(gomega.HaveLen(1), "filtered list wasn't found") 589 590 ginkgo.By("DeleteCollection of the ReplicaSets") 591 err = rsClient.DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) 592 framework.ExpectNoError(err, "failed to delete ReplicaSets") 593 594 ginkgo.By("After DeleteCollection verify that ReplicaSets have been deleted") 595 rsList, err = c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: "e2e=" + e2eValue}) 596 framework.ExpectNoError(err, "failed to list ReplicaSets") 597 gomega.Expect(rsList.Items).To(gomega.BeEmpty(), "filtered list should have no replicas") 598 } 599 600 func testRSStatus(ctx context.Context, f *framework.Framework) { 601 ns := f.Namespace.Name 602 c := f.ClientSet 603 rsClient := c.AppsV1().ReplicaSets(ns) 604 605 // Define ReplicaSet Labels 606 rsPodLabels := map[string]string{ 607 "name": "sample-pod", 608 "pod": WebserverImageName, 609 } 610 labelSelector := labels.SelectorFromSet(rsPodLabels).String() 611 612 rsName := "test-rs" 613 replicas := int32(1) 614 615 w := &cache.ListWatch{ 616 WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 617 options.LabelSelector = labelSelector 618 return rsClient.Watch(ctx, options) 619 }, 620 } 621 rsList, err := c.AppsV1().ReplicaSets("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector}) 622 framework.ExpectNoError(err, "failed to list Replicasets") 623 624 ginkgo.By("Create a Replicaset") 625 rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil) 626 testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{}) 627 framework.ExpectNoError(err) 628 629 ginkgo.By("Verify that the required pods have come up.") 630 err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas) 631 framework.ExpectNoError(err, "Failed to create pods: %s", err) 632 633 ginkgo.By("Getting /status") 634 rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"} 635 rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(ctx, rsName, metav1.GetOptions{}, "status") 636 framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns) 637 rsStatusBytes, err := json.Marshal(rsStatusUnstructured) 638 framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err) 639 640 var rsStatus appsv1.ReplicaSet 641 err = json.Unmarshal(rsStatusBytes, &rsStatus) 642 framework.ExpectNoError(err, "Failed to unmarshal JSON bytes to a replicaset object type") 643 framework.Logf("Replicaset %s has Conditions: %v", rsName, rsStatus.Status.Conditions) 644 645 ginkgo.By("updating the Replicaset Status") 646 var statusToUpdate, updatedStatus *appsv1.ReplicaSet 647 648 err = retry.RetryOnConflict(retry.DefaultRetry, func() error { 649 statusToUpdate, err = rsClient.Get(ctx, rsName, metav1.GetOptions{}) 650 framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName) 651 652 statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{ 653 Type: "StatusUpdate", 654 Status: "True", 655 Reason: "E2E", 656 Message: "Set from e2e test", 657 }) 658 659 updatedStatus, err = rsClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{}) 660 return err 661 }) 662 framework.ExpectNoError(err, "Failed to update status. %v", err) 663 framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) 664 665 ginkgo.By("watching for the ReplicaSet status to be updated") 666 ctxUntil, cancel := context.WithTimeout(ctx, rsRetryTimeout) 667 defer cancel() 668 _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { 669 if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { 670 found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && 671 rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && 672 rs.ObjectMeta.Labels["name"] == testReplicaSet.ObjectMeta.Labels["name"] && 673 rs.ObjectMeta.Labels["pod"] == testReplicaSet.ObjectMeta.Labels["pod"] 674 if !found { 675 framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions) 676 return false, nil 677 } 678 for _, cond := range rs.Status.Conditions { 679 if cond.Type == "StatusUpdate" && 680 cond.Reason == "E2E" && 681 cond.Message == "Set from e2e test" { 682 framework.Logf("Found replicaset %v in namespace %v with labels: %v annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.ObjectMeta.Labels, rs.Annotations, rs.Status.Conditions) 683 return found, nil 684 } 685 framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions) 686 } 687 } 688 object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0] 689 framework.Logf("Observed %v event: %+v", object, event.Type) 690 return false, nil 691 }) 692 framework.ExpectNoError(err, "failed to locate replicaset %v in namespace %v", testReplicaSet.ObjectMeta.Name, ns) 693 framework.Logf("Replicaset %s has an updated status", rsName) 694 695 ginkgo.By("patching the Replicaset Status") 696 payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) 697 framework.Logf("Patch payload: %v", string(payload)) 698 699 patchedReplicaSet, err := rsClient.Patch(ctx, rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") 700 framework.ExpectNoError(err, "Failed to patch status. %v", err) 701 framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions) 702 703 ginkgo.By("watching for the Replicaset status to be patched") 704 ctxUntil, cancel = context.WithTimeout(ctx, rsRetryTimeout) 705 defer cancel() 706 _, err = watchtools.Until(ctxUntil, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) { 707 if rs, ok := event.Object.(*appsv1.ReplicaSet); ok { 708 found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name && 709 rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace && 710 rs.ObjectMeta.Labels["name"] == testReplicaSet.ObjectMeta.Labels["name"] && 711 rs.ObjectMeta.Labels["pod"] == testReplicaSet.ObjectMeta.Labels["pod"] 712 if !found { 713 framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions) 714 return false, nil 715 } 716 for _, cond := range rs.Status.Conditions { 717 if cond.Type == "StatusPatched" { 718 framework.Logf("Found replicaset %v in namespace %v with labels: %v annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.ObjectMeta.Labels, rs.Annotations, cond) 719 return found, nil 720 } 721 framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, cond) 722 } 723 } 724 object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0] 725 framework.Logf("Observed %v event: %+v", object, event.Type) 726 return false, nil 727 }) 728 framework.ExpectNoError(err, "failed to locate replicaset %v in namespace %v", testReplicaSet.ObjectMeta.Name, ns) 729 framework.Logf("Replicaset %s has a patched status", rsName) 730 }