// k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/apps/deployment.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apps

import (
	"context"
	"encoding/json"
	"fmt"
	"math/rand"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/tools/cache"

	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	unstructuredv1 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/dump"
	"k8s.io/apimachinery/pkg/util/intstr"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/client-go/util/retry"
	appsinternal "k8s.io/kubernetes/pkg/apis/apps"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/test/e2e/framework"
	e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2ereplicaset "k8s.io/kubernetes/test/e2e/framework/replicaset"
	e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	testutil "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	"k8s.io/utils/ptr"
)

const (
	poll            = 2 * time.Second
	pollLongTimeout = 5 * time.Minute
	dRetryPeriod    = 2 * time.Second
	dRetryTimeout   = 5 * time.Minute
)

var (
	nilRs *appsv1.ReplicaSet
)

var _ = SIGDescribe("Deployment", func() {
	var ns string
	var c clientset.Interface
	var dc dynamic.Interface

	ginkgo.AfterEach(func(ctx context.Context) {
		failureTrap(ctx, c, ns)
	})

	f := framework.NewDefaultFramework("deployment")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	ginkgo.BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
		dc = f.DynamicClient
	})

	ginkgo.It("deployment reaping should cascade to its replica sets and pods", func(ctx context.Context) {
		testDeleteDeployment(ctx, f)
	})
	/*
		Release: v1.12
		Testname: Deployment RollingUpdate
		Description: A conformant Kubernetes distribution MUST support the Deployment with RollingUpdate strategy.
	*/
	framework.ConformanceIt("RollingUpdateDeployment should delete old pods and create new ones", func(ctx context.Context) {
		testRollingUpdateDeployment(ctx, f)
	})
	/*
		Release: v1.12
		Testname: Deployment Recreate
		Description: A conformant Kubernetes distribution MUST support the Deployment with Recreate strategy.
	*/
	framework.ConformanceIt("RecreateDeployment should delete old pods and create new ones", func(ctx context.Context) {
		testRecreateDeployment(ctx, f)
	})
	/*
		Release: v1.12
		Testname: Deployment RevisionHistoryLimit
		Description: A conformant Kubernetes distribution MUST clean up Deployment's ReplicaSets based on
		the Deployment's `.spec.revisionHistoryLimit`.
	*/
	framework.ConformanceIt("deployment should delete old replica sets", func(ctx context.Context) {
		testDeploymentCleanUpPolicy(ctx, f)
	})
	/*
		Release: v1.12
		Testname: Deployment Rollover
		Description: A conformant Kubernetes distribution MUST support Deployment rollover,
		i.e. allow an arbitrary number of changes to desired state during a rolling update
		before the rollout finishes.
	*/
	framework.ConformanceIt("deployment should support rollover", func(ctx context.Context) {
		testRolloverDeployment(ctx, f)
	})
	ginkgo.It("iterative rollouts should eventually progress", func(ctx context.Context) {
		testIterativeDeployments(ctx, f)
	})
	ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func(ctx context.Context) {
		testDeploymentsControllerRef(ctx, f)
	})

	/*
		Release: v1.21
		Testname: Deployment, completes the scaling of a Deployment subresource
		Description: Create a Deployment with a single Pod. The Pod MUST be verified
		that it is running. The Deployment MUST get and verify the scale subresource count.
		The Deployment MUST update and verify the scale subresource. The Deployment MUST patch and verify
		a scale subresource.
	*/
	framework.ConformanceIt("Deployment should have a working scale subresource", func(ctx context.Context) {
		testDeploymentSubresources(ctx, f)
	})
	/*
		Release: v1.12
		Testname: Deployment Proportional Scaling
		Description: A conformant Kubernetes distribution MUST support Deployment
		proportional scaling, i.e. proportionally scale a Deployment's ReplicaSets
		when a Deployment is scaled.
	*/
	framework.ConformanceIt("deployment should support proportional scaling", func(ctx context.Context) {
		testProportionalScalingDeployment(ctx, f)
	})
	ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
		e2eskipper.SkipIfIPv6("aws")
		nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
		framework.ExpectNoError(err)
		e2eskipper.SkipUnlessAtLeast(len(nodes.Items), 3, "load-balancer test requires at least 3 schedulable nodes")
		testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx, f)
	})
	// TODO: add tests that cover deployment.Spec.MinReadySeconds once we have solved clock-skew issues.
	// See https://github.com/kubernetes/kubernetes/issues/29229
	// Add an UnavailableReplicas check because ReadyReplicas or UpdatedReplicas might not represent
	// the actual number of pods running successfully if some pods failed to start after update or patch.
	// See issue #100192

	/*
		Release: v1.20
		Testname: Deployment, completes the lifecycle of a Deployment
		Description: When a Deployment is created it MUST succeed with the required number of replicas.
		It MUST succeed when the Deployment is patched. When scaling the Deployment it MUST succeed.
		When fetching and patching the DeploymentStatus it MUST succeed. It MUST succeed when deleting
		the Deployment.
	*/
	framework.ConformanceIt("should run the lifecycle of a Deployment", func(ctx context.Context) {
		one := int64(1)
		two := int64(2)
		deploymentResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
		testNamespaceName := f.Namespace.Name
		testDeploymentName := "test-deployment"
		testDeploymentInitialImage := imageutils.GetE2EImage(imageutils.Agnhost)
		testDeploymentPatchImage := imageutils.GetE2EImage(imageutils.Pause)
		testDeploymentUpdateImage := imageutils.GetE2EImage(imageutils.Httpd)
		testDeploymentDefaultReplicas := int32(2)
		testDeploymentMinimumReplicas := int32(1)
		testDeploymentNoReplicas := int32(0)
		testDeploymentAvailableReplicas := int32(0)
		testDeploymentLabels := map[string]string{"test-deployment-static": "true"}
		testDeploymentLabelsFlat := "test-deployment-static=true"
		w := &cache.ListWatch{
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = testDeploymentLabelsFlat
				return f.ClientSet.AppsV1().Deployments(testNamespaceName).Watch(ctx, options)
			},
		}
		deploymentsList, err := f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
		framework.ExpectNoError(err, "failed to list Deployments")

		ginkgo.By("creating a Deployment")
		testDeployment := e2edeployment.NewDeployment(
			testDeploymentName, testDeploymentDefaultReplicas, testDeploymentLabels,
			testDeploymentName, testDeploymentInitialImage, appsv1.RollingUpdateDeploymentStrategyType)
		testDeployment.ObjectMeta.Labels = map[string]string{"test-deployment-static": "true"}
		testDeployment.Spec.Template.Spec.TerminationGracePeriodSeconds = &one

		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Create(ctx, testDeployment, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create Deployment %v in namespace %v", testDeploymentName, testNamespaceName)

		ginkgo.By("waiting for Deployment to be created")
		ctxUntil, cancel := context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Added:
				if deployment, ok := event.Object.(*appsv1.Deployment); ok {
					found := deployment.ObjectMeta.Name == testDeployment.Name &&
						deployment.ObjectMeta.Labels["test-deployment-static"] == "true"
					return found, nil
				}
			default:
				framework.Logf("observed event type %v", event.Type)
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see %v event", watch.Added)

		ginkgo.By("waiting for all Replicas to be Ready")
		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			if deployment, ok := event.Object.(*appsv1.Deployment); ok {
				found := deployment.ObjectMeta.Name == testDeployment.Name &&
					deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
					deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas
				framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v and labels %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas, deployment.ObjectMeta.Labels)
				return found, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)

		ginkgo.By("patching the Deployment")
		deploymentPatch, err := json.Marshal(map[string]interface{}{
			"metadata": map[string]interface{}{
				"labels": map[string]string{"test-deployment": "patched"},
			},
			"spec": map[string]interface{}{
				"replicas": testDeploymentMinimumReplicas,
				"template": map[string]interface{}{
					"spec": map[string]interface{}{
						"terminationGracePeriodSeconds": &two,
						"containers": [1]map[string]interface{}{{
							"name":  testDeploymentName,
							"image": testDeploymentPatchImage,
						}},
					},
				},
			},
		})
		framework.ExpectNoError(err, "failed to marshal Deployment JSON patch")
		_, err = f.ClientSet.AppsV1().Deployments(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentPatch), metav1.PatchOptions{})
		framework.ExpectNoError(err, "failed to patch Deployment")
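		// The strategic merge patch above merges the containers list by container
		// name (its patch merge key), so only the named container's image and the
		// listed spec fields change; everything else on the Deployment is kept.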
		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Modified:
				if deployment, ok := event.Object.(*appsv1.Deployment); ok {
					found := deployment.ObjectMeta.Name == testDeployment.Name &&
						deployment.ObjectMeta.Labels["test-deployment-static"] == "true"
					if !found {
						framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
					}
					return found, nil
				}
			default:
				framework.Logf("observed event type %v", event.Type)
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see %v event", watch.Modified)

		ginkgo.By("waiting for Replicas to scale")
		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			if deployment, ok := event.Object.(*appsv1.Deployment); ok {
				found := deployment.ObjectMeta.Name == testDeployment.Name &&
					deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
					deployment.Status.ReadyReplicas == testDeploymentMinimumReplicas &&
					deployment.Status.UpdatedReplicas == testDeploymentMinimumReplicas &&
					deployment.Status.UnavailableReplicas == 0 &&
					deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentPatchImage &&
					*deployment.Spec.Template.Spec.TerminationGracePeriodSeconds == two
				framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
				return found, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentMinimumReplicas)

		ginkgo.By("listing Deployments")
		deploymentsList, err = f.ClientSet.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
		framework.ExpectNoError(err, "failed to list Deployments")
		foundDeployment := false
		for _, deploymentItem := range deploymentsList.Items {
			if deploymentItem.ObjectMeta.Name == testDeploymentName &&
				deploymentItem.ObjectMeta.Namespace == testNamespaceName &&
				deploymentItem.ObjectMeta.Labels["test-deployment-static"] == "true" {
				foundDeployment = true
				framework.Logf("Found %v with labels: %v", deploymentItem.ObjectMeta.Name, deploymentItem.ObjectMeta.Labels)
				break
			}
		}
		if !foundDeployment {
			framework.Failf("unable to find the Deployment in the following list %v", deploymentsList)
		}

		ginkgo.By("updating the Deployment")
		testDeploymentUpdate := testDeployment
		testDeploymentUpdate.ObjectMeta.Labels["test-deployment"] = "updated"
		testDeploymentUpdate.Spec.Template.Spec.Containers[0].Image = testDeploymentUpdateImage
		testDeploymentDefaultReplicasPointer := &testDeploymentDefaultReplicas
		testDeploymentUpdate.Spec.Replicas = testDeploymentDefaultReplicasPointer
		testDeploymentUpdateUnstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&testDeploymentUpdate)
		framework.ExpectNoError(err, "failed to convert to unstructured")
		testDeploymentUpdateUnstructured := unstructuredv1.Unstructured{
			Object: testDeploymentUpdateUnstructuredMap,
		}
		// currently this hasn't been able to hit the endpoint replaceAppsV1NamespacedDeploymentStatus
		_, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Update(ctx, &testDeploymentUpdateUnstructured, metav1.UpdateOptions{}) //, "status")
		framework.ExpectNoError(err, "failed to update the DeploymentStatus")
		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Modified:
				if deployment, ok := event.Object.(*appsv1.Deployment); ok {
					found := deployment.ObjectMeta.Name == testDeployment.Name &&
						deployment.ObjectMeta.Labels["test-deployment-static"] == "true"
					framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
					return found, nil
				}
			default:
				framework.Logf("observed event type %v", event.Type)
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see %v event", watch.Modified)

		ginkgo.By("fetching the DeploymentStatus")
		deploymentGetUnstructured, err := dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
		framework.ExpectNoError(err, "failed to fetch the Deployment")
		deploymentGet := appsv1.Deployment{}
		err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
		framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
		gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image")
		gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels")

		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			if deployment, ok := event.Object.(*appsv1.Deployment); ok {
				found := deployment.ObjectMeta.Name == testDeployment.Name &&
					deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
					deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas &&
					deployment.Status.UpdatedReplicas == testDeploymentDefaultReplicas &&
					deployment.Status.UnavailableReplicas == 0
				if !found {
					framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v and labels %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas, deployment.ObjectMeta.Labels)
				}
				return found, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)

		ginkgo.By("patching the DeploymentStatus")
		deploymentStatusPatch, err := json.Marshal(map[string]interface{}{
			"metadata": map[string]interface{}{
				"labels": map[string]string{"test-deployment": "patched-status"},
			},
			"status": map[string]interface{}{
				"readyReplicas":     testDeploymentNoReplicas,
				"availableReplicas": testDeploymentAvailableReplicas,
			},
		})
		framework.ExpectNoError(err, "failed to marshal Deployment JSON patch")

		_, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Patch(ctx, testDeploymentName, types.StrategicMergePatchType, []byte(deploymentStatusPatch), metav1.PatchOptions{}, "status")
		framework.ExpectNoError(err)

		ctxUntil, cancel = context.WithTimeout(ctx, 30*time.Second)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Modified:
				if deployment, ok := event.Object.(*appsv1.Deployment); ok {
					found := deployment.ObjectMeta.Name == testDeployment.Name &&
						deployment.ObjectMeta.Labels["test-deployment-static"] == "true"
					return found, nil
				}
			default:
				framework.Logf("observed event type %v", event.Type)
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see %v event", watch.Modified)

		ginkgo.By("fetching the DeploymentStatus")
		deploymentGetUnstructured, err = dc.Resource(deploymentResource).Namespace(testNamespaceName).Get(ctx, testDeploymentName, metav1.GetOptions{}, "status")
		framework.ExpectNoError(err, "failed to fetch the DeploymentStatus")
		deploymentGet = appsv1.Deployment{}
		err = runtime.DefaultUnstructuredConverter.FromUnstructured(deploymentGetUnstructured.Object, &deploymentGet)
		framework.ExpectNoError(err, "failed to convert the unstructured response to a Deployment")
		gomega.Expect(deploymentGet.Spec.Template.Spec.Containers[0].Image).To(gomega.Equal(testDeploymentUpdateImage), "failed to update image")
		gomega.Expect(deploymentGet.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-deployment", "updated"), "failed to update labels")

		ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			if deployment, ok := event.Object.(*appsv1.Deployment); ok {
				found := deployment.ObjectMeta.Name == testDeployment.Name &&
					deployment.ObjectMeta.Labels["test-deployment-static"] == "true" &&
					deployment.Status.ReadyReplicas == testDeploymentDefaultReplicas &&
					deployment.Status.UpdatedReplicas == testDeploymentDefaultReplicas &&
					deployment.Status.UnavailableReplicas == 0 &&
					deployment.Spec.Template.Spec.Containers[0].Image == testDeploymentUpdateImage
				if !found {
					framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
				}
				return found, nil
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see replicas of %v in namespace %v scale to requested amount of %v", testDeployment.Name, testNamespaceName, testDeploymentDefaultReplicas)

		ginkgo.By("deleting the Deployment")
		err = f.ClientSet.AppsV1().Deployments(testNamespaceName).DeleteCollection(ctx, metav1.DeleteOptions{GracePeriodSeconds: &one}, metav1.ListOptions{LabelSelector: testDeploymentLabelsFlat})
		framework.ExpectNoError(err, "failed to delete Deployment via collection")

		ctxUntil, cancel = context.WithTimeout(ctx, 1*time.Minute)
		defer cancel()
		_, err = watchtools.Until(ctxUntil, deploymentsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
			switch event.Type {
			case watch.Deleted:
				if deployment, ok := event.Object.(*appsv1.Deployment); ok {
					found := deployment.ObjectMeta.Name == testDeployment.Name &&
						deployment.ObjectMeta.Labels["test-deployment-static"] == "true"
					if !found {
						framework.Logf("observed Deployment %v in namespace %v with ReadyReplicas %v", deployment.ObjectMeta.Name, deployment.ObjectMeta.Namespace, deployment.Status.ReadyReplicas)
					}
					return found, nil
				}
			default:
				framework.Logf("observed event type %v", event.Type)
			}
			return false, nil
		})
		framework.ExpectNoError(err, "failed to see %v event", watch.Deleted)
	})

	/*
		Release: v1.22
		Testname: Deployment, status sub-resource
		Description: When a Deployment is created it MUST succeed.
		Attempt to read, update and patch its status sub-resource; all
		mutating sub-resource operations MUST be visible to subsequent reads.
	*/
	framework.ConformanceIt("should validate Deployment Status endpoints", func(ctx context.Context) {
		dClient := c.AppsV1().Deployments(ns)
		dName := "test-deployment-" + utilrand.String(5)
		labelSelector := "e2e=testing"

		w := &cache.ListWatch{
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = labelSelector
				return dClient.Watch(ctx, options)
			},
		}
		dList, err := c.AppsV1().Deployments("").List(ctx, metav1.ListOptions{LabelSelector: labelSelector})
		framework.ExpectNoError(err, "failed to list Deployments")

		ginkgo.By("creating a Deployment")

		podLabels := map[string]string{"name": WebserverImageName, "e2e": "testing"}
		replicas := int32(1)
		framework.Logf("Creating simple deployment %s", dName)
		d := e2edeployment.NewDeployment(dName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
		deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		// Wait for it to be updated to revision 1
		err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, dName, "1", WebserverImage)
		framework.ExpectNoError(err)

		err = e2edeployment.WaitForDeploymentComplete(c, deploy)
		framework.ExpectNoError(err)

		testDeployment, err := dClient.Get(ctx, dName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Getting /status")
		dResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
		dStatusUnstructured, err := f.DynamicClient.Resource(dResource).Namespace(ns).Get(ctx, dName, metav1.GetOptions{}, "status")
		framework.ExpectNoError(err, "Failed to fetch the status of deployment %s in namespace %s", dName, ns)
		dStatusBytes, err := json.Marshal(dStatusUnstructured)
		framework.ExpectNoError(err, "Failed to marshal unstructured response: %v", err)

		var dStatus appsv1.Deployment
		err = json.Unmarshal(dStatusBytes, &dStatus)
		framework.ExpectNoError(err, "Failed to unmarshal JSON bytes to a deployment object type")
		framework.Logf("Deployment %s has Conditions: %v", dName, dStatus.Status.Conditions)

		ginkgo.By("updating Deployment Status")
		var statusToUpdate, updatedStatus *appsv1.Deployment

		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			statusToUpdate, err = dClient.Get(ctx, dName, metav1.GetOptions{})
			framework.ExpectNoError(err, "Unable to retrieve deployment %s", dName)

			statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.DeploymentCondition{
				Type:    "StatusUpdate",
				Status:  "True",
				Reason:  "E2E",
				Message: "Set from e2e test",
			})

			updatedStatus, err = dClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
			return err
		})
		framework.ExpectNoError(err, "Failed to update status: %v", err)
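		// The UpdateStatus above races with the deployment controller's own status
		// writes, hence the RetryOnConflict wrapper: on a 409 Conflict the object is
		// re-read at its latest resourceVersion and the update is retried.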
%v", err) 552 framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions) 553 554 ginkgo.By("watching for the Deployment status to be updated") 555 ctxUntil, cancel := context.WithTimeout(ctx, dRetryTimeout) 556 defer cancel() 557 558 _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { 559 if d, ok := event.Object.(*appsv1.Deployment); ok { 560 found := d.ObjectMeta.Name == testDeployment.ObjectMeta.Name && 561 d.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace && 562 d.Labels["e2e"] == "testing" 563 564 if !found { 565 framework.Logf("Observed Deployment %v in namespace %v with annotations: %v & Conditions: %v\n", d.ObjectMeta.Name, d.ObjectMeta.Namespace, d.Annotations, d.Status.Conditions) 566 return false, nil 567 } 568 for _, cond := range d.Status.Conditions { 569 if cond.Type == "StatusUpdate" && 570 cond.Reason == "E2E" && 571 cond.Message == "Set from e2e test" { 572 framework.Logf("Found Deployment %v in namespace %v with labels: %v annotations: %v & Conditions: %v", d.ObjectMeta.Name, d.ObjectMeta.Namespace, d.ObjectMeta.Labels, d.Annotations, cond) 573 return found, nil 574 } 575 framework.Logf("Observed Deployment %v in namespace %v with annotations: %v & Conditions: %v", d.ObjectMeta.Name, d.ObjectMeta.Namespace, d.Annotations, cond) 576 } 577 } 578 object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0] 579 framework.Logf("Observed %v event: %+v", object, event.Type) 580 return false, nil 581 }) 582 framework.ExpectNoError(err, "failed to locate Deployment %v in namespace %v", testDeployment.ObjectMeta.Name, ns) 583 framework.Logf("Deployment %s has an updated status", dName) 584 585 ginkgo.By("patching the Statefulset Status") 586 payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`) 587 framework.Logf("Patch payload: %v", string(payload)) 588 589 patchedDeployment, err := dClient.Patch(ctx, dName, types.MergePatchType, payload, metav1.PatchOptions{}, "status") 590 framework.ExpectNoError(err, "Failed to patch status. 
%v", err) 591 framework.Logf("Patched status conditions: %#v", patchedDeployment.Status.Conditions) 592 593 ginkgo.By("watching for the Deployment status to be patched") 594 ctxUntil, cancel = context.WithTimeout(ctx, dRetryTimeout) 595 defer cancel() 596 597 _, err = watchtools.Until(ctxUntil, dList.ResourceVersion, w, func(event watch.Event) (bool, error) { 598 599 if e, ok := event.Object.(*appsv1.Deployment); ok { 600 found := e.ObjectMeta.Name == testDeployment.ObjectMeta.Name && 601 e.ObjectMeta.Namespace == testDeployment.ObjectMeta.Namespace && 602 e.ObjectMeta.Labels["e2e"] == testDeployment.ObjectMeta.Labels["e2e"] 603 if !found { 604 framework.Logf("Observed deployment %v in namespace %v with annotations: %v & Conditions: %v", testDeployment.ObjectMeta.Name, testDeployment.ObjectMeta.Namespace, testDeployment.Annotations, testDeployment.Status.Conditions) 605 return false, nil 606 } 607 for _, cond := range e.Status.Conditions { 608 if cond.Type == "StatusPatched" { 609 framework.Logf("Found deployment %v in namespace %v with labels: %v annotations: %v & Conditions: %v", testDeployment.ObjectMeta.Name, testDeployment.ObjectMeta.Namespace, testDeployment.ObjectMeta.Labels, testDeployment.Annotations, cond) 610 return found, nil 611 } 612 framework.Logf("Observed deployment %v in namespace %v with annotations: %v & Conditions: %v", testDeployment.ObjectMeta.Name, testDeployment.ObjectMeta.Namespace, testDeployment.Annotations, cond) 613 } 614 } 615 object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0] 616 framework.Logf("Observed %v event: %+v", object, event.Type) 617 return false, nil 618 }) 619 framework.ExpectNoError(err, "failed to locate deployment %v in namespace %v", testDeployment.ObjectMeta.Name, ns) 620 framework.Logf("Deployment %s has a patched status", dName) 621 }) 622 }) 623 624 func failureTrap(ctx context.Context, c clientset.Interface, ns string) { 625 deployments, err := c.AppsV1().Deployments(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) 626 if err != nil { 627 framework.Logf("Could not list Deployments in namespace %q: %v", ns, err) 628 return 629 } 630 for i := range deployments.Items { 631 d := deployments.Items[i] 632 633 framework.Logf("Deployment %q:\n%s\n", d.Name, dump.Pretty(d)) 634 _, allOldRSs, newRS, err := testutil.GetAllReplicaSets(&d, c) 635 if err != nil { 636 framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err) 637 return 638 } 639 testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf) 640 rsList := allOldRSs 641 if newRS != nil { 642 rsList = append(rsList, newRS) 643 } 644 testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf) 645 } 646 // We need print all the ReplicaSets if there are no Deployment object created 647 if len(deployments.Items) != 0 { 648 return 649 } 650 framework.Logf("Log out all the ReplicaSets if there is no deployment created") 651 rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()}) 652 if err != nil { 653 framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err) 654 return 655 } 656 for _, rs := range rss.Items { 657 framework.Logf("ReplicaSet %q:\n%s\n", rs.Name, dump.Pretty(rs)) 658 selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) 659 if err != nil { 660 framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err) 661 } 662 options := metav1.ListOptions{LabelSelector: selector.String()} 663 podList, err := 
		if err != nil {
			framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
			continue
		}
		for _, pod := range podList.Items {
			framework.Logf("pod: %q:\n%s\n", pod.Name, dump.Pretty(pod))
		}
	}
}

func stopDeployment(ctx context.Context, c clientset.Interface, ns, deploymentName string) {
	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	framework.Logf("Deleting deployment %s", deploymentName)
	err = e2eresource.DeleteResourceAndWaitForGC(ctx, c, appsinternal.Kind("Deployment"), ns, deployment.Name)
	framework.ExpectNoError(err)

	framework.Logf("Ensuring deployment %s was deleted", deploymentName)
	_, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
	gomega.Expect(err).To(gomega.MatchError(apierrors.IsNotFound, fmt.Sprintf("Expected deployment %s to be deleted", deploymentName)))
	framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
	selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
	framework.ExpectNoError(err)
	options := metav1.ListOptions{LabelSelector: selector.String()}
	rss, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
	framework.ExpectNoError(err)
	gomega.Expect(rss.Items).Should(gomega.BeEmpty())
	framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
	var pods *v1.PodList
	if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		pods, err = c.CoreV1().Pods(ns).List(ctx, options)
		if err != nil {
			return false, err
		}
		// Pods may be created by overlapping deployments right after this deployment is deleted, ignore them
		if len(pods.Items) == 0 {
			return true, nil
		}
		return false, nil
	}); err != nil {
		framework.Failf("Failed to remove deployment %s pods: %+v, error: %v", deploymentName, pods, err)
	}
}
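
// DeleteResourceAndWaitForGC above deletes the Deployment and waits for the
// garbage collector to remove its dependents. A minimal client-go sketch of an
// equivalent cascading delete (an illustration only, not what the helper
// literally does internally) would be:
//
//	policy := metav1.DeletePropagationForeground
//	err := c.AppsV1().Deployments(ns).Delete(ctx, name, metav1.DeleteOptions{PropagationPolicy: &policy})
//
// With foreground propagation the Deployment object is removed only after its
// ReplicaSets and Pods are gone, which is what stopDeployment asserts.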

func testDeleteDeployment(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	deploymentName := "test-new-deployment"
	podLabels := map[string]string{"name": WebserverImageName}
	replicas := int32(1)
	framework.Logf("Creating simple deployment %s", deploymentName)
	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
	deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Wait for it to be updated to revision 1
	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
	framework.ExpectNoError(err)

	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	newRS, err := testutil.GetNewReplicaSet(deployment, c)
	framework.ExpectNoError(err)
	gomega.Expect(newRS).NotTo(gomega.Equal(nilRs))
	stopDeployment(ctx, c, ns, deploymentName)
}

func testRollingUpdateDeployment(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create webserver pods.
	deploymentPodLabels := map[string]string{"name": "sample-pod"}
	rsPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  WebserverImageName,
	}

	rsName := "test-rolling-update-controller"
	replicas := int32(1)
	rsRevision := "3546343826724305832"
	annotations := make(map[string]string)
	annotations[deploymentutil.RevisionAnnotation] = rsRevision
	rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
	rs.Annotations = annotations
	framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, rs, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	// Verify that the required pods have come up.
	err = e2epod.VerifyPodsRunning(ctx, c, ns, "sample-pod", false, replicas)
	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
	deploymentName := "test-rolling-update-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
	deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Wait for it to be updated to revision 3546343826724305833.
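	// The adopted ReplicaSet carries the revision annotation 3546343826724305832,
	// so the Deployment is expected to continue that history at the next revision
	// rather than restarting from revision 1.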
	framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", AgnhostImage)
	framework.ExpectNoError(err)

	framework.Logf("Ensuring status for deployment %q is as expected", deploy.Name)
	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

	// There should be 1 old RS (webserver-controller, which is adopted)
	framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	_, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c)
	framework.ExpectNoError(err)
	gomega.Expect(allOldRSs).To(gomega.HaveLen(1))
}

func testRecreateDeployment(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	// Create a deployment that brings up agnhost pods.
	deploymentName := "test-recreate-deployment"
	framework.Logf("Creating deployment %q", deploymentName)
	d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
	deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Wait for it to be updated to revision 1
	framework.Logf("Waiting for deployment %q to be updated to revision 1", deploymentName)
	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", AgnhostImage)
	framework.ExpectNoError(err)

	framework.Logf("Waiting for deployment %q to complete", deploymentName)
	err = e2edeployment.WaitForDeploymentComplete(c, deployment)
	framework.ExpectNoError(err)

	// Update deployment to delete agnhost pods and bring up webserver pods.
	framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
		update.Spec.Template.Spec.Containers[0].Image = WebserverImage
	})
	framework.ExpectNoError(err)

	framework.Logf("Watching deployment %q to verify that new pods will not run with old pods", deploymentName)
	err = watchRecreateDeployment(ctx, c, deployment)
	framework.ExpectNoError(err)
}

// testDeploymentCleanUpPolicy tests that deployment supports cleanup policy.
func testDeploymentCleanUpPolicy(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	// Create webserver pods.
	deploymentPodLabels := map[string]string{"name": "cleanup-pod"}
	rsPodLabels := map[string]string{
		"name": "cleanup-pod",
		"pod":  WebserverImageName,
	}
	rsName := "test-cleanup-controller"
	replicas := int32(1)
	revisionHistoryLimit := ptr.To[int32](0)
	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Verify that the required pods have come up.
	err = e2epod.VerifyPodsRunning(ctx, c, ns, "cleanup-pod", false, replicas)
	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

	// Create a deployment to delete webserver pods and instead bring up agnhost pods.
	deploymentName := "test-cleanup-deployment"
	framework.Logf("Creating deployment %s", deploymentName)

	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: labels.Everything().String()})
	framework.ExpectNoError(err, "Failed to query for pods: %v", err)

	options := metav1.ListOptions{
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	stopCh := make(chan struct{})
	defer close(stopCh)
	w, err := c.CoreV1().Pods(ns).Watch(ctx, options)
	framework.ExpectNoError(err)
	go func() {
		defer ginkgo.GinkgoRecover()
		// There should be only one pod being created, which is the pod with the agnhost image.
		// The old RS shouldn't create a new pod when the deployment controller adds the
		// pod-template-hash label to its selector.
		numPodCreation := 1
		for {
			select {
			case event := <-w.ResultChan():
				if event.Type != watch.Added {
					continue
				}
				numPodCreation--
				if numPodCreation < 0 {
					framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
				}
				pod, ok := event.Object.(*v1.Pod)
				if !ok {
					framework.Failf("Expect event Object to be a pod")
				}
				if pod.Spec.Containers[0].Name != AgnhostImageName {
					framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", AgnhostImageName, pod)
				}
			case <-stopCh:
				return
			}
		}
	}()
	d := e2edeployment.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.RevisionHistoryLimit = revisionHistoryLimit
	_, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
	err = waitForDeploymentOldRSsNum(ctx, c, ns, deploymentName, int(*revisionHistoryLimit))
	framework.ExpectNoError(err)
}

// testRolloverDeployment tests that deployment supports rollover,
// i.e. we can change desired state and kick off a rolling update, then change desired state again before it finishes.
func testRolloverDeployment(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet
	podName := "rollover-pod"
	deploymentPodLabels := map[string]string{"name": podName}
	rsPodLabels := map[string]string{
		"name": podName,
		"pod":  WebserverImageName,
	}

	rsName := "test-rollover-controller"
	rsReplicas := int32(1)
	_, err := c.AppsV1().ReplicaSets(ns).Create(ctx, newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil), metav1.CreateOptions{})
	framework.ExpectNoError(err)
	// Verify that the required pods have come up.
	err = e2epod.VerifyPodsRunning(ctx, c, ns, podName, false, rsReplicas)
	framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)

	// Wait for replica set to become ready before adopting it.
	framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
	err = e2ereplicaset.WaitForReadyReplicaSet(ctx, c, ns, rsName)
	framework.ExpectNoError(err)

	// Create a deployment to delete webserver pods and instead bring up redis-slave pods.
	// We use a nonexistent image here, so that we make sure it won't finish.
	deploymentName, deploymentImageName := "test-rollover-deployment", "redis-slave"
	deploymentReplicas := int32(1)
	deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
	deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
	framework.Logf("Creating deployment %q", deploymentName)
	newDeployment := e2edeployment.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
	newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
		MaxUnavailable: ptr.To(intstr.FromInt32(0)),
		MaxSurge:       ptr.To(intstr.FromInt32(1)),
	}
	newDeployment.Spec.MinReadySeconds = int32(10)
	_, err = c.AppsV1().Deployments(ns).Create(ctx, newDeployment, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Verify that the pods were scaled up and down as expected.
	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
	// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
	err = waitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
	framework.ExpectNoError(err)
	// Check if it's updated to revision 1 correctly
	framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
	err = checkDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
	framework.ExpectNoError(err)

	framework.Logf("Ensure that both replica sets have 1 created replica")
	oldRS, err := c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	ensureReplicas(oldRS, int32(1))
	newRS, err := testutil.GetNewReplicaSet(deployment, c)
	framework.ExpectNoError(err)
	ensureReplicas(newRS, int32(1))

	// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up agnhost pods.
	framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
	updatedDeploymentImageName, updatedDeploymentImage := AgnhostImageName, AgnhostImage
	deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
		update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
		update.Spec.Template.Spec.Containers[0].Image = updatedDeploymentImage
	})
	framework.ExpectNoError(err)
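
	// With MaxUnavailable=0 and MaxSurge=1, the stuck rollout held the old
	// ReplicaSet at 1 replica and created exactly one never-ready replica in the
	// new ReplicaSet, which is what ensureReplicas checked above.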

	// Use observedGeneration to determine if the controller noticed the pod template update.
	framework.Logf("Wait for deployment %q to be observed by the deployment controller", deploymentName)
	err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	framework.ExpectNoError(err)

	// Wait for it to be updated to revision 2
	framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
	framework.ExpectNoError(err)

	framework.Logf("Make sure deployment %q is complete", deploymentName)
	err = waitForDeploymentCompleteAndCheckRolling(c, deployment)
	framework.ExpectNoError(err)

	framework.Logf("Ensure that both old replica sets have no replicas")
	oldRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, rsName, metav1.GetOptions{})
	framework.ExpectNoError(err)
	ensureReplicas(oldRS, int32(0))
	// Not really the new replica set anymore but we GET by name so that's fine.
	newRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, newRS.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	ensureReplicas(newRS, int32(0))
}

func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
	gomega.Expect(*rs.Spec.Replicas).To(gomega.Equal(replicas))
	gomega.Expect(rs.Status.Replicas).To(gomega.Equal(replicas))
}

// randomScale scales the deployment up (~30% of the time) or down (~30%, never
// below 1 replica), and otherwise leaves it unchanged.
func randomScale(d *appsv1.Deployment, i int) {
	switch r := rand.Float32(); {
	case r < 0.3:
		framework.Logf("%02d: scaling up", i)
		*(d.Spec.Replicas)++
	case r < 0.6:
		if *(d.Spec.Replicas) > 1 {
			framework.Logf("%02d: scaling down", i)
			*(d.Spec.Replicas)--
		}
	}
}
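
// testIterativeDeployments applies a random sequence of mutations to a Deployment
// (new rollouts, rollbacks, scaling, pause/resume, and arbitrary pod deletion) and
// then checks that the controller still drives it to a complete, available state.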
func testIterativeDeployments(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	podLabels := map[string]string{"name": WebserverImageName}
	replicas := int32(6)
	zero := int64(0)
	two := int32(2)

	// Create a webserver deployment.
	deploymentName := "webserver"
	fiveMinutes := int32(5 * 60)
	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	d.Spec.ProgressDeadlineSeconds = &fiveMinutes
	d.Spec.RevisionHistoryLimit = &two
	d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
	framework.Logf("Creating deployment %q", deploymentName)
	deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	iterations := 20
	for i := 0; i < iterations; i++ {
		if r := rand.Float32(); r < 0.6 {
			time.Sleep(time.Duration(float32(i) * r * float32(time.Second)))
		}

		switch n := rand.Float32(); {
		case n < 0.2:
			// trigger a new deployment
			framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
			deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
				newEnv := v1.EnvVar{Name: fmt.Sprintf("A%d", i), Value: fmt.Sprintf("%d", i)}
				update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
				randomScale(update, i)
			})
			framework.ExpectNoError(err)

		case n < 0.4:
			// rollback to the previous version
			framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
			deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
				if update.Annotations == nil {
					update.Annotations = make(map[string]string)
				}
				update.Annotations[appsv1.DeprecatedRollbackTo] = "0"
			})
			framework.ExpectNoError(err)

		case n < 0.6:
			// just scaling
			framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
			deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
				randomScale(update, i)
			})
			framework.ExpectNoError(err)

		case n < 0.8:
			// toggling the deployment
			if deployment.Spec.Paused {
				framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
				deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
					update.Spec.Paused = false
					randomScale(update, i)
				})
				framework.ExpectNoError(err)
			} else {
				framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
				deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
					update.Spec.Paused = true
					randomScale(update, i)
				})
				framework.ExpectNoError(err)
			}

		default:
			// arbitrarily delete deployment pods
			framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
			selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
			framework.ExpectNoError(err)
			opts := metav1.ListOptions{LabelSelector: selector.String()}
			podList, err := c.CoreV1().Pods(ns).List(ctx, opts)
			framework.ExpectNoError(err)
			if len(podList.Items) == 0 {
				framework.Logf("%02d: no deployment pods to delete", i)
				continue
			}
			for p := range podList.Items {
				if rand.Float32() < 0.5 {
					continue
				}
				name := podList.Items[p].Name
				framework.Logf("%02d: deleting deployment pod %q", i, name)
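				// Tolerate NotFound below: the pod may already be gone by the
				// time the Delete call is issued.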
				err := c.CoreV1().Pods(ns).Delete(ctx, name, metav1.DeleteOptions{})
				if err != nil && !apierrors.IsNotFound(err) {
					framework.ExpectNoError(err)
				}
			}
		}
	}

	// unpause the deployment if we ended up pausing it
	deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deployment.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	if deployment.Spec.Paused {
		framework.Logf("Resuming deployment %q", deployment.Name)
		deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
			update.Spec.Paused = false
		})
		framework.ExpectNoError(err)
	}

	framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
	err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
	framework.ExpectNoError(err)

	framework.Logf("Waiting for deployment %q status", deploymentName)
	err = e2edeployment.WaitForDeploymentComplete(c, deployment)
	framework.ExpectNoError(err)

	framework.Logf("Checking deployment %q for a complete condition", deploymentName)
	err = waitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, appsv1.DeploymentProgressing)
	framework.ExpectNoError(err)
}

func testDeploymentsControllerRef(ctx context.Context, f *framework.Framework) {
	ns := f.Namespace.Name
	c := f.ClientSet

	deploymentName := "test-orphan-deployment"
	framework.Logf("Creating Deployment %q", deploymentName)
	podLabels := map[string]string{"name": WebserverImageName}
	replicas := int32(1)
	d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

	framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
	rsList := listDeploymentReplicaSets(ctx, c, ns, podLabels)
	gomega.Expect(rsList.Items).To(gomega.HaveLen(1))

	framework.Logf("Obtaining the ReplicaSet's UID")
	orphanedRSUID := rsList.Items[0].UID

	framework.Logf("Checking the ReplicaSet has the right controllerRef")
	err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels)
	framework.ExpectNoError(err)

	framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
	err = orphanDeploymentReplicaSets(ctx, c, deploy)
	framework.ExpectNoError(err)

	ginkgo.By("Wait for the ReplicaSet to be orphaned")
	err = wait.PollWithContext(ctx, dRetryPeriod, dRetryTimeout, waitDeploymentReplicaSetsOrphaned(c, ns, podLabels))
	framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")

	deploymentName = "test-adopt-deployment"
	framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
	d = e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
	deploy, err = c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
	framework.ExpectNoError(err)

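	// The new Deployment's selector matches the orphaned ReplicaSet's labels, so
	// the deployment controller is expected to adopt it instead of creating a
	// second ReplicaSet.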
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef") 1168 err = checkDeploymentReplicaSetsControllerRef(ctx, c, ns, deploy.UID, podLabels) 1169 framework.ExpectNoError(err) 1170 1171 framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName) 1172 rsList = listDeploymentReplicaSets(ctx, c, ns, podLabels) 1173 gomega.Expect(rsList.Items).To(gomega.HaveLen(1)) 1174 1175 framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet") 1176 gomega.Expect(rsList.Items[0].UID).To(gomega.Equal(orphanedRSUID)) 1177 } 1178 1179 // testProportionalScalingDeployment tests that when a RollingUpdate Deployment is scaled in the middle 1180 // of a rollout (either in progress or paused), then the Deployment will balance additional replicas 1181 // in existing active ReplicaSets (ReplicaSets with more than 0 replica) in order to mitigate risk. 1182 func testProportionalScalingDeployment(ctx context.Context, f *framework.Framework) { 1183 ns := f.Namespace.Name 1184 c := f.ClientSet 1185 1186 podLabels := map[string]string{"name": WebserverImageName} 1187 replicas := int32(10) 1188 1189 // Create a webserver deployment. 1190 deploymentName := "webserver-deployment" 1191 d := e2edeployment.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType) 1192 d.Spec.Strategy.RollingUpdate = new(appsv1.RollingUpdateDeployment) 1193 d.Spec.Strategy.RollingUpdate.MaxSurge = ptr.To(intstr.FromInt32(3)) 1194 d.Spec.Strategy.RollingUpdate.MaxUnavailable = ptr.To(intstr.FromInt32(2)) 1195 1196 framework.Logf("Creating deployment %q", deploymentName) 1197 deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) 1198 framework.ExpectNoError(err) 1199 1200 framework.Logf("Waiting for observed generation %d", deployment.Generation) 1201 err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) 1202 framework.ExpectNoError(err) 1203 1204 // Verify that the required pods have come up. 1205 framework.Logf("Waiting for all required pods to come up") 1206 err = e2epod.VerifyPodsRunning(ctx, c, ns, WebserverImageName, false, *(deployment.Spec.Replicas)) 1207 framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err) 1208 1209 framework.Logf("Waiting for deployment %q to complete", deployment.Name) 1210 err = e2edeployment.WaitForDeploymentComplete(c, deployment) 1211 framework.ExpectNoError(err) 1212 1213 firstRS, err := testutil.GetNewReplicaSet(deployment, c) 1214 framework.ExpectNoError(err) 1215 1216 // Update the deployment with a non-existent image so that the new replica set 1217 // will be blocked to simulate a partial rollout. 1218 framework.Logf("Updating deployment %q with a non-existent image", deploymentName) 1219 deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { 1220 update.Spec.Template.Spec.Containers[0].Image = "webserver:404" 1221 }) 1222 framework.ExpectNoError(err) 1223 1224 framework.Logf("Waiting for observed generation %d", deployment.Generation) 1225 err = waitForObservedDeployment(c, ns, deploymentName, deployment.Generation) 1226 framework.ExpectNoError(err) 1227 1228 // Checking state of first rollout's replicaset. 
1229 	maxUnavailable, err := intstr.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, int(*(deployment.Spec.Replicas)), false)
1230 	framework.ExpectNoError(err)
1231 
1232 	// The first rollout's ReplicaSet should have (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
1233 	minAvailableReplicas := replicas - int32(maxUnavailable)
1234 	framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
1235 	err = e2ereplicaset.WaitForReplicaSetTargetAvailableReplicas(ctx, c, firstRS, minAvailableReplicas)
1236 	framework.ExpectNoError(err)
1237 
1238 	// The first rollout's ReplicaSet should have .spec.replicas = 8 too.
1239 	framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
1240 	err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, minAvailableReplicas)
1241 	framework.ExpectNoError(err)
1242 
1243 	// The desired-replicas wait makes sure that the ReplicaSet controller has created the expected number of pods.
1244 	framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
1245 	firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
1246 	framework.ExpectNoError(err)
1247 	err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), firstRS)
1248 	framework.ExpectNoError(err)
1249 
1250 	// Checking the state of the second rollout's ReplicaSet.
1251 	secondRS, err := testutil.GetNewReplicaSet(deployment, c)
1252 	framework.ExpectNoError(err)
1253 
1254 	maxSurge, err := intstr.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
1255 	framework.ExpectNoError(err)
1256 
1257 	// The second rollout's ReplicaSet should have 0 available replicas.
1258 	framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
1259 	gomega.Expect(secondRS.Status.AvailableReplicas).To(gomega.Equal(int32(0)))
1260 
1261 	// The second rollout's ReplicaSet should have (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
1262 	newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
1263 	framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
1264 	err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, newReplicas)
1265 	framework.ExpectNoError(err)
1266 
1267 	// The desired-replicas wait makes sure that the ReplicaSet controller has created the expected number of pods.
1268 	framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
1269 	secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
1270 	framework.ExpectNoError(err)
1271 	err = waitForReplicaSetDesiredReplicas(ctx, c.AppsV1(), secondRS)
1272 	framework.ExpectNoError(err)
1273 
1274 	// Check the deployment's minimum availability.
1275 	framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
1276 	if deployment.Status.AvailableReplicas < minAvailableReplicas {
1277 		err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)
1278 		framework.ExpectNoError(err)
1279 	}
1280 
1281 	// Scale the deployment to 30 replicas.
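	// Proportional scaling distributes the 20 extra replicas according to each
	// ReplicaSet's share of the current total of 8+5=13: the old ReplicaSet gets
	// round(20*8/13) = 12 more (8 -> 20) and the new one round(20*5/13) = 8 more
	// (5 -> 13), so the rollout's relative progress is preserved.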
1282 	newReplicas = int32(30)
1283 	framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
1284 	_, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
1285 		update.Spec.Replicas = &newReplicas
1286 	})
1287 	framework.ExpectNoError(err)
1288 
1289 	framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
1290 	firstRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, firstRS.Name, metav1.GetOptions{})
1291 	framework.ExpectNoError(err)
1292 	secondRS, err = c.AppsV1().ReplicaSets(ns).Get(ctx, secondRS.Name, metav1.GetOptions{})
1293 	framework.ExpectNoError(err)
1294 
1295 	// The first rollout's ReplicaSet should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
1296 	// Note that 12 comes from rounding (30-10)*(8/13) to the nearest integer.
1297 	framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
1298 	err = waitForReplicaSetTargetSpecReplicas(ctx, c, firstRS, 20)
1299 	framework.ExpectNoError(err)
1300 
1301 	// The second rollout's ReplicaSet should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
1302 	// Note that 8 comes from rounding (30-10)*(5/13) to the nearest integer.
1303 	framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
1304 	err = waitForReplicaSetTargetSpecReplicas(ctx, c, secondRS, 13)
1305 	framework.ExpectNoError(err)
1306 }
1307 
// checkDeploymentReplicaSetsControllerRef verifies that every ReplicaSet matching
// label is controlled by the object with the given UID.
1308 func checkDeploymentReplicaSetsControllerRef(ctx context.Context, c clientset.Interface, ns string, uid types.UID, label map[string]string) error {
1309 	rsList := listDeploymentReplicaSets(ctx, c, ns, label)
1310 	for _, rs := range rsList.Items {
1311 		// This ReplicaSet is adopted only once its controllerRef has been updated to point at the given UID.
1312 		if controllerRef := metav1.GetControllerOf(&rs); controllerRef == nil || controllerRef.UID != uid {
1313 			return fmt.Errorf("ReplicaSet %s has unexpected controllerRef %v", rs.Name, controllerRef)
1314 		}
1315 	}
1316 	return nil
1317 }
1318 
// waitDeploymentReplicaSetsOrphaned returns a condition function that reports
// true once no matching ReplicaSet has a controllerRef.
1319 func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label map[string]string) func(ctx context.Context) (bool, error) {
1320 	return func(ctx context.Context) (bool, error) {
1321 		rsList := listDeploymentReplicaSets(ctx, c, ns, label)
1322 		for _, rs := range rsList.Items {
1323 			// This ReplicaSet is orphaned only once its controllerRef has been cleared.
1324 			if controllerRef := metav1.GetControllerOf(&rs); controllerRef != nil {
1325 				return false, nil
1326 			}
1327 		}
1328 		return true, nil
1329 	}
1330 }
1331 
// listDeploymentReplicaSets lists the ReplicaSets matching label in ns and
// asserts that the list is non-empty.
1332 func listDeploymentReplicaSets(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
1333 	selector := labels.Set(label).AsSelector()
1334 	options := metav1.ListOptions{LabelSelector: selector.String()}
1335 	rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, options)
1336 	framework.ExpectNoError(err)
1337 	gomega.Expect(rsList.Items).ToNot(gomega.BeEmpty())
1338 	return rsList
1339 }
1340 
// orphanDeploymentReplicaSets deletes the Deployment with OrphanDependents=true,
// guarded by a UID precondition, so its ReplicaSets survive the deletion.
1341 func orphanDeploymentReplicaSets(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
1342 	trueVar := true
1343 	deleteOptions := metav1.DeleteOptions{OrphanDependents: &trueVar}
1344 	deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
1345 	return c.AppsV1().Deployments(d.Namespace).Delete(ctx, d.Name, deleteOptions)
1346 }
1347 
1348 func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(ctx context.Context, f *framework.Framework) {
1349 	ns := f.Namespace.Name
1350 	c := f.ClientSet
1351 
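	// This test rolls out a Deployment behind a Service with
	// externalTrafficPolicy=Local and verifies that the set of nodes carrying
	// local endpoints never changes mid-rollout: with the Local policy, a node
	// that loses its last ready local endpoint is expected to be dropped from
	// the load balancer's pool via its health check, interrupting traffic.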
name := "test-rolling-update-with-lb" 1353 framework.Logf("Creating Deployment %q", name) 1354 podLabels := map[string]string{"name": name} 1355 replicas := int32(3) 1356 d := e2edeployment.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) 1357 // NewDeployment assigned the same value to both d.Spec.Selector and 1358 // d.Spec.Template.Labels, so mutating the one would mutate the other. 1359 // Thus we need to set d.Spec.Template.Labels to a new value if we want 1360 // to mutate it alone. 1361 d.Spec.Template.Labels = map[string]string{ 1362 "iteration": "0", 1363 "name": name, 1364 } 1365 d.Spec.Template.Spec.Containers[0].Args = []string{"netexec", "--http-port=80", "--udp-port=80"} 1366 // To ensure that a node that had a local endpoint prior to a rolling 1367 // update continues to have a local endpoint throughout the rollout, we 1368 // need an affinity policy that will cause pods to be scheduled on the 1369 // same nodes as old pods, and we need the deployment to scale up a new 1370 // pod before deleting an old pod. This affinity policy will define 1371 // inter-pod affinity for pods of different rollouts and anti-affinity 1372 // for pods of the same rollout, so it will need to be updated when 1373 // performing a rollout. 1374 setAffinities(d, false) 1375 d.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ 1376 MaxSurge: ptr.To(intstr.FromInt32(1)), 1377 MaxUnavailable: ptr.To(intstr.FromInt32(0)), 1378 } 1379 deployment, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{}) 1380 framework.ExpectNoError(err) 1381 err = e2edeployment.WaitForDeploymentComplete(c, deployment) 1382 framework.ExpectNoError(err) 1383 1384 framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns) 1385 jig := e2eservice.NewTestJig(c, ns, name) 1386 jig.Labels = podLabels 1387 service, err := jig.CreateLoadBalancerService(ctx, e2eservice.GetServiceLoadBalancerCreationTimeout(ctx, c), func(svc *v1.Service) { 1388 svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal 1389 }) 1390 framework.ExpectNoError(err) 1391 1392 lbNameOrAddress := e2eservice.GetIngressPoint(&service.Status.LoadBalancer.Ingress[0]) 1393 svcPort := int(service.Spec.Ports[0].Port) 1394 1395 framework.Logf("Hitting the replica set's pods through the service's load balancer") 1396 timeout := e2eservice.LoadBalancerLagTimeoutDefault 1397 if framework.ProviderIs("aws") { 1398 timeout = e2eservice.LoadBalancerLagTimeoutAWS 1399 } 1400 e2eservice.TestReachableHTTP(ctx, lbNameOrAddress, svcPort, timeout) 1401 1402 expectedNodes, err := jig.GetEndpointNodeNames(ctx) 1403 framework.ExpectNoError(err) 1404 1405 framework.Logf("Starting a goroutine to watch the service's endpoints in the background") 1406 done := make(chan struct{}) 1407 failed := make(chan struct{}) 1408 defer close(done) 1409 go func() { 1410 defer ginkgo.GinkgoRecover() 1411 // The affinity policy should ensure that before an old pod is 1412 // deleted, a new pod will have been created on the same node. 1413 // Thus the set of nodes with local endpoints for the service 1414 // should remain unchanged. 
1415 		wait.Until(func() {
1416 			actualNodes, err := jig.GetEndpointNodeNames(ctx)
1417 			if err != nil {
1418 				framework.Logf("The previous set of nodes with local endpoints was %v, now the lookup failed: %v", expectedNodes.List(), err)
1419 				failed <- struct{}{}
1420 				return
1421 			}
1422 			if !actualNodes.Equal(expectedNodes) {
1423 				framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List())
1424 				failed <- struct{}{}
1425 			}
1426 		}, framework.Poll, done)
1427 	}()
1428 
1429 	framework.Logf("Triggering a rolling deployment several times")
1430 	for i := 1; i <= 3; i++ {
1431 		framework.Logf("Updating deployment %q pod spec labels (iteration #%d)", name, i)
1432 		deployment, err = e2edeployment.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
1433 			update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i)
1434 			setAffinities(update, true)
1435 		})
1436 		framework.ExpectNoError(err)
1437 
1438 		framework.Logf("Waiting for observed generation %d", deployment.Generation)
1439 		err = waitForObservedDeployment(c, ns, name, deployment.Generation)
1440 		framework.ExpectNoError(err)
1441 
1442 		framework.Logf("Make sure deployment %q is complete", name)
1443 		err = waitForDeploymentCompleteAndCheckRolling(c, deployment)
1444 		framework.ExpectNoError(err)
1445 	}
1446 
1447 	select {
1448 	case <-failed:
1449 		framework.Failf("Connectivity to the load balancer was interrupted")
1450 	case <-time.After(1 * time.Minute):
1451 	}
1452 }
1453 
1454 // setAffinities sets PodAntiAffinity across pods from the same generation
1455 // of a Deployment and, if explicitly requested, also PodAffinity with pods
1456 // from other generations.
1457 // The terms must be "Required" so that in large clusters, where the
1458 // scheduler may not score all nodes when many of them are feasible, the
1459 // test still has a chance to pass.
1460 func setAffinities(d *appsv1.Deployment, setAffinity bool) {
1461 	affinity := &v1.Affinity{
1462 		PodAntiAffinity: &v1.PodAntiAffinity{
1463 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
1464 				{
1465 					TopologyKey: "kubernetes.io/hostname",
1466 					LabelSelector: &metav1.LabelSelector{
1467 						MatchExpressions: []metav1.LabelSelectorRequirement{
1468 							{
1469 								Key:      "name",
1470 								Operator: metav1.LabelSelectorOpIn,
1471 								Values:   []string{d.Spec.Template.Labels["name"]},
1472 							},
1473 							{
1474 								Key:      "iteration",
1475 								Operator: metav1.LabelSelectorOpIn,
1476 								Values:   []string{d.Spec.Template.Labels["iteration"]},
1477 							},
1478 						},
1479 					},
1480 				},
1481 			},
1482 		},
1483 	}
1484 	if setAffinity {
1485 		affinity.PodAffinity = &v1.PodAffinity{
1486 			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
1487 				{
1488 					TopologyKey: "kubernetes.io/hostname",
1489 					LabelSelector: &metav1.LabelSelector{
1490 						MatchExpressions: []metav1.LabelSelectorRequirement{
1491 							{
1492 								Key:      "name",
1493 								Operator: metav1.LabelSelectorOpIn,
1494 								Values:   []string{d.Spec.Template.Labels["name"]},
1495 							},
1496 							{
1497 								Key:      "iteration",
1498 								Operator: metav1.LabelSelectorOpNotIn,
1499 								Values:   []string{d.Spec.Template.Labels["iteration"]},
1500 							},
1501 						},
1502 					},
1503 				},
1504 			},
1505 		}
1506 	}
1507 	d.Spec.Template.Spec.Affinity = affinity
1508 }
1509 
1510 // watchRecreateDeployment watches Recreate deployments and ensures no new pods will run at the same time as
1511 // old pods.
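// It watches the Deployment object itself and fails as soon as the status
// reports updated replicas coexisting with non-updated ones, i.e.
// status.updatedReplicas > 0 while status.replicas != status.updatedReplicas.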
1512 func watchRecreateDeployment(ctx context.Context, c clientset.Interface, d *appsv1.Deployment) error {
1513 	if d.Spec.Strategy.Type != appsv1.RecreateDeploymentStrategyType {
1514 		return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
1515 	}
1516 
1517 	fieldSelector := fields.OneTermEqualSelector("metadata.name", d.Name).String()
1518 	w := &cache.ListWatch{
1519 		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
1520 			options.FieldSelector = fieldSelector
1521 			return c.AppsV1().Deployments(d.Namespace).Watch(ctx, options)
1522 		},
1523 	}
1524 
1525 	status := d.Status
1526 
1527 	condition := func(event watch.Event) (bool, error) {
1528 		d := event.Object.(*appsv1.Deployment)
1529 		status = d.Status
1530 
1531 		if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
1532 			_, allOldRSs, err := testutil.GetOldReplicaSets(d, c)
1533 			newRS, nerr := testutil.GetNewReplicaSet(d, c)
1534 			if err == nil && nerr == nil {
1535 				framework.Logf("%+v", d)
1536 				testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
1537 				testutil.LogPodsOfDeployment(c, d, append(allOldRSs, newRS), framework.Logf)
1538 			}
1539 			return false, fmt.Errorf("deployment %q is running new pods alongside old pods: %#v", d.Name, status)
1540 		}
1541 
1542 		return *(d.Spec.Replicas) == d.Status.Replicas &&
1543 			*(d.Spec.Replicas) == d.Status.UpdatedReplicas &&
1544 			d.Generation <= d.Status.ObservedGeneration, nil
1545 	}
1546 
1547 	ctxUntil, cancel := context.WithTimeout(ctx, 2*time.Minute)
1548 	defer cancel()
1549 	_, err := watchtools.Until(ctxUntil, d.ResourceVersion, w, condition)
1550 	if wait.Interrupted(err) {
1551 		err = fmt.Errorf("deployment %q never completed: %#v", d.Name, status)
1552 	}
1553 	return err
1554 }
1555 
1556 // waitForDeploymentOldRSsNum waits for the deployment to clean up its old ReplicaSets.
1557 func waitForDeploymentOldRSsNum(ctx context.Context, c clientset.Interface, ns, deploymentName string, desiredRSNum int) error {
1558 	var oldRSs []*appsv1.ReplicaSet
1559 	var d *appsv1.Deployment
1560 
1561 	pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
1562 		deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
1563 		if err != nil {
1564 			return false, err
1565 		}
1566 		d = deployment
1567 
1568 		_, oldRSs, err = testutil.GetOldReplicaSets(deployment, c)
1569 		if err != nil {
1570 			return false, err
1571 		}
1572 		return len(oldRSs) == desiredRSNum, nil
1573 	})
1574 	if wait.Interrupted(pollErr) {
1575 		pollErr = fmt.Errorf("%d old replica sets were not cleaned up for deployment %q", len(oldRSs)-desiredRSNum, deploymentName)
1576 		testutil.LogReplicaSetsOfDeployment(d, oldRSs, nil, framework.Logf)
1577 	}
1578 	return pollErr
1579 }
1580 
1581 // waitForReplicaSetDesiredReplicas waits until the ReplicaSet has the desired number of replicas.
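// "Desired" here means the observed generation has caught up and
// status.replicas matches .spec.replicas on both the cached object passed in
// and the freshly fetched one.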
1582 func waitForReplicaSetDesiredReplicas(ctx context.Context, rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
1583 	desiredGeneration := replicaSet.Generation
1584 	err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) {
1585 		rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
1586 		if err != nil {
1587 			return false, err
1588 		}
1589 		return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(replicaSet.Spec.Replicas) && rs.Status.Replicas == *(rs.Spec.Replicas), nil
1590 	})
1591 	if wait.Interrupted(err) {
1592 		err = fmt.Errorf("replicaset %q never had desired number of replicas", replicaSet.Name)
1593 	}
1594 	return err
1595 }
1596 
1597 // waitForReplicaSetTargetSpecReplicas waits for .spec.replicas of a ReplicaSet to equal targetReplicaNum.
1598 func waitForReplicaSetTargetSpecReplicas(ctx context.Context, c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
1599 	desiredGeneration := replicaSet.Generation
1600 	err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, true, func(ctx context.Context) (bool, error) {
1601 		rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(ctx, replicaSet.Name, metav1.GetOptions{})
1602 		if err != nil {
1603 			return false, err
1604 		}
1605 		return rs.Status.ObservedGeneration >= desiredGeneration && *rs.Spec.Replicas == targetReplicaNum, nil
1606 	})
1607 	if wait.Interrupted(err) {
1608 		err = fmt.Errorf("replicaset %q never had desired number of .spec.replicas", replicaSet.Name)
1609 	}
1610 	return err
1611 }
1612 
1613 // checkDeploymentRevisionAndImage checks that the deployment's (and its new ReplicaSet's) revision and image are as expected.
1614 func checkDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName, revision, image string) error {
1615 	return testutil.CheckDeploymentRevisionAndImage(c, ns, deploymentName, revision, image)
1616 }
1617 
1618 // waitForObservedDeployment waits until the deployment's observed generation reaches desiredGeneration.
1619 func waitForObservedDeployment(c clientset.Interface, ns, deploymentName string, desiredGeneration int64) error {
1620 	return testutil.WaitForObservedDeployment(c, ns, deploymentName, desiredGeneration)
1621 }
1622 
1623 // waitForDeploymentWithCondition waits for the specified deployment condition.
1624 func waitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
1625 	return testutil.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, framework.Logf, poll, pollLongTimeout)
1626 }
1627 
1628 // waitForDeploymentCompleteAndCheckRolling waits for the deployment to complete and checks that the
1629 // rolling update strategy is not broken at any time during the rollout.
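// A broken rolling update here means the observed replica counts violated the
// maxSurge/maxUnavailable bounds at some point while the rollout was in
// progress; the bound checking itself is delegated to testutil.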
1630 func waitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
1631 	return testutil.WaitForDeploymentCompleteAndCheckRolling(c, d, framework.Logf, poll, pollLongTimeout)
1632 }
1633 
1634 // waitForDeploymentUpdatedReplicasGTE waits for the given deployment to be observed by the controller and to have at least minUpdatedReplicas updated replicas.
1635 func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
1636 	return testutil.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, poll, pollLongTimeout)
1637 }
1638 
1639 // Deployment should have a working scale subresource.
1640 func testDeploymentSubresources(ctx context.Context, f *framework.Framework) {
1641 	ns := f.Namespace.Name
1642 	c := f.ClientSet
1643 
1644 	deploymentName := "test-new-deployment"
1645 	framework.Logf("Creating simple deployment %s", deploymentName)
1646 	d := e2edeployment.NewDeployment(deploymentName, int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
1647 	deploy, err := c.AppsV1().Deployments(ns).Create(ctx, d, metav1.CreateOptions{})
1648 	framework.ExpectNoError(err)
1649 
1650 	// Wait for it to be updated to revision 1.
1651 	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
1652 	framework.ExpectNoError(err)
1653 
1654 	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
1655 	framework.ExpectNoError(err)
1656 
1657 	_, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
1658 	framework.ExpectNoError(err)
1659 
1660 	ginkgo.By("getting scale subresource")
1661 	scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, deploymentName, metav1.GetOptions{})
1662 	if err != nil {
1663 		framework.Failf("Failed to get scale subresource: %v", err)
1664 	}
1665 	gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1)))
1666 	gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1)))
1667 
1668 	ginkgo.By("updating a scale subresource")
1669 	scale.ResourceVersion = "" // indicate the scale update should be unconditional
1670 	scale.Spec.Replicas = 2
1671 	scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, deploymentName, scale, metav1.UpdateOptions{})
1672 	if err != nil {
1673 		framework.Failf("Failed to update scale subresource: %v", err)
1674 	}
1675 	gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2)))
1676 
1677 	ginkgo.By("verifying the deployment Spec.Replicas was modified")
1678 	deployment, err := c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{})
1679 	if err != nil {
1680 		framework.Failf("Failed to get deployment resource: %v", err)
1681 	}
1682 	gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(2)))
1683 
1684 	ginkgo.By("Patch a scale subresource")
1685 	scale.ResourceVersion = "" // indicate the scale patch should be unconditional
1686 	scale.Spec.Replicas = 4    // should be 2 after the UpdateScale operation; now patch to 4
1687 	deploymentScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
1688 		Spec: autoscalingv1.ScaleSpec{
1689 			Replicas: scale.Spec.Replicas,
1690 		},
1691 	})
1692 	framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
1693 
1694 	_, err = c.AppsV1().Deployments(ns).Patch(ctx, deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale")
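	// Passing "scale" as the subresource argument routes the patch to the
	// deployment's /scale endpoint, which is why the payload is an
	// autoscalingv1.Scale object rather than a Deployment; only
	// spec.replicas is changed.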
framework.ExpectNoError(err, "Failed to patch deployment: %v", err) 1696 1697 deployment, err = c.AppsV1().Deployments(ns).Get(ctx, deploymentName, metav1.GetOptions{}) 1698 framework.ExpectNoError(err, "Failed to get deployment resource: %v", err) 1699 gomega.Expect(*(deployment.Spec.Replicas)).To(gomega.Equal(int32(4)), "deployment should have 4 replicas") 1700 }