/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package deployment hosts shared helpers for the Deployment integration
// tests: spinning up a test apiserver with the Deployment/ReplicaSet
// controllers, and polling/mutating a Deployment under test.
package deployment

import (
	"context"
	"fmt"
	"sync"
	"testing"
	"time"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/klog/v2/ktesting"
	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller/deployment"
	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
	"k8s.io/kubernetes/pkg/controller/replicaset"
	"k8s.io/kubernetes/test/integration/framework"
	testutil "k8s.io/kubernetes/test/utils"
)

const (
	// pollInterval and pollTimeout bound every wait.Poll* loop in this file.
	pollInterval = 100 * time.Millisecond
	pollTimeout  = 60 * time.Second

	// Placeholder container name/image for pod templates; pods never
	// actually run in these tests, readiness is set manually.
	fakeContainerName = "fake-name"
	fakeImage         = "fakeimage"
)

// pauseFn is an update function that pauses a Deployment's rollout.
var pauseFn = func(update *apps.Deployment) {
	update.Spec.Paused = true
}

// resumeFn is an update function that resumes a paused Deployment's rollout.
var resumeFn = func(update *apps.Deployment) {
	update.Spec.Paused = false
}

// deploymentTester bundles the test handle, a clientset, and the Deployment
// under test so the helper methods below can poll and mutate it.
type deploymentTester struct {
	t          *testing.T
	c          clientset.Interface
	deployment *apps.Deployment
}

// testLabels returns the label set shared by the Deployment selector and its
// pod template.
func testLabels() map[string]string {
	return map[string]string{"name": "test"}
}

// newDeployment returns a RollingUpdate Deployment with a fake container image.
func newDeployment(name, ns string, replicas int32) *apps.Deployment {
	return &apps.Deployment{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Deployment",
			APIVersion: "apps/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns,
			Name:      name,
		},
		Spec: apps.DeploymentSpec{
			Replicas: &replicas,
			Selector: &metav1.LabelSelector{MatchLabels: testLabels()},
			Strategy: apps.DeploymentStrategy{
				Type: apps.RollingUpdateDeploymentStrategyType,
				// Zero-valued RollingUpdateDeployment; maxSurge/maxUnavailable
				// are left to API defaulting.
				RollingUpdate: new(apps.RollingUpdateDeployment),
			},
			Template: v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: testLabels(),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  fakeContainerName,
							Image: fakeImage,
						},
					},
				},
			},
		},
	}
}

// dcSetup sets up necessities for Deployment integration test, including control plane, apiserver, informers, and clientset
func dcSetup(ctx context.Context, t *testing.T) (kubeapiservertesting.TearDownFunc, *replicaset.ReplicaSetController, *deployment.DeploymentController, informers.SharedInformerFactory, clientset.Interface) {
	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
	logger, _ := ktesting.NewTestContext(t)

	config := restclient.CopyConfig(server.ClientConfig)
	clientSet, err := clientset.NewForConfig(config)
	if err != nil {
		t.Fatalf("error in create clientset: %v", err)
	}
	resyncPeriod := 12 * time.Hour
	// Each consumer (informers, each controller) gets its own client with a
	// distinct user agent so apiserver requests can be attributed.
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "deployment-informers")), resyncPeriod)

	dc, err := deployment.NewDeploymentController(
		ctx,
		informers.Apps().V1().Deployments(),
		informers.Apps().V1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "deployment-controller")),
	)
	if err != nil {
		t.Fatalf("error creating Deployment controller: %v", err)
	}
	rm := replicaset.NewReplicaSetController(
		logger,
		informers.Apps().V1().ReplicaSets(),
		informers.Core().V1().Pods(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "replicaset-controller")),
		replicaset.BurstReplicas,
	)
	// Controllers and informers are constructed but NOT started here; see
	// runControllersAndInformers.
	return server.TearDownFn, rm, dc, informers, clientSet
}

// dcSimpleSetup sets up necessities for Deployment integration test, including control plane, apiserver,
// and clientset, but not controllers and informers
func dcSimpleSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, clientset.Interface) {
	// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
	server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

	config := restclient.CopyConfig(server.ClientConfig)
	clientSet, err := clientset.NewForConfig(config)
	if err != nil {
		t.Fatalf("error in create clientset: %v", err)
	}
	return server.TearDownFn, clientSet
}

// runControllersAndInformers runs RS and deployment controllers and informers.
// The returned function cancels their shared context, stopping all of them.
func runControllersAndInformers(t *testing.T, rm *replicaset.ReplicaSetController, dc *deployment.DeploymentController, informers informers.SharedInformerFactory) func() {
	ctx, cancelFn := context.WithCancel(context.Background())
	informers.Start(ctx.Done())
	// 5 workers each for the ReplicaSet and Deployment controllers.
	go rm.Run(ctx, 5)
	go dc.Run(ctx, 5)
	return cancelFn
}

// addPodConditionReady sets given pod status to ready at given time.
// It overwrites the whole Status, dropping any previous conditions.
func addPodConditionReady(pod *v1.Pod, time metav1.Time) {
	pod.Status = v1.PodStatus{
		Phase: v1.PodRunning,
		Conditions: []v1.PodCondition{
			{
				Type:               v1.PodReady,
				Status:             v1.ConditionTrue,
				LastTransitionTime: time,
			},
		},
	}
}

// waitForDeploymentRevisionAndImage polls until the Deployment's new
// ReplicaSet carries the given revision annotation and container image.
// NOTE(review): the error format prints the Deployment name where "revision"
// is stated, not the revision argument — confirm intent before relying on it.
func (d *deploymentTester) waitForDeploymentRevisionAndImage(revision, image string) error {
	if err := testutil.WaitForDeploymentRevisionAndImage(d.c, d.deployment.Namespace, d.deployment.Name, revision, image, d.t.Logf, pollInterval, pollTimeout); err != nil {
		return fmt.Errorf("failed to wait for Deployment revision %s: %v", d.deployment.Name, err)
	}
	return nil
}

// markPodReady sets the pod's Ready condition (see addPodConditionReady) and
// persists it via the pod status subresource.
func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
	addPodConditionReady(pod, metav1.Now())
	_, err := c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
	return err
}

// markUpdatedPodsReady manually marks updated Deployment pods status to ready,
// until the deployment is complete
func (d *deploymentTester) markUpdatedPodsReady(wg *sync.WaitGroup) {
	defer wg.Done()

	ns := d.deployment.Namespace
	err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
		// We're done when the deployment is complete
		if completed, err := d.deploymentComplete(); err != nil {
			return false, err
		} else if completed {
			return true, nil
		}
		// Otherwise, mark remaining pods as ready
		pods, err := d.listUpdatedPods()
		if err != nil {
			// Listing failures are transient; log and retry on the next poll.
			d.t.Log(err)
			return false, nil
		}
		d.t.Logf("%d/%d of deployment pods are created", len(pods), *d.deployment.Spec.Replicas)
		for i := range pods {
			pod := pods[i]
			if podutil.IsPodReady(&pod) {
				continue
			}
			if err = markPodReady(d.c, ns, &pod); err != nil {
				// Best effort: a failed status update is retried on the next poll.
				d.t.Logf("failed to update Deployment pod %s, will retry later: %v", pod.Name, err)
			}
		}
		return false, nil
	})
	if err != nil {
		// Reported via t.Errorf because this runs in a goroutine and has no
		// error return path.
		d.t.Errorf("failed to mark updated Deployment pods to ready: %v", err)
	}
}

// deploymentComplete fetches the latest Deployment from the apiserver and
// reports whether its status satisfies the desired spec held by the tester.
func (d *deploymentTester) deploymentComplete() (bool, error) {
	latest, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	return deploymentutil.DeploymentComplete(d.deployment, &latest.Status), nil
}

// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func (d *deploymentTester) waitForDeploymentCompleteAndCheckRolling() error {
	return testutil.WaitForDeploymentCompleteAndCheckRolling(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout)
}

// Waits for the deployment to complete, and don't check if rolling update strategy is broken.
// Rolling update strategy is used only during a rolling update, and can be violated in other situations,
// such as shortly after a scaling event or the deployment is just created.
241 func (d *deploymentTester) waitForDeploymentComplete() error { 242 return testutil.WaitForDeploymentComplete(d.c, d.deployment, d.t.Logf, pollInterval, pollTimeout) 243 } 244 245 // waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady waits for the Deployment to complete 246 // while marking updated Deployment pods as ready at the same time. 247 // Uses hard check to make sure rolling update strategy is not violated at any times. 248 func (d *deploymentTester) waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady() error { 249 var wg sync.WaitGroup 250 251 // Manually mark updated Deployment pods as ready in a separate goroutine 252 wg.Add(1) 253 go d.markUpdatedPodsReady(&wg) 254 // Wait for goroutine to finish, for all return paths. 255 defer wg.Wait() 256 257 // Wait for the Deployment status to complete while Deployment pods are becoming ready 258 err := d.waitForDeploymentCompleteAndCheckRolling() 259 if err != nil { 260 return fmt.Errorf("failed to wait for Deployment %s to complete: %v", d.deployment.Name, err) 261 } 262 263 return nil 264 } 265 266 // waitForDeploymentCompleteAndMarkPodsReady waits for the Deployment to complete 267 // while marking updated Deployment pods as ready at the same time. 
268 func (d *deploymentTester) waitForDeploymentCompleteAndMarkPodsReady() error { 269 var wg sync.WaitGroup 270 271 // Manually mark updated Deployment pods as ready in a separate goroutine 272 wg.Add(1) 273 go d.markUpdatedPodsReady(&wg) 274 275 // Wait for the Deployment status to complete using soft check, while Deployment pods are becoming ready 276 err := d.waitForDeploymentComplete() 277 if err != nil { 278 return fmt.Errorf("failed to wait for Deployment status %s: %v", d.deployment.Name, err) 279 } 280 281 // Wait for goroutine to finish 282 wg.Wait() 283 284 return nil 285 } 286 287 func (d *deploymentTester) updateDeployment(applyUpdate testutil.UpdateDeploymentFunc) (*apps.Deployment, error) { 288 return testutil.UpdateDeploymentWithRetries(d.c, d.deployment.Namespace, d.deployment.Name, applyUpdate, d.t.Logf, pollInterval, pollTimeout) 289 } 290 291 func (d *deploymentTester) waitForObservedDeployment(desiredGeneration int64) error { 292 if err := testutil.WaitForObservedDeployment(d.c, d.deployment.Namespace, d.deployment.Name, desiredGeneration); err != nil { 293 return fmt.Errorf("failed waiting for ObservedGeneration of deployment %s to become %d: %v", d.deployment.Name, desiredGeneration, err) 294 } 295 return nil 296 } 297 298 func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) { 299 deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{}) 300 if err != nil { 301 return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err) 302 } 303 rs, err := testutil.GetNewReplicaSet(deployment, d.c) 304 if err != nil { 305 return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err) 306 } 307 return rs, nil 308 } 309 310 func (d *deploymentTester) expectNoNewReplicaSet() error { 311 rs, err := d.getNewReplicaSet() 312 if err != nil { 313 return err 314 } 315 if rs != nil { 316 return fmt.Errorf("expected 
deployment %s not to create a new replicaset, got %v", d.deployment.Name, rs) 317 } 318 return nil 319 } 320 321 func (d *deploymentTester) expectNewReplicaSet() (*apps.ReplicaSet, error) { 322 rs, err := d.getNewReplicaSet() 323 if err != nil { 324 return nil, err 325 } 326 if rs == nil { 327 return nil, fmt.Errorf("expected deployment %s to create a new replicaset, got nil", d.deployment.Name) 328 } 329 return rs, nil 330 } 331 332 func (d *deploymentTester) updateReplicaSet(name string, applyUpdate testutil.UpdateReplicaSetFunc) (*apps.ReplicaSet, error) { 333 return testutil.UpdateReplicaSetWithRetries(d.c, d.deployment.Namespace, name, applyUpdate, d.t.Logf, pollInterval, pollTimeout) 334 } 335 336 func (d *deploymentTester) waitForDeploymentUpdatedReplicasGTE(minUpdatedReplicas int32) error { 337 return testutil.WaitForDeploymentUpdatedReplicasGTE(d.c, d.deployment.Namespace, d.deployment.Name, minUpdatedReplicas, d.deployment.Generation, pollInterval, pollTimeout) 338 } 339 340 func (d *deploymentTester) waitForDeploymentWithCondition(reason string, condType apps.DeploymentConditionType) error { 341 return testutil.WaitForDeploymentWithCondition(d.c, d.deployment.Namespace, d.deployment.Name, reason, condType, d.t.Logf, pollInterval, pollTimeout) 342 } 343 344 func (d *deploymentTester) listUpdatedPods() ([]v1.Pod, error) { 345 selector, err := metav1.LabelSelectorAsSelector(d.deployment.Spec.Selector) 346 if err != nil { 347 return nil, fmt.Errorf("failed to parse deployment selector: %v", err) 348 } 349 pods, err := d.c.CoreV1().Pods(d.deployment.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) 350 if err != nil { 351 return nil, fmt.Errorf("failed to list deployment pods, will retry later: %v", err) 352 } 353 newRS, err := d.getNewReplicaSet() 354 if err != nil { 355 return nil, fmt.Errorf("failed to get new replicaset of deployment %q: %v", d.deployment.Name, err) 356 } 357 if newRS == nil { 358 return nil, 
fmt.Errorf("unable to find new replicaset of deployment %q", d.deployment.Name) 359 } 360 361 var ownedPods []v1.Pod 362 for _, pod := range pods.Items { 363 rs := metav1.GetControllerOf(&pod) 364 if rs.UID == newRS.UID { 365 ownedPods = append(ownedPods, pod) 366 } 367 } 368 return ownedPods, nil 369 } 370 371 func (d *deploymentTester) waitRSStable(replicaset *apps.ReplicaSet) error { 372 return testutil.WaitRSStable(d.t, d.c, replicaset, pollInterval, pollTimeout) 373 } 374 375 func (d *deploymentTester) scaleDeployment(newReplicas int32) error { 376 var err error 377 d.deployment, err = d.updateDeployment(func(update *apps.Deployment) { 378 update.Spec.Replicas = &newReplicas 379 }) 380 if err != nil { 381 return fmt.Errorf("failed updating deployment %q: %v", d.deployment.Name, err) 382 } 383 384 if err := d.waitForDeploymentCompleteAndMarkPodsReady(); err != nil { 385 return err 386 } 387 388 rs, err := d.expectNewReplicaSet() 389 if err != nil { 390 return err 391 } 392 if *rs.Spec.Replicas != newReplicas { 393 return fmt.Errorf("expected new replicaset replicas = %d, got %d", newReplicas, *rs.Spec.Replicas) 394 } 395 return nil 396 } 397 398 // waitForReadyReplicas waits for number of ready replicas to equal number of replicas. 399 func (d *deploymentTester) waitForReadyReplicas() error { 400 if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { 401 deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{}) 402 if err != nil { 403 return false, fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err) 404 } 405 return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas, nil 406 }); err != nil { 407 return fmt.Errorf("failed to wait for .readyReplicas to equal .replicas: %v", err) 408 } 409 return nil 410 } 411 412 // markUpdatedPodsReadyWithoutComplete marks updated Deployment pods as ready without waiting for deployment to complete. 
413 func (d *deploymentTester) markUpdatedPodsReadyWithoutComplete() error { 414 if err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { 415 pods, err := d.listUpdatedPods() 416 if err != nil { 417 return false, err 418 } 419 for i := range pods { 420 pod := pods[i] 421 if podutil.IsPodReady(&pod) { 422 continue 423 } 424 if err = markPodReady(d.c, d.deployment.Namespace, &pod); err != nil { 425 d.t.Logf("failed to update Deployment pod %q, will retry later: %v", pod.Name, err) 426 return false, nil 427 } 428 } 429 return true, nil 430 }); err != nil { 431 return fmt.Errorf("failed to mark all updated pods as ready: %v", err) 432 } 433 return nil 434 } 435 436 // Verify all replicas fields of DeploymentStatus have desired count. 437 // Immediately return an error when found a non-matching replicas field. 438 func (d *deploymentTester) checkDeploymentStatusReplicasFields(replicas, updatedReplicas, readyReplicas, availableReplicas, unavailableReplicas int32) error { 439 deployment, err := d.c.AppsV1().Deployments(d.deployment.Namespace).Get(context.TODO(), d.deployment.Name, metav1.GetOptions{}) 440 if err != nil { 441 return fmt.Errorf("failed to get deployment %q: %v", d.deployment.Name, err) 442 } 443 if deployment.Status.Replicas != replicas { 444 return fmt.Errorf("unexpected .replicas: expect %d, got %d", replicas, deployment.Status.Replicas) 445 } 446 if deployment.Status.UpdatedReplicas != updatedReplicas { 447 return fmt.Errorf("unexpected .updatedReplicas: expect %d, got %d", updatedReplicas, deployment.Status.UpdatedReplicas) 448 } 449 if deployment.Status.ReadyReplicas != readyReplicas { 450 return fmt.Errorf("unexpected .readyReplicas: expect %d, got %d", readyReplicas, deployment.Status.ReadyReplicas) 451 } 452 if deployment.Status.AvailableReplicas != availableReplicas { 453 return fmt.Errorf("unexpected .replicas: expect %d, got %d", availableReplicas, deployment.Status.AvailableReplicas) 454 } 455 if 
deployment.Status.UnavailableReplicas != unavailableReplicas { 456 return fmt.Errorf("unexpected .replicas: expect %d, got %d", unavailableReplicas, deployment.Status.UnavailableReplicas) 457 } 458 return nil 459 }