sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/topology/cluster/conditions_test.go

/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cluster

import (
	"testing"

	. "github.com/onsi/gomega"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
	"sigs.k8s.io/cluster-api/internal/test/builder"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// TestReconcileTopologyReconciledCondition verifies that reconcileTopologyReconciledCondition
// sets the TopologyReconciled condition on the Cluster as expected for the different reconcile outcomes.
func TestReconcileTopologyReconciledCondition(t *testing.T) {
	g := NewWithT(t)
	scheme := runtime.NewScheme()
	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
	g.Expect(expv1.AddToScheme(scheme)).To(Succeed())

	deletionTime := metav1.Unix(0, 0)
	tests := []struct {
		name                 string
		reconcileErr         error
		s                    *scope.Scope
		cluster              *clusterv1.Cluster
		machines             []*clusterv1.Machine
		wantConditionStatus  corev1.ConditionStatus
		wantConditionReason  string
		wantConditionMessage string
		wantErr              bool
	}{
		{
			name: "should set the condition to false if there is a reconcile error",
			reconcileErr: errors.New("reconcile error"),
			cluster: &clusterv1.Cluster{},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconcileFailedReason,
			wantConditionMessage: "reconcile error",
			wantErr: false,
		},
		{
			name: "should set the condition to false if the ClusterClass is out of date",
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					ClusterClass: &clusterv1.ClusterClass{
						ObjectMeta: metav1.ObjectMeta{
							Name: "class1",
							Generation: 10,
						},
						Status: clusterv1.ClusterClassStatus{
							ObservedGeneration: 999,
						},
					},
				},
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledClusterClassNotReconciledReason,
			wantConditionMessage: "ClusterClass not reconciled. If this condition persists please check ClusterClass status. A ClusterClass is reconciled if" +
				".status.observedGeneration == .metadata.generation is true. If this is not the case either ClusterClass reconciliation failed or the ClusterClass is paused",
			wantErr: false,
		},
		{
			name: "should set the condition to false if there is a blocking hook",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				HookResponseTracker: func() *scope.HookResponseTracker {
					hrt := scope.NewHookResponseTracker()
					hrt.Add(runtimehooksv1.BeforeClusterUpgrade, &runtimehooksv1.BeforeClusterUpgradeResponse{
						CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
							CommonResponse: runtimehooksv1.CommonResponse{
								Message: "msg",
							},
							RetryAfterSeconds: int32(10),
						},
					})
					return hrt
				}(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledHookBlockingReason,
			wantConditionMessage: "hook \"BeforeClusterUpgrade\" is blocking: msg",
		},
		{
			name: "should set the condition to false if new version is not picked up because control plane is provisioning",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.21.2").
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = true
					ut.ControlPlane.IsProvisioning = true
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason,
			wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. Control plane is completing initial provisioning",
		},
		{
			name: "should set the condition to false if new version is not picked up because control plane is upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.21.2").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = true
					ut.ControlPlane.IsUpgrading = true
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason,
			wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. Control plane is upgrading to version v1.21.2",
		},
		{
			name: "should set the condition to false if new version is not picked up because control plane is scaling",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.21.2").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = true
					ut.ControlPlane.IsScaling = true
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason,
			wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. Control plane is reconciling desired replicas",
		},
		{
			name: "should set the condition to false if new version is not picked up because at least one of the machine deployments is upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.21.2").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(1),
									UpdatedReplicas: int32(1),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = true
					ut.MachineDeployments.MarkUpgrading("md0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason,
			wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are upgrading",
		},
		{
			name: "should set the condition to false if new version is not picked up because at least one of the machine pools is upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.21.2").
							WithReplicas(3).
							Build(),
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(1),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = true
					ut.MachinePools.MarkUpgrading("mp0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledControlPlaneUpgradePendingReason,
			wantConditionMessage: "Control plane rollout and upgrade to version v1.22.0 on hold. MachinePool(s) mp0-abc123 are upgrading",
		},
		{
			name: "should set the condition to false if control plane picked the new version but machine deployments did not because control plane is upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsUpgrading = true
					ut.MachineDeployments.MarkPendingUpgrade("md0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason,
			wantConditionMessage: "MachineDeployment(s) md0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is upgrading to version v1.22.0",
		},
		{
			name: "should set the condition to false if control plane picked the new version but machine pools did not because control plane is upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsUpgrading = true
					ut.MachinePools.MarkPendingUpgrade("mp0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason,
			wantConditionMessage: "MachinePool(s) mp0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is upgrading to version v1.22.0",
		},
		{
			name: "should set the condition to false if control plane picked the new version but machine deployments did not because control plane is scaling",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsScaling = true
					ut.MachineDeployments.MarkPendingUpgrade("md0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason,
			wantConditionMessage: "MachineDeployment(s) md0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is reconciling desired replicas",
		},
		{
			name: "should set the condition to false if control plane picked the new version but machine pools did not because control plane is scaling",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsScaling = true
					ut.MachinePools.MarkPendingUpgrade("mp0-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason,
			wantConditionMessage: "MachinePool(s) mp0-abc123 rollout and upgrade to version v1.22.0 on hold. Control plane is reconciling desired replicas",
		},
		{
			name: "should set the condition to false if control plane picked the new version but there are machine deployments pending create because control plane is scaling",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsScaling = true
					ut.MachineDeployments.MarkPendingCreate("md0")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsCreatePendingReason,
			wantConditionMessage: "MachineDeployment(s) for Topologies md0 creation on hold. Control plane is reconciling desired replicas",
		},
		{
			name: "should set the condition to false if control plane picked the new version but there are machine pools pending create because control plane is scaling",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsScaling = true
					ut.MachinePools.MarkPendingCreate("mp0")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachinePoolsCreatePendingReason,
			wantConditionMessage: "MachinePool(s) for Topologies mp0 creation on hold. Control plane is reconciling desired replicas",
		},
		{
			name: "should set the condition to true if control plane picked the new version and is upgrading but there are no machine deployments or machine pools",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsUpgrading = true
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionTrue,
		},
		{
			name: "should set the condition to true if control plane picked the new version and is scaling but there are no machine deployments or machine pools",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.ControlPlane.IsScaling = true
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionTrue,
		},
		{
			name: "should set the condition to false if some machine deployments have not picked the new version because other machine deployments are upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithSelector(metav1.LabelSelector{
									MatchLabels: map[string]string{
										clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md0",
									},
								}).
								WithStatus(clusterv1.MachineDeploymentStatus{
									// MD is not ready because we don't have 2 updated, ready and available replicas.
									Replicas: int32(2),
									UpdatedReplicas: int32(1),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"md1": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md1-abc123").
								WithReplicas(2).
								WithVersion("v1.21.2").
								WithSelector(metav1.LabelSelector{
									MatchLabels: map[string]string{
										clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md1",
									},
								}).
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.MachineDeployments.MarkUpgrading("md0-abc123")
					ut.MachineDeployments.MarkPendingUpgrade("md1-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			machines: []*clusterv1.Machine{
				builder.Machine("ns1", "md0-machine0").
					WithLabels(map[string]string{clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md0"}).
					WithVersion("v1.21.2"). // Machine's version does not match MachineDeployment's version
					Build(),
				builder.Machine("ns1", "md1-machine0").
					WithLabels(map[string]string{clusterv1.ClusterTopologyMachineDeploymentNameLabel: "md1"}).
					WithVersion("v1.21.2").
					Build(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradePendingReason,
			wantConditionMessage: "MachineDeployment(s) md1-abc123 rollout and upgrade to version v1.22.0 on hold. MachineDeployment(s) md0-abc123 are upgrading",
		},
		{
			name: "should set the condition to false if some machine pools have not picked the new version because other machine pools are upgrading",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(expv1.MachinePoolStatus{
									// mp is not ready because we don't have 2 updated, ready and available replicas.
									Replicas: int32(2),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"mp1": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp1-abc123").
								WithReplicas(2).
								WithVersion("v1.21.2").
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.MachinePools.MarkUpgrading("mp0-abc123")
					ut.MachinePools.MarkPendingUpgrade("mp1-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			machines: []*clusterv1.Machine{
				builder.Machine("ns1", "mp0-machine0").
					WithLabels(map[string]string{clusterv1.ClusterTopologyMachinePoolNameLabel: "mp0"}).
					WithVersion("v1.21.2"). // Machine's version does not match MachinePool's version
					Build(),
				builder.Machine("ns1", "mp1-machine0").
					WithLabels(map[string]string{clusterv1.ClusterTopologyMachinePoolNameLabel: "mp1"}).
					WithVersion("v1.21.2").
					Build(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradePendingReason,
			wantConditionMessage: "MachinePool(s) mp1-abc123 rollout and upgrade to version v1.22.0 on hold. MachinePool(s) mp0-abc123 are upgrading",
		},
		{
			name: "should set the condition to false if some machine deployments have not picked the new version because their upgrade has been deferred",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"md1": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md1-abc123").
								WithReplicas(2).
								WithVersion("v1.21.2").
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.MachineDeployments.MarkDeferredUpgrade("md1-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachineDeploymentsUpgradeDeferredReason,
			wantConditionMessage: "MachineDeployment(s) md1-abc123 rollout and upgrade to version v1.22.0 deferred.",
		},
		{
			name: "should set the condition to false if some machine pools have not picked the new version because their upgrade has been deferred",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"mp1": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp1-abc123").
								WithReplicas(2).
								WithVersion("v1.21.2").
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					ut.MachinePools.MarkDeferredUpgrade("mp1-abc123")
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.TopologyReconciledMachinePoolsUpgradeDeferredReason,
			wantConditionMessage: "MachinePool(s) mp1-abc123 rollout and upgrade to version v1.22.0 deferred.",
		},
		{
			name: "should set the condition to true if there are no reconcile errors and control plane and all machine deployments and machine pools picked up the new version",
			reconcileErr: nil,
			cluster: &clusterv1.Cluster{},
			s: &scope.Scope{
				Blueprint: &scope.ClusterBlueprint{
					Topology: &clusterv1.Topology{
						Version: "v1.22.0",
					},
				},
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
					ControlPlane: &scope.ControlPlaneState{
						Object: builder.ControlPlane("ns1", "controlplane1").
							WithVersion("v1.22.0").
							WithReplicas(3).
							Build(),
					},
					MachineDeployments: scope.MachineDeploymentsStateMap{
						"md0": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(1),
									UpdatedReplicas: int32(1),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"md1": &scope.MachineDeploymentState{
							Object: builder.MachineDeployment("ns1", "md1-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(clusterv1.MachineDeploymentStatus{
									Replicas: int32(2),
									UpdatedReplicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
					MachinePools: scope.MachinePoolsStateMap{
						"mp0": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp0-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(1),
									ReadyReplicas: int32(1),
									AvailableReplicas: int32(1),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
						"mp1": &scope.MachinePoolState{
							Object: builder.MachinePool("ns1", "mp1-abc123").
								WithReplicas(2).
								WithVersion("v1.22.0").
								WithStatus(expv1.MachinePoolStatus{
									Replicas: int32(2),
									ReadyReplicas: int32(2),
									AvailableReplicas: int32(2),
									UnavailableReplicas: int32(0),
								}).
								Build(),
						},
					},
				},
				UpgradeTracker: func() *scope.UpgradeTracker {
					ut := scope.NewUpgradeTracker()
					ut.ControlPlane.IsPendingUpgrade = false
					return ut
				}(),
				HookResponseTracker: scope.NewHookResponseTracker(),
			},
			wantConditionStatus: corev1.ConditionTrue,
		},
		{
			name: "should set the TopologyReconciledCondition to False if the cluster has been deleted",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &deletionTime,
				},
			},
			wantConditionStatus: corev1.ConditionFalse,
			wantConditionReason: clusterv1.DeletedReason,
			wantConditionMessage: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			objs := []client.Object{}
			if tt.s != nil && tt.s.Current != nil {
				for _, md := range tt.s.Current.MachineDeployments {
					objs = append(objs, md.Object)
				}
				for _, mp := range tt.s.Current.MachinePools {
					objs = append(objs, mp.Object)
				}
			}
			for _, m := range tt.machines {
				objs = append(objs, m)
			}
			fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build()

			r := &Reconciler{Client: fakeClient}
			err := r.reconcileTopologyReconciledCondition(tt.s, tt.cluster, tt.reconcileErr)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())

				actualCondition := conditions.Get(tt.cluster, clusterv1.TopologyReconciledCondition)
				g.Expect(actualCondition.Status).To(Equal(tt.wantConditionStatus))
				g.Expect(actualCondition.Reason).To(Equal(tt.wantConditionReason))
				g.Expect(actualCondition.Message).To(Equal(tt.wantConditionMessage))
			}
		})
	}
}

// TestComputeNameList verifies that computeNameList joins names with commas and
// truncates lists longer than five entries.
func TestComputeNameList(t *testing.T) {
	tests := []struct {
		name     string
		mdList   []string
		expected string
	}{
		{
			name: "mdList with 4 names",
			mdList: []string{"md-1", "md-2", "md-3", "md-4"},
			expected: "md-1, md-2, md-3, md-4",
		},
		{
			name: "mdList with 5 names",
			mdList: []string{"md-1", "md-2", "md-3", "md-4", "md-5"},
			expected: "md-1, md-2, md-3, md-4, md-5",
		},
		{
			name: "mdList with 6 names is shortened",
			mdList: []string{"md-1", "md-2", "md-3", "md-4", "md-5", "md-6"},
			expected: "md-1, md-2, md-3, md-4, md-5, ...",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			g.Expect(computeNameList(tt.mdList)).To(Equal(tt.expected))
		})
	}
}