/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package machine

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/go-logr/logr"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/record"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/api/v1beta1/index"
	"sigs.k8s.io/cluster-api/controllers/remote"
	"sigs.k8s.io/cluster-api/internal/test/builder"
	"sigs.k8s.io/cluster-api/internal/util/ssa"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
	"sigs.k8s.io/cluster-api/util/patch"
)

// TestWatches verifies that the machine controller reacts to events on the
// objects it watches: once the bootstrap config and infra machine report
// ready, the Machine is assigned a NodeRef, and deleting the infra machine
// triggers a reconcile that surfaces a failure message on the Machine.
//
// NOTE: env, ctx and timeout are package-level test fixtures defined
// elsewhere in this package (test environment, context and Eventually
// polling deadline).
func TestWatches(t *testing.T) {
	g := NewWithT(t)
	ns, err := env.CreateNamespace(ctx, "test-machine-watches")
	g.Expect(err).ToNot(HaveOccurred())

	// Infra machine that is already ready and exposes an address; its
	// providerID matches the Node created below so node reconciliation can
	// link them.
	infraMachine := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config1",
				"namespace": ns.Name,
			},
			"spec": map[string]interface{}{
				"providerID": "test://id-1",
			},
			"status": map[string]interface{}{
				"ready": true,
				"addresses": []interface{}{
					map[string]interface{}{
						"type":    "InternalIP",
						"address": "10.0.0.1",
					},
				},
			},
		},
	}

	defaultBootstrap := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericBootstrapConfig",
			"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "bootstrap-config-machinereconcile",
				"namespace": ns.Name,
			},
			"spec":   map[string]interface{}{},
			"status": map[string]interface{}{},
		},
	}

	testCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-reconcile-",
			Namespace:    ns.Name,
		},
	}

	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "node-1",
			Namespace: ns.Name,
		},
		Spec: corev1.NodeSpec{
			ProviderID: "test://id-1",
		},
	}

	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
	g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
	g.Expect(env.Create(ctx, node)).To(Succeed())
	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())

	// node and infraMachine are deleted explicitly later in the test, so
	// only the remaining objects are cleaned up here.
	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(ns, testCluster, defaultBootstrap)

	// Patch infra machine ready
	patchHelper, err := patch.NewHelper(infraMachine, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed())

	// Patch bootstrap ready
	patchHelper, err = patch.NewHelper(defaultBootstrap, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed())

	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-created-",
			Namespace:    ns.Name,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: testCluster.Name,
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config-machinereconcile",
				},
			},
		},
	}

	g.Expect(env.Create(ctx, machine)).To(Succeed())
	defer func() {
		g.Expect(env.Cleanup(ctx, machine)).To(Succeed())
	}()

	// Wait for reconciliation to happen.
	// Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation.
	key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			return false
		}
		return machine.Status.NodeRef != nil
	}, timeout).Should(BeTrue())

	// Node deletion will trigger node watchers and a request will be added to the queue.
	g.Expect(env.Delete(ctx, node)).To(Succeed())
	// TODO: Once conditions are in place, check if node deletion triggered a reconcile.

	// Delete infra machine, external tracker will trigger reconcile
	// and machine Status.FailureReason should be non-nil after reconcileInfrastructure
	g.Expect(env.Delete(ctx, infraMachine)).To(Succeed())
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			return false
		}
		return machine.Status.FailureMessage != nil
	}, timeout).Should(BeTrue())
}

// TestWatchesDelete verifies that reconcile requests keep being triggered
// while a Machine is being deleted: removing the infra machine's finalizer
// lets the controller proceed to delete the bootstrap config, and removing
// that finalizer in turn lets the Machine itself go away.
func TestWatchesDelete(t *testing.T) {
	g := NewWithT(t)
	ns, err := env.CreateNamespace(ctx, "test-machine-watches-delete")
	g.Expect(err).ToNot(HaveOccurred())

	infraMachine := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config1",
				"namespace": ns.Name,
			},
			"spec": map[string]interface{}{
				"providerID": "test://id-1",
			},
			"status": map[string]interface{}{
				"ready": true,
				"addresses": []interface{}{
					map[string]interface{}{
						"type":    "InternalIP",
						"address": "10.0.0.1",
					},
				},
			},
		},
	}
	// The finalizer simulates an infra machine controller that must consent
	// before the object is actually removed.
	infraMachineFinalizer := "test.infrastructure.cluster.x-k8s.io"
	controllerutil.AddFinalizer(infraMachine, infraMachineFinalizer)

	defaultBootstrap := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericBootstrapConfig",
			"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "bootstrap-config-machinereconcile",
				"namespace": ns.Name,
			},
			"spec":   map[string]interface{}{},
			"status": map[string]interface{}{},
		},
	}
	// Same trick for the bootstrap config controller.
	bootstrapFinalizer := "test.bootstrap.cluster.x-k8s.io"
	controllerutil.AddFinalizer(defaultBootstrap, bootstrapFinalizer)

	testCluster :=
	&clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-reconcile-",
			Namespace:    ns.Name,
		},
		Spec: clusterv1.ClusterSpec{
			// we create the cluster in paused state so we don't reconcile
			// the machine immediately after creation.
			// This avoids going through reconcileExternal, which adds watches
			// for the provider machine and the bootstrap config objects.
			Paused: true,
		},
	}

	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
	g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())

	// infraMachine is deleted explicitly later in the test, so it is not in
	// the cleanup list.
	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(ns, testCluster, defaultBootstrap)

	// Patch infra machine ready
	patchHelper, err := patch.NewHelper(infraMachine, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed())

	// Patch bootstrap ready
	patchHelper, err = patch.NewHelper(defaultBootstrap, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed())

	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-created-",
			Namespace:    ns.Name,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: testCluster.Name,
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config-machinereconcile",
				},
			},
		},
	}
	// We create the machine with a finalizer so the machine is not deleted immediately.
	controllerutil.AddFinalizer(machine, clusterv1.MachineFinalizer)

	g.Expect(env.Create(ctx, machine)).To(Succeed())
	defer func() {
		g.Expect(env.Cleanup(ctx, machine)).To(Succeed())
	}()

	// We mark the machine for deletion
	g.Expect(env.Delete(ctx, machine)).To(Succeed())

	// We unpause the cluster so the machine can be reconciled.
	testCluster.Spec.Paused = false
	g.Expect(env.Update(ctx, testCluster)).To(Succeed())

	// Wait for reconciliation to happen.
	// The first reconciliation should add the cluster name label.
	key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			return false
		}
		return machine.Labels[clusterv1.ClusterNameLabel] == testCluster.Name
	}, timeout).Should(BeTrue())

	// Deleting the machine should mark the infra machine for deletion
	infraMachineKey := client.ObjectKey{Name: infraMachine.GetName(), Namespace: infraMachine.GetNamespace()}
	g.Eventually(func() bool {
		if err := env.Get(ctx, infraMachineKey, infraMachine); err != nil {
			return false
		}
		return infraMachine.GetDeletionTimestamp() != nil
	}, timeout).Should(BeTrue(), "infra machine should be marked for deletion")

	// We wait a bit and remove the finalizer, simulating the infra machine controller.
	time.Sleep(2 * time.Second)
	infraMachine.SetFinalizers([]string{})
	g.Expect(env.Update(ctx, infraMachine)).To(Succeed())

	// This should delete the infra machine
	g.Eventually(func() bool {
		err := env.Get(ctx, infraMachineKey, infraMachine)
		return apierrors.IsNotFound(err)
	}, timeout).Should(BeTrue(), "infra machine should be deleted")

	// If the watch on infra machine works, deleting of the infra machine will trigger another
	// reconcile, which will mark the bootstrap config for deletion
	bootstrapKey := client.ObjectKey{Name: defaultBootstrap.GetName(), Namespace: defaultBootstrap.GetNamespace()}
	g.Eventually(func() bool {
		if err := env.Get(ctx, bootstrapKey, defaultBootstrap); err != nil {
			return false
		}
		return defaultBootstrap.GetDeletionTimestamp() != nil
	}, timeout).Should(BeTrue(), "bootstrap config should be marked for deletion")

	// We wait a bit and remove the finalizer, simulating the bootstrap config controller.
	time.Sleep(2 * time.Second)
	defaultBootstrap.SetFinalizers([]string{})
	g.Expect(env.Update(ctx, defaultBootstrap)).To(Succeed())

	// This should delete the bootstrap config.
354 g.Eventually(func() bool { 355 err := env.Get(ctx, bootstrapKey, defaultBootstrap) 356 return apierrors.IsNotFound(err) 357 }, timeout).Should(BeTrue(), "bootstrap config should be deleted") 358 359 // If the watch on bootstrap config works, the deleting of the bootstrap config will trigger another 360 // reconcile, which will remove the finalizer and delete the machine 361 g.Eventually(func() bool { 362 err := env.Get(ctx, key, machine) 363 return apierrors.IsNotFound(err) 364 }, timeout).Should(BeTrue(), "machine should be deleted") 365 } 366 367 func TestMachine_Reconcile(t *testing.T) { 368 g := NewWithT(t) 369 370 ns, err := env.CreateNamespace(ctx, "test-machine-reconcile") 371 g.Expect(err).ToNot(HaveOccurred()) 372 373 infraMachine := &unstructured.Unstructured{ 374 Object: map[string]interface{}{ 375 "kind": "GenericInfrastructureMachine", 376 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 377 "metadata": map[string]interface{}{ 378 "name": "infra-config1", 379 "namespace": ns.Name, 380 }, 381 "spec": map[string]interface{}{ 382 "providerID": "test://id-1", 383 }, 384 }, 385 } 386 387 defaultBootstrap := &unstructured.Unstructured{ 388 Object: map[string]interface{}{ 389 "kind": "GenericBootstrapConfig", 390 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 391 "metadata": map[string]interface{}{ 392 "name": "bootstrap-config-machinereconcile", 393 "namespace": ns.Name, 394 }, 395 "spec": map[string]interface{}{}, 396 "status": map[string]interface{}{}, 397 }, 398 } 399 400 testCluster := &clusterv1.Cluster{ 401 ObjectMeta: metav1.ObjectMeta{ 402 GenerateName: "machine-reconcile-", 403 Namespace: ns.Name, 404 }, 405 } 406 407 g.Expect(env.Create(ctx, testCluster)).To(Succeed()) 408 g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) 409 g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed()) 410 411 defer func(do ...client.Object) { 412 g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) 413 }(ns, testCluster, defaultBootstrap) 414 415 
	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-created-",
			Namespace:    ns.Name,
			Finalizers:   []string{clusterv1.MachineFinalizer},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: testCluster.Name,
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config-machinereconcile",
				},
			},
		},
		Status: clusterv1.MachineStatus{
			NodeRef: &corev1.ObjectReference{
				Name: "test",
			},
		},
	}
	g.Expect(env.Create(ctx, machine)).To(Succeed())

	key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}

	// Wait for reconciliation to happen when infra and bootstrap objects are not ready.
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			return false
		}
		return len(machine.Finalizers) > 0
	}, timeout).Should(BeTrue())

	// Set bootstrap ready.
	bootstrapPatch := client.MergeFrom(defaultBootstrap.DeepCopy())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).ToNot(HaveOccurred())
	g.Expect(env.Status().Patch(ctx, defaultBootstrap, bootstrapPatch)).To(Succeed())

	// Set infrastructure ready.
	infraMachinePatch := client.MergeFrom(infraMachine.DeepCopy())
	g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed())
	g.Expect(env.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed())

	// Wait for Machine Ready Condition to become True.
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			return false
		}
		// Guard on InfrastructureReady first: conditions.Get below would
		// return nil until the controller has observed the ready statuses.
		if !conditions.Has(machine, clusterv1.InfrastructureReadyCondition) {
			return false
		}
		readyCondition := conditions.Get(machine, clusterv1.ReadyCondition)
		return readyCondition.Status == corev1.ConditionTrue
	}, timeout).Should(BeTrue())

	g.Expect(env.Delete(ctx, machine)).ToNot(HaveOccurred())
	// Wait for Machine to be deleted.
	g.Eventually(func() bool {
		if err := env.Get(ctx, key, machine); err != nil {
			if apierrors.IsNotFound(err) {
				return true
			}
		}
		return false
	}, timeout).Should(BeTrue())

	// Check if Machine deletion successfully deleted infrastructure external reference.
	keyInfra := client.ObjectKey{Name: infraMachine.GetName(), Namespace: infraMachine.GetNamespace()}
	g.Eventually(func() bool {
		if err := env.Get(ctx, keyInfra, infraMachine); err != nil {
			if apierrors.IsNotFound(err) {
				return true
			}
		}
		return false
	}, timeout).Should(BeTrue())

	// Check if Machine deletion successfully deleted bootstrap external reference.
499 keyBootstrap := client.ObjectKey{Name: defaultBootstrap.GetName(), Namespace: defaultBootstrap.GetNamespace()} 500 g.Eventually(func() bool { 501 if err := env.Get(ctx, keyBootstrap, defaultBootstrap); err != nil { 502 if apierrors.IsNotFound(err) { 503 return true 504 } 505 } 506 return false 507 }, timeout).Should(BeTrue()) 508 } 509 510 func TestMachineFinalizer(t *testing.T) { 511 bootstrapData := "some valid data" 512 clusterCorrectMeta := &clusterv1.Cluster{ 513 ObjectMeta: metav1.ObjectMeta{ 514 Name: "valid-cluster", 515 Namespace: metav1.NamespaceDefault, 516 }, 517 } 518 519 machineValidCluster := &clusterv1.Machine{ 520 ObjectMeta: metav1.ObjectMeta{ 521 Name: "machine1", 522 Namespace: metav1.NamespaceDefault, 523 }, 524 Spec: clusterv1.MachineSpec{ 525 Bootstrap: clusterv1.Bootstrap{ 526 DataSecretName: &bootstrapData, 527 }, 528 ClusterName: "valid-cluster", 529 }, 530 } 531 532 machineWithFinalizer := &clusterv1.Machine{ 533 ObjectMeta: metav1.ObjectMeta{ 534 Name: "machine2", 535 Namespace: metav1.NamespaceDefault, 536 Finalizers: []string{"some-other-finalizer"}, 537 }, 538 Spec: clusterv1.MachineSpec{ 539 Bootstrap: clusterv1.Bootstrap{ 540 DataSecretName: &bootstrapData, 541 }, 542 ClusterName: "valid-cluster", 543 }, 544 } 545 546 testCases := []struct { 547 name string 548 request reconcile.Request 549 m *clusterv1.Machine 550 expectedFinalizers []string 551 }{ 552 { 553 name: "should add a machine finalizer to the machine if it doesn't have one", 554 request: reconcile.Request{ 555 NamespacedName: util.ObjectKey(machineValidCluster), 556 }, 557 m: machineValidCluster, 558 expectedFinalizers: []string{clusterv1.MachineFinalizer}, 559 }, 560 { 561 name: "should append the machine finalizer to the machine if it already has a finalizer", 562 request: reconcile.Request{ 563 NamespacedName: util.ObjectKey(machineWithFinalizer), 564 }, 565 m: machineWithFinalizer, 566 expectedFinalizers: []string{"some-other-finalizer", 
clusterv1.MachineFinalizer}, 567 }, 568 } 569 570 for _, tc := range testCases { 571 t.Run(tc.name, func(t *testing.T) { 572 g := NewWithT(t) 573 574 c := fake.NewClientBuilder().WithObjects( 575 clusterCorrectMeta, 576 machineValidCluster, 577 machineWithFinalizer, 578 ).Build() 579 mr := &Reconciler{ 580 Client: c, 581 UnstructuredCachingClient: c, 582 } 583 584 _, _ = mr.Reconcile(ctx, tc.request) 585 586 key := client.ObjectKey{Namespace: tc.m.Namespace, Name: tc.m.Name} 587 var actual clusterv1.Machine 588 if len(tc.expectedFinalizers) > 0 { 589 g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed()) 590 g.Expect(actual.Finalizers).ToNot(BeEmpty()) 591 g.Expect(actual.Finalizers).To(Equal(tc.expectedFinalizers)) 592 } else { 593 g.Expect(actual.Finalizers).To(BeEmpty()) 594 } 595 }) 596 } 597 } 598 599 func TestMachineOwnerReference(t *testing.T) { 600 bootstrapData := "some valid data" 601 testCluster := &clusterv1.Cluster{ 602 TypeMeta: metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()}, 603 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 604 } 605 606 machineInvalidCluster := &clusterv1.Machine{ 607 ObjectMeta: metav1.ObjectMeta{ 608 Name: "machine1", 609 Namespace: metav1.NamespaceDefault, 610 }, 611 Spec: clusterv1.MachineSpec{ 612 ClusterName: "invalid", 613 }, 614 } 615 616 machineValidCluster := &clusterv1.Machine{ 617 ObjectMeta: metav1.ObjectMeta{ 618 Name: "machine2", 619 Namespace: metav1.NamespaceDefault, 620 }, 621 Spec: clusterv1.MachineSpec{ 622 Bootstrap: clusterv1.Bootstrap{ 623 DataSecretName: &bootstrapData, 624 }, 625 ClusterName: "test-cluster", 626 }, 627 } 628 629 machineValidMachine := &clusterv1.Machine{ 630 ObjectMeta: metav1.ObjectMeta{ 631 Name: "machine3", 632 Namespace: metav1.NamespaceDefault, 633 Labels: map[string]string{ 634 clusterv1.ClusterNameLabel: "valid-cluster", 635 }, 636 OwnerReferences: []metav1.OwnerReference{ 637 { 638 APIVersion: 
					clusterv1.GroupVersion.String(),
					Kind:       "MachineSet",
					Name:       "valid-machineset",
					Controller: pointer.Bool(true),
				},
			},
		},
		Spec: clusterv1.MachineSpec{
			Bootstrap: clusterv1.Bootstrap{
				DataSecretName: &bootstrapData,
			},
			ClusterName: "test-cluster",
		},
	}

	machineValidControlled := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "machine4",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				clusterv1.ClusterNameLabel:         "valid-cluster",
				clusterv1.MachineControlPlaneLabel: "",
			},
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: "test.group",
					Kind:       "KubeadmControlPlane",
					Name:       "valid-controlplane",
					Controller: pointer.Bool(true),
				},
			},
		},
		Spec: clusterv1.MachineSpec{
			Bootstrap: clusterv1.Bootstrap{
				DataSecretName: &bootstrapData,
			},
			ClusterName: "test-cluster",
		},
	}

	testCases := []struct {
		name       string
		request    reconcile.Request
		m          *clusterv1.Machine
		expectedOR []metav1.OwnerReference
	}{
		{
			name: "should add owner reference to machine referencing a cluster with correct type meta",
			request: reconcile.Request{
				NamespacedName: util.ObjectKey(machineValidCluster),
			},
			m: machineValidCluster,
			expectedOR: []metav1.OwnerReference{
				{
					APIVersion: testCluster.APIVersion,
					Kind:       testCluster.Kind,
					Name:       testCluster.Name,
					UID:        testCluster.UID,
				},
			},
		},
		{
			name: "should not add cluster owner reference if machine is owned by a machine set",
			request: reconcile.Request{
				NamespacedName: util.ObjectKey(machineValidMachine),
			},
			m: machineValidMachine,
			expectedOR: []metav1.OwnerReference{
				{
					APIVersion: clusterv1.GroupVersion.String(),
					Kind:       "MachineSet",
					Name:       "valid-machineset",
					Controller: pointer.Bool(true),
				},
			},
		},
		{
			name: "should not add cluster owner reference if machine has a controller owner",
			request: reconcile.Request{
				NamespacedName: util.ObjectKey(machineValidControlled),
			},
			m: machineValidControlled,
			expectedOR: []metav1.OwnerReference{
				{
					APIVersion: "test.group",
					Kind:       "KubeadmControlPlane",
					Name:       "valid-controlplane",
					Controller: pointer.Bool(true),
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			c := fake.NewClientBuilder().WithObjects(
				testCluster,
				machineInvalidCluster,
				machineValidCluster,
				machineValidMachine,
				machineValidControlled,
			).WithStatusSubresource(&clusterv1.Machine{}).Build()
			mr := &Reconciler{
				Client:                    c,
				UnstructuredCachingClient: c,
				APIReader:                 c,
			}

			key := client.ObjectKey{Namespace: tc.m.Namespace, Name: tc.m.Name}
			var actual clusterv1.Machine

			// this first requeue is to add finalizer
			result, err := mr.Reconcile(ctx, tc.request)
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(result).To(BeComparableTo(ctrl.Result{}))
			g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed())
			g.Expect(actual.Finalizers).To(ContainElement(clusterv1.MachineFinalizer))

			// Second reconcile is the one expected to set the owner reference;
			// its error is intentionally ignored.
			_, _ = mr.Reconcile(ctx, tc.request)

			if len(tc.expectedOR) > 0 {
				g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed())
				g.Expect(actual.OwnerReferences).To(BeComparableTo(tc.expectedOR))
			} else {
				g.Expect(actual.OwnerReferences).To(BeEmpty())
			}
		})
	}
}

// TestReconcileRequest runs the reconciler against Machines in different
// states (freshly created, updated, marked for deletion) using a fake client
// and asserts the returned reconcile result and error.
func TestReconcileRequest(t *testing.T) {
	infraConfig := unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config1",
				"namespace": metav1.NamespaceDefault,
			},
			"spec": map[string]interface{}{
				"providerID": "test://id-1",
			},
			"status":
			map[string]interface{}{
				"ready": true,
				"addresses": []interface{}{
					map[string]interface{}{
						"type":    "InternalIP",
						"address": "10.0.0.1",
					},
				},
			},
		},
	}

	// NOTE: this local shadows the imported "time" package for the rest of
	// this function.
	time := metav1.Now()

	testCluster := clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-cluster",
			Namespace: metav1.NamespaceDefault,
		},
	}

	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
	}

	type expected struct {
		result reconcile.Result
		err    bool
	}
	testCases := []struct {
		machine  clusterv1.Machine
		expected expected
	}{
		{
			machine: clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "created",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName: "test-cluster",
					InfrastructureRef: corev1.ObjectReference{
						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
						Kind:       "GenericInfrastructureMachine",
						Name:       "infra-config1",
					},
					Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
					ObservedGeneration: 1,
				},
			},
			expected: expected{
				result: reconcile.Result{},
				err:    false,
			},
		},
		{
			machine: clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "updated",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName: "test-cluster",
					InfrastructureRef: corev1.ObjectReference{
						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
						Kind:       "GenericInfrastructureMachine",
						Name:       "infra-config1",
					},
					Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
					ObservedGeneration: 1,
				},
			},
			expected: expected{
				result: reconcile.Result{},
				err:    false,
			},
		},
		{
			machine: clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "deleted",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.MachineControlPlaneLabel: "",
					},
					Finalizers:        []string{clusterv1.MachineFinalizer},
					DeletionTimestamp: &time,
				},
				Spec: clusterv1.MachineSpec{
					ClusterName: "test-cluster",
					InfrastructureRef: corev1.ObjectReference{
						APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
						Kind:       "GenericInfrastructureMachine",
						Name:       "infra-config1",
					},
					Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
			},
			expected: expected{
				result: reconcile.Result{},
				err:    false,
			},
		},
	}

	for i := range testCases {
		tc := testCases[i]
		t.Run("machine should be "+tc.machine.Name, func(t *testing.T) {
			g := NewWithT(t)

			clientFake := fake.NewClientBuilder().WithObjects(
				node,
				&testCluster,
				&tc.machine,
				builder.GenericInfrastructureMachineCRD.DeepCopy(),
				&infraConfig,
			).WithStatusSubresource(&clusterv1.Machine{}).WithIndex(&corev1.Node{}, index.NodeProviderIDField, index.NodeByProviderID).Build()

			r := &Reconciler{
				Client:                    clientFake,
				UnstructuredCachingClient: clientFake,
				Tracker:                   remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
				ssaCache:                  ssa.NewCache(),
			}

			result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&tc.machine)})
			if tc.expected.err {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}

g.Expect(result).To(BeComparableTo(tc.expected.result))
		})
	}
}

// TestMachineConditions verifies the conditions the Machine reconciler sets on a
// Machine depending on the readiness of its bootstrap config and infra machine,
// and on conditions set by external controllers (MHC / owner remediation).
func TestMachineConditions(t *testing.T) {
	// infraConfig returns a GenericInfrastructureMachine whose status.ready
	// is set to the given value.
	infraConfig := func(ready bool) *unstructured.Unstructured {
		return &unstructured.Unstructured{
			Object: map[string]interface{}{
				"kind":       "GenericInfrastructureMachine",
				"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
				"metadata": map[string]interface{}{
					"name":      "infra-config1",
					"namespace": metav1.NamespaceDefault,
				},
				"spec": map[string]interface{}{
					"providerID": "test://id-1",
				},
				"status": map[string]interface{}{
					"ready": ready,
					"addresses": []interface{}{
						map[string]interface{}{
							"type":    "InternalIP",
							"address": "10.0.0.1",
						},
					},
				},
			},
		}
	}

	// bootstrapConfig returns a GenericBootstrapConfig whose status.ready is
	// set to the given value; a ready config also exposes a data secret name.
	// (renamed from the misspelled "boostrapConfig")
	bootstrapConfig := func(ready bool) *unstructured.Unstructured {
		status := map[string]interface{}{
			"ready": ready,
		}
		if ready {
			status["dataSecretName"] = "data"
		}
		return &unstructured.Unstructured{
			Object: map[string]interface{}{
				"kind":       "GenericBootstrapConfig",
				"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
				"metadata": map[string]interface{}{
					"name":      "bootstrap-config1",
					"namespace": metav1.NamespaceDefault,
				},
				"status": status,
			},
		}
	}

	testCluster := clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-cluster",
			Namespace: metav1.NamespaceDefault,
		},
	}

	machine := clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "blah",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
			Finalizers: []string{clusterv1.MachineFinalizer},
		},
		Spec: clusterv1.MachineSpec{
			ProviderID:  pointer.String("test://id-1"),
			ClusterName: "test-cluster",
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config1",
				},
			},
		},
		Status: clusterv1.MachineStatus{
			NodeRef: &corev1.ObjectReference{
				Name: "test",
			},
			ObservedGeneration: 1,
		},
	}

	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
	}

	testcases := []struct {
		name               string
		infraReady         bool
		bootstrapReady     bool
		beforeFunc         func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine)
		conditionsToAssert []*clusterv1.Condition
	}{
		{
			name:           "all conditions true",
			infraReady:     true,
			bootstrapReady: true,
			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
				// since these conditions are set by an external controller
				conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition)
				conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedCondition)
			},
			conditionsToAssert: []*clusterv1.Condition{
				conditions.TrueCondition(clusterv1.InfrastructureReadyCondition),
				conditions.TrueCondition(clusterv1.BootstrapReadyCondition),
				conditions.TrueCondition(clusterv1.MachineOwnerRemediatedCondition),
				conditions.TrueCondition(clusterv1.MachineHealthCheckSucceededCondition),
				conditions.TrueCondition(clusterv1.ReadyCondition),
			},
		},
		{
			name:           "infra condition consumes reason from the infra config",
			infraReady:     false,
			bootstrapReady: true,
			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
				addConditionsToExternal(infra, clusterv1.Conditions{
					{
						Type:     clusterv1.ReadyCondition,
						Status:   corev1.ConditionFalse,
						Severity: clusterv1.ConditionSeverityInfo,
						Reason:   "Custom reason",
					},
				})
			},
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""),
			},
		},
		{
			name:           "infra condition consumes the fallback reason",
			infraReady:     false,
			bootstrapReady: true,
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
			},
		},
		{
			name:           "bootstrap condition consumes reason from the bootstrap config",
			infraReady:     true,
			bootstrapReady: false,
			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
				addConditionsToExternal(bootstrap, clusterv1.Conditions{
					{
						Type:     clusterv1.ReadyCondition,
						Status:   corev1.ConditionFalse,
						Severity: clusterv1.ConditionSeverityInfo,
						Reason:   "Custom reason",
					},
				})
			},
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.BootstrapReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""),
			},
		},
		{
			name:           "bootstrap condition consumes the fallback reason",
			infraReady:     true,
			bootstrapReady: false,
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.BootstrapReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
			},
		},
		// Assert summary conditions
		// infra condition takes precedence over bootstrap condition in generating summary
		{
			name:           "ready condition summary consumes reason from the infra condition",
			infraReady:     false,
			bootstrapReady: false,
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
			},
		},
		{
			name:           "ready condition summary consumes reason from the machine owner remediated condition",
			infraReady:     true,
			bootstrapReady: true,
			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
				conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed")
			},
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed"),
			},
		},
		{
			name:           "ready condition summary consumes reason from the MHC succeeded condition",
			infraReady:     true,
			bootstrapReady: true,
			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
				conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "")
			},
			conditionsToAssert: []*clusterv1.Condition{
				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, ""),
			},
		},
	}

	for _, tt := range testcases {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// setup objects
			bootstrap := bootstrapConfig(tt.bootstrapReady)
			infra := infraConfig(tt.infraReady)
			m := machine.DeepCopy()
			if tt.beforeFunc != nil {
				tt.beforeFunc(bootstrap, infra, m)
			}

			clientFake := fake.NewClientBuilder().WithObjects(
				&testCluster,
				m,
				builder.GenericInfrastructureMachineCRD.DeepCopy(),
				infra,
				builder.GenericBootstrapConfigCRD.DeepCopy(),
				bootstrap,
				node,
			).
				WithIndex(&corev1.Node{}, index.NodeProviderIDField, index.NodeByProviderID).
				WithStatusSubresource(&clusterv1.Machine{}).
				Build()

			r := &Reconciler{
				Client:                    clientFake,
				UnstructuredCachingClient: clientFake,
				Tracker:                   remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}),
				ssaCache:                  ssa.NewCache(),
			}

			_, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&machine)})
			g.Expect(err).ToNot(HaveOccurred())

			// Re-read the Machine and check the resulting conditions.
			m = &clusterv1.Machine{}
			g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(&machine), m)).To(Succeed())

			assertConditions(t, m, tt.conditionsToAssert...)
		})
	}
}

// TestReconcileDeleteExternal verifies reconcileDeleteExternal both when the
// referenced bootstrap object exists and when it is already gone.
func TestReconcileDeleteExternal(t *testing.T) {
	testCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"},
	}

	bootstrapConfig := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "BootstrapConfig",
			"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "delete-bootstrap",
				"namespace": metav1.NamespaceDefault,
			},
		},
	}

	machine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "delete",
			Namespace: metav1.NamespaceDefault,
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: "test-cluster",
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "BootstrapConfig",
					Name:       "delete-bootstrap",
				},
			},
		},
	}

	testCases := []struct {
		name string
bootstrapExists bool
		expectError     bool
		expected        *unstructured.Unstructured
	}{
		{
			name:            "should continue to reconcile delete of external refs if exists",
			bootstrapExists: true,
			expected: &unstructured.Unstructured{
				Object: map[string]interface{}{
					"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
					"kind":       "BootstrapConfig",
					"metadata": map[string]interface{}{
						"name":      "delete-bootstrap",
						"namespace": metav1.NamespaceDefault,
						// NOTE(review): "999" looks like the fake client's initial
						// resourceVersion for created objects — confirm against
						// controller-runtime's fake client behavior.
						"resourceVersion": "999",
					},
				},
			},
			expectError: false,
		},
		{
			name:            "should no longer reconcile deletion of external refs since it doesn't exist",
			bootstrapExists: false,
			expected:        nil,
			expectError:     false,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			objs := []client.Object{testCluster, machine}

			// Only seed the bootstrap object when the case expects it to exist.
			if tc.bootstrapExists {
				objs = append(objs, bootstrapConfig)
			}

			c := fake.NewClientBuilder().WithObjects(objs...).Build()
			r := &Reconciler{
				Client:                    c,
				UnstructuredCachingClient: c,
			}

			obj, err := r.reconcileDeleteExternal(ctx, testCluster, machine, machine.Spec.Bootstrap.ConfigRef)
			if tc.expectError {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
			}
			g.Expect(obj).To(BeComparableTo(tc.expected))
		})
	}
}

// TestRemoveMachineFinalizerAfterDeleteReconcile verifies that reconciling a
// Machine with a deletion timestamp removes the MachineFinalizer while leaving
// the foreign "test" finalizer in place.
func TestRemoveMachineFinalizerAfterDeleteReconcile(t *testing.T) {
	g := NewWithT(t)

	dt := metav1.Now()

	testCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"},
	}

	m := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "delete123",
			Namespace: metav1.NamespaceDefault,
			// MachineFinalizer should be dropped by the reconcile; "test" must survive.
			Finalizers:        []string{clusterv1.MachineFinalizer, "test"},
			DeletionTimestamp: &dt,
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: "test-cluster",
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
		},
	}
	key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name}
	c := fake.NewClientBuilder().WithObjects(testCluster, m).WithStatusSubresource(&clusterv1.Machine{}).Build()
	mr := &Reconciler{
		Client:                    c,
		UnstructuredCachingClient: c,
	}
	_, err := mr.Reconcile(ctx, reconcile.Request{NamespacedName: key})
	g.Expect(err).ToNot(HaveOccurred())

	var actual clusterv1.Machine
	g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed())
	g.Expect(actual.ObjectMeta.Finalizers).To(Equal([]string{"test"}))
}

// TestIsNodeDrainedAllowed exercises isNodeDrainAllowed: the exclude-drain
// annotation and an expired NodeDrainTimeout disallow draining; an unexpired
// or unset (0) timeout allows it.
func TestIsNodeDrainedAllowed(t *testing.T) {
	testCluster := &clusterv1.Cluster{
		TypeMeta:   metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"},
	}

	tests := []struct {
		name     string
		machine  *clusterv1.Machine
		expected bool
	}{
		{
			name: "Exclude node draining annotation exists",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "test-machine",
					Namespace:   metav1.NamespaceDefault,
					Finalizers:  []string{clusterv1.MachineFinalizer},
					Annotations: map[string]string{clusterv1.ExcludeNodeDrainingAnnotation: "existed!!"},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{},
			},
			expected: false,
		},
		{
			name: "Node draining timeout is over",
			machine:
&clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
					NodeDrainTimeout:  &metav1.Duration{Duration: time.Second * 60},
				},

				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:   clusterv1.DrainingSucceededCondition,
							Status: corev1.ConditionFalse,
							// Transition 70s ago exceeds the 60s drain timeout.
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 70)).UTC()},
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Node draining timeout is not yet over",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
					NodeDrainTimeout:  &metav1.Duration{Duration: time.Second * 60},
				},
				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:   clusterv1.DrainingSucceededCondition,
							Status: corev1.ConditionFalse,
							// Transition 30s ago is still within the 60s drain timeout.
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()},
						},
					},
				},
			},
			expected: true,
		},
		{
			name: "NodeDrainTimeout option is set to its default value 0",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:               clusterv1.DrainingSucceededCondition,
							Status:             corev1.ConditionFalse,
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()},
						},
					},
				},
			},
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			var objs []client.Object
			objs = append(objs, testCluster, tt.machine)

			c := fake.NewClientBuilder().WithObjects(objs...).Build()
			r := &Reconciler{
				Client:                    c,
				UnstructuredCachingClient: c,
			}

			got := r.isNodeDrainAllowed(tt.machine)
			g.Expect(got).To(Equal(tt.expected))
		})
	}
}

// TestIsNodeVolumeDetachingAllowed exercises isNodeVolumeDetachingAllowed:
// the exclude-wait annotation and an expired NodeVolumeDetachTimeout disallow
// waiting; an unexpired or unset (0) timeout allows it.
func TestIsNodeVolumeDetachingAllowed(t *testing.T) {
	testCluster := &clusterv1.Cluster{
		TypeMeta:   metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()},
		ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"},
	}

	tests := []struct {
		name     string
		machine  *clusterv1.Machine
		expected bool
	}{
		{
			name: "Exclude wait node volume detaching annotation exists",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:        "test-machine",
					Namespace:   metav1.NamespaceDefault,
					Finalizers:  []string{clusterv1.MachineFinalizer},
					Annotations: map[string]string{clusterv1.ExcludeWaitForNodeVolumeDetachAnnotation: "existed!!"},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{},
			},
			expected: false,
		},
		{
			name: "Volume detach timeout is over",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:             "test-cluster",
					InfrastructureRef:       corev1.ObjectReference{},
					Bootstrap:               clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
					NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 30},
				},

				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:   clusterv1.VolumeDetachSucceededCondition,
							Status: corev1.ConditionFalse,
							// Transition 60s ago exceeds the 30s detach timeout.
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 60)).UTC()},
						},
					},
				},
			},
			expected: false,
		},
		{
			name: "Volume detach timeout is not yet over",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:             "test-cluster",
					InfrastructureRef:       corev1.ObjectReference{},
					Bootstrap:               clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
					NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60},
				},
				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:               clusterv1.VolumeDetachSucceededCondition,
							Status:             corev1.ConditionFalse,
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()},
						},
					},
				},
			},
			expected: true,
		},
		{
			name: "Volume detach timeout option is set to it's default value 0",
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:       "test-machine",
					Namespace:  metav1.NamespaceDefault,
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:
"test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					Conditions: clusterv1.Conditions{
						{
							Type:               clusterv1.VolumeDetachSucceededCondition,
							Status:             corev1.ConditionFalse,
							LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()},
						},
					},
				},
			},
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			var objs []client.Object
			objs = append(objs, testCluster, tt.machine)

			c := fake.NewClientBuilder().WithObjects(objs...).Build()
			r := &Reconciler{
				Client:                    c,
				UnstructuredCachingClient: c,
			}

			got := r.isNodeVolumeDetachingAllowed(tt.machine)
			g.Expect(got).To(Equal(tt.expected))
		})
	}
}

// TestIsDeleteNodeAllowed exercises isDeleteNodeAllowed across combinations of
// nodeRef presence, control-plane membership, and cluster/control-plane
// deletion state (including externally managed control planes).
func TestIsDeleteNodeAllowed(t *testing.T) {
	deletionts := metav1.Now()

	testCases := []struct {
		name          string
		cluster       *clusterv1.Cluster
		machine       *clusterv1.Machine
		expectedError error
	}{
		{
			name: "machine without nodeRef",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{},
			},
			expectedError: errNilNodeRef,
		},
		{
			name: "no control plane members",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: errNoControlPlaneNodes,
		},
		{
			name: "is last control plane member",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel:         "test-cluster",
						clusterv1.MachineControlPlaneLabel: "",
					},
					Finalizers:        []string{clusterv1.MachineFinalizer},
					DeletionTimestamp: &metav1.Time{Time: time.Now().UTC()},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: errNoControlPlaneNodes,
		},
		{
			name: "has nodeRef and control plane is healthy",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: nil,
		},
		{
			name: "has nodeRef and cluster is being deleted",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "test-cluster",
					Namespace:         metav1.NamespaceDefault,
					DeletionTimestamp: &deletionts,
					Finalizers:        []string{clusterv1.ClusterFinalizer},
				},
			},
			machine:       &clusterv1.Machine{},
			expectedError: errClusterIsBeingDeleted,
		},
		{
			name: "has nodeRef and control plane is healthy and externally managed",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
				Spec: clusterv1.ClusterSpec{
					ControlPlaneRef: &corev1.ObjectReference{
						APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
						Kind:       "AWSManagedControlPlane",
						Name:       "test-cluster",
						Namespace:  "test-cluster",
					},
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName:
pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: nil,
		},
		{
			name: "has nodeRef, control plane is being deleted and not externally managed",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
				Spec: clusterv1.ClusterSpec{
					ControlPlaneRef: &corev1.ObjectReference{
						APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
						Kind:       "AWSManagedControlPlane",
						// "test-cluster-2" maps to mcpBeingDeleted below.
						Name:      "test-cluster-2",
						Namespace: "test-cluster",
					},
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: errControlPlaneIsBeingDeleted,
		},
		{
			name: "has nodeRef, control plane is being deleted and is externally managed",
			cluster: &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test-cluster",
					Namespace: metav1.NamespaceDefault,
				},
				Spec: clusterv1.ClusterSpec{
					ControlPlaneRef: &corev1.ObjectReference{
						APIVersion: "controlplane.cluster.x-k8s.io/v1beta1",
						Kind:       "AWSManagedControlPlane",
						// "test-cluster-3" maps to empBeingDeleted below.
						Name:      "test-cluster-3",
						Namespace: "test-cluster",
					},
				},
			},
			machine: &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "created",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test",
					},
				},
			},
			expectedError: errControlPlaneIsBeingDeleted,
		},
	}

	// Externally managed control plane (status.externalManagedControlPlane=true).
	emp := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"status": map[string]interface{}{
				"externalManagedControlPlane": true,
			},
		},
	}
	emp.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1")
	emp.SetKind("AWSManagedControlPlane")
	emp.SetName("test-cluster")
	emp.SetNamespace("test-cluster")

	// Control plane with a deletion timestamp, kept alive by a finalizer.
	mcpBeingDeleted := &unstructured.Unstructured{
		Object: map[string]interface{}{},
	}
	mcpBeingDeleted.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1")
	mcpBeingDeleted.SetKind("AWSManagedControlPlane")
	mcpBeingDeleted.SetName("test-cluster-2")
	mcpBeingDeleted.SetNamespace("test-cluster")
	mcpBeingDeleted.SetDeletionTimestamp(&metav1.Time{Time: time.Now()})
	mcpBeingDeleted.SetFinalizers([]string{"block-deletion"})

	// Externally managed control plane that is also being deleted.
	empBeingDeleted := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"status": map[string]interface{}{
				"externalManagedControlPlane": true,
			},
		},
	}
	empBeingDeleted.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1")
	empBeingDeleted.SetKind("AWSManagedControlPlane")
	empBeingDeleted.SetName("test-cluster-3")
	empBeingDeleted.SetNamespace("test-cluster")
	empBeingDeleted.SetDeletionTimestamp(&metav1.Time{Time: time.Now()})
	empBeingDeleted.SetFinalizers([]string{"block-deletion"})

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)

			// Two additional control-plane machines with nodeRefs.
			m1 := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cp1",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test1",
					},
				},
			}
			m2 := &clusterv1.Machine{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "cp2",
					Namespace: metav1.NamespaceDefault,
					Labels: map[string]string{
						clusterv1.ClusterNameLabel: "test-cluster",
					},
					Finalizers: []string{clusterv1.MachineFinalizer},
				},
				Spec: clusterv1.MachineSpec{
					ClusterName:       "test-cluster",
					InfrastructureRef: corev1.ObjectReference{},
					Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
				},
				Status: clusterv1.MachineStatus{
					NodeRef: &corev1.ObjectReference{
						Name: "test2",
					},
				},
			}
			// For isDeleteNodeAllowed to be true we assume a healthy control plane.
if tc.expectedError == nil {
				m1.Labels[clusterv1.MachineControlPlaneLabel] = ""
				m2.Labels[clusterv1.MachineControlPlaneLabel] = ""
			}

			c := fake.NewClientBuilder().WithObjects(
				tc.cluster,
				tc.machine,
				m1,
				m2,
				emp,
				mcpBeingDeleted,
				empBeingDeleted,
			).Build()
			mr := &Reconciler{
				Client:                    c,
				UnstructuredCachingClient: c,
			}

			err := mr.isDeleteNodeAllowed(ctx, tc.cluster, tc.machine)
			if tc.expectedError == nil {
				g.Expect(err).ToNot(HaveOccurred())
			} else {
				g.Expect(err).To(Equal(tc.expectedError))
			}
		})
	}
}

// TestNodeToMachine verifies the nodeToMachine mapping func resolves a Node to
// the Machine owning it via the provider ID / cluster annotations.
func TestNodeToMachine(t *testing.T) {
	g := NewWithT(t)
	ns, err := env.CreateNamespace(ctx, "test-node-to-machine")
	g.Expect(err).ToNot(HaveOccurred())

	// Set up cluster, machines and nodes to test against.
	infraMachine := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config1",
				"namespace": ns.Name,
			},
			"spec": map[string]interface{}{
				"providerID": "test://id-1",
			},
			"status": map[string]interface{}{
				"ready": true,
				"addresses": []interface{}{
					map[string]interface{}{
						"type":    "InternalIP",
						"address": "10.0.0.1",
					},
				},
			},
		},
	}

	infraMachine2 := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config2",
				"namespace": ns.Name,
			},
			"spec": map[string]interface{}{
				"providerID": "test://id-2",
			},
			"status": map[string]interface{}{
				"ready": true,
				"addresses": []interface{}{
					map[string]interface{}{
						"type":    "InternalIP",
						"address": "10.0.0.1",
					},
				},
			},
		},
	}

	defaultBootstrap := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericBootstrapConfig",
			"apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "bootstrap-config-machinereconcile",
				"namespace": ns.Name,
			},
			"spec":   map[string]interface{}{},
			"status": map[string]interface{}{},
		},
	}

	testCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-reconcile-",
			Namespace:    ns.Name,
		},
	}

	targetNode := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node-to-machine-1",
		},
		Spec: corev1.NodeSpec{
			ProviderID: "test://id-1",
		},
	}

	randomNode := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test-node-to-machine-node-2",
		},
		Spec: corev1.NodeSpec{
			ProviderID: "test://id-2",
		},
	}

	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
	g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed())
	g.Expect(env.Create(ctx, targetNode)).To(Succeed())
	g.Expect(env.Create(ctx, randomNode)).To(Succeed())
	g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
	g.Expect(env.Create(ctx, infraMachine2)).To(Succeed())

	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(ns, testCluster, defaultBootstrap)

	// Patch infra expectedMachine ready
	patchHelper, err := patch.NewHelper(infraMachine, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed())

	// Patch infra randomMachine ready
	patchHelper, err = patch.NewHelper(infraMachine2, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(infraMachine2.Object, true, "status", "ready")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, infraMachine2, patch.WithStatusObservedGeneration{})).To(Succeed())

	// Patch bootstrap ready
	patchHelper, err = patch.NewHelper(defaultBootstrap, env)
	g.Expect(err).ShouldNot(HaveOccurred())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed())
	g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed())
	g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed())

	expectedMachine := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "machine-created-",
			Namespace:    ns.Name,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: testCluster.Name,
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config-machinereconcile",
				},
			},
		},
	}

	g.Expect(env.Create(ctx, expectedMachine)).To(Succeed())
	defer func() {
		g.Expect(env.Cleanup(ctx, expectedMachine)).To(Succeed())
	}()

	// Wait for reconciliation to happen.
	// Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation.
2085 key := client.ObjectKey{Name: expectedMachine.Name, Namespace: expectedMachine.Namespace} 2086 g.Eventually(func() bool { 2087 if err := env.Get(ctx, key, expectedMachine); err != nil { 2088 return false 2089 } 2090 return expectedMachine.Status.NodeRef != nil 2091 }, timeout).Should(BeTrue()) 2092 2093 randomMachine := &clusterv1.Machine{ 2094 ObjectMeta: metav1.ObjectMeta{ 2095 GenerateName: "machine-created-", 2096 Namespace: ns.Name, 2097 Labels: map[string]string{ 2098 clusterv1.MachineControlPlaneLabel: "", 2099 }, 2100 }, 2101 Spec: clusterv1.MachineSpec{ 2102 ClusterName: testCluster.Name, 2103 InfrastructureRef: corev1.ObjectReference{ 2104 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 2105 Kind: "GenericInfrastructureMachine", 2106 Name: "infra-config2", 2107 }, 2108 Bootstrap: clusterv1.Bootstrap{ 2109 ConfigRef: &corev1.ObjectReference{ 2110 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 2111 Kind: "GenericBootstrapConfig", 2112 Name: "bootstrap-config-machinereconcile", 2113 }, 2114 }, 2115 }, 2116 } 2117 2118 g.Expect(env.Create(ctx, randomMachine)).To(Succeed()) 2119 defer func() { 2120 g.Expect(env.Cleanup(ctx, randomMachine)).To(Succeed()) 2121 }() 2122 2123 // Wait for reconciliation to happen. 2124 // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. 2125 key = client.ObjectKey{Name: randomMachine.Name, Namespace: randomMachine.Namespace} 2126 g.Eventually(func() bool { 2127 if err := env.Get(ctx, key, randomMachine); err != nil { 2128 return false 2129 } 2130 return randomMachine.Status.NodeRef != nil 2131 }, timeout).Should(BeTrue()) 2132 2133 // Fake nodes for actual test of nodeToMachine. 2134 fakeNodes := []*corev1.Node{ 2135 // None annotations. 2136 { 2137 ObjectMeta: metav1.ObjectMeta{ 2138 Name: targetNode.GetName(), 2139 }, 2140 Spec: corev1.NodeSpec{ 2141 ProviderID: targetNode.Spec.ProviderID, 2142 }, 2143 }, 2144 // ClusterNameAnnotation annotation. 
		// ClusterNameAnnotation annotation.
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: targetNode.GetName(),
				Annotations: map[string]string{
					clusterv1.ClusterNameAnnotation: testCluster.GetName(),
				},
			},
			Spec: corev1.NodeSpec{
				ProviderID: targetNode.Spec.ProviderID,
			},
		},
		// ClusterNamespaceAnnotation annotation.
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: targetNode.GetName(),
				Annotations: map[string]string{
					clusterv1.ClusterNamespaceAnnotation: ns.GetName(),
				},
			},
			Spec: corev1.NodeSpec{
				ProviderID: targetNode.Spec.ProviderID,
			},
		},
		// Both annotations.
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: targetNode.GetName(),
				Annotations: map[string]string{
					clusterv1.ClusterNameAnnotation:      testCluster.GetName(),
					clusterv1.ClusterNamespaceAnnotation: ns.GetName(),
				},
			},
			Spec: corev1.NodeSpec{
				ProviderID: targetNode.Spec.ProviderID,
			},
		},
	}

	// Regardless of which cluster annotations are present on the node,
	// nodeToMachine must map each node variant to exactly the one Machine
	// whose nodeRef matches (expectedMachine), never to randomMachine.
	r := &Reconciler{
		Client:                    env,
		UnstructuredCachingClient: env,
	}
	for _, node := range fakeNodes {
		request := r.nodeToMachine(ctx, node)
		g.Expect(request).To(BeEquivalentTo([]reconcile.Request{
			{
				NamespacedName: client.ObjectKeyFromObject(expectedMachine),
			},
		}))
	}
}

// fakeClientWithNodeDeletionErr wraps a client.Client and fails every Delete
// of a Node object; all other operations delegate to the embedded client.
// Used by TestNodeDeletion to simulate node-deletion failures.
type fakeClientWithNodeDeletionErr struct {
	client.Client
}

// Delete returns a fake error when obj is a Node (kind resolved via
// fakeScheme); any other object is deleted through the wrapped client.
func (fc fakeClientWithNodeDeletionErr) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	gvk, err := apiutil.GVKForObject(obj, fakeScheme)
	if err == nil && gvk.Kind == "Node" {
		return fmt.Errorf("fake error")
	}
	return fc.Client.Delete(ctx, obj, opts...)
}

// TestNodeDeletion exercises reconcileDelete's handling of the node backing a
// deleted Machine, varying spec.nodeDeletionTimeout, whether node deletion
// succeeds, and whether the owning Cluster itself is being deleted.
func TestNodeDeletion(t *testing.T) {
	g := NewWithT(t)

	// One second in the past, so the Machine below is already past its
	// deletion timestamp when reconcileDelete runs.
	deletionTime := metav1.Now().Add(-1 * time.Second)

	testCluster := clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-cluster",
			Namespace: metav1.NamespaceDefault,
		},
	}

	// The node backing testMachine (matched via status.nodeRef below).
	node := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		Spec: corev1.NodeSpec{ProviderID: "test://id-1"},
	}

	// Machine under deletion: finalizer set, deletionTimestamp in the past,
	// draining excluded via annotation so reconcileDelete goes straight to
	// node deletion.
	testMachine := clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
			Annotations: map[string]string{
				"machine.cluster.x-k8s.io/exclude-node-draining": "",
			},
			Finalizers:        []string{clusterv1.MachineFinalizer},
			DeletionTimestamp: &metav1.Time{Time: deletionTime},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: "test-cluster",
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
			Bootstrap: clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
		},
		Status: clusterv1.MachineStatus{
			NodeRef: &corev1.ObjectReference{
				Name: "test",
			},
		},
	}

	// A surviving control-plane machine, so the cluster still has a
	// control plane while testMachine is being deleted.
	cpmachine1 := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "cp1",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				clusterv1.ClusterNameLabel:         "test-cluster",
				clusterv1.MachineControlPlaneLabel: "",
			},
			Finalizers: []string{clusterv1.MachineFinalizer},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName:       "test-cluster",
			InfrastructureRef: corev1.ObjectReference{},
			Bootstrap:         clusterv1.Bootstrap{DataSecretName: pointer.String("data")},
		},
		Status: clusterv1.MachineStatus{
			NodeRef: &corev1.ObjectReference{
				Name: "cp1",
			},
		},
	}

	testCases := []struct {
		name               string
		deletionTimeout    *metav1.Duration // Machine.spec.nodeDeletionTimeout for this case
		resultErr          bool             // whether reconcileDelete is expected to return an error
		clusterDeleted     bool             // mark the owning Cluster as being deleted
		expectNodeDeletion bool             // whether the Node should be gone afterwards
		createFakeClient   func(...client.Object) client.Client
	}{
		{
			name:               "should return no error when deletion is successful",
			deletionTimeout:    &metav1.Duration{Duration: time.Second},
			resultErr:          false,
			expectNodeDeletion: true,
			createFakeClient: func(initObjs ...client.Object) client.Client {
				return fake.NewClientBuilder().
					WithObjects(initObjs...).
					WithStatusSubresource(&clusterv1.Machine{}).
					Build()
			},
		},
		{
			name:               "should return an error when timeout is not expired and node deletion fails",
			deletionTimeout:    &metav1.Duration{Duration: time.Hour},
			resultErr:          true,
			expectNodeDeletion: false,
			createFakeClient: func(initObjs ...client.Object) client.Client {
				fc := fake.NewClientBuilder().
					WithObjects(initObjs...).
					WithStatusSubresource(&clusterv1.Machine{}).
					Build()
				return fakeClientWithNodeDeletionErr{fc}
			},
		},
		{
			name:               "should return an error when timeout is infinite and node deletion fails",
			deletionTimeout:    &metav1.Duration{Duration: 0}, // should lead to infinite timeout
			resultErr:          true,
			expectNodeDeletion: false,
			createFakeClient: func(initObjs ...client.Object) client.Client {
				fc := fake.NewClientBuilder().
					WithObjects(initObjs...).
					WithStatusSubresource(&clusterv1.Machine{}).
					Build()
				return fakeClientWithNodeDeletionErr{fc}
			},
		},
		{
			name:               "should not return an error when timeout is expired and node deletion fails",
			deletionTimeout:    &metav1.Duration{Duration: time.Millisecond},
			resultErr:          false,
			expectNodeDeletion: false,
			createFakeClient: func(initObjs ...client.Object) client.Client {
				fc := fake.NewClientBuilder().
					WithObjects(initObjs...).
					WithStatusSubresource(&clusterv1.Machine{}).
					Build()
				return fakeClientWithNodeDeletionErr{fc}
			},
		},
		{
			name:               "should not delete the node or return an error when the cluster is marked for deletion",
			deletionTimeout:    nil, // should lead to infinite timeout
			resultErr:          false,
			clusterDeleted:     true,
			expectNodeDeletion: false,
			createFakeClient: func(initObjs ...client.Object) client.Client {
				fc := fake.NewClientBuilder().
					WithObjects(initObjs...).
					WithStatusSubresource(&clusterv1.Machine{}).
					Build()
				return fakeClientWithNodeDeletionErr{fc}
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			m := testMachine.DeepCopy()
			m.Spec.NodeDeletionTimeout = tc.deletionTimeout

			fakeClient := tc.createFakeClient(node, m, cpmachine1)
			tracker := remote.NewTestClusterCacheTracker(ctrl.Log, fakeClient, fakeScheme, client.ObjectKeyFromObject(&testCluster))

			// Short retry timeout so failing cases don't block the test.
			r := &Reconciler{
				Client:                    fakeClient,
				UnstructuredCachingClient: fakeClient,
				Tracker:                   tracker,
				recorder:                  record.NewFakeRecorder(10),
				nodeDeletionRetryTimeout:  10 * time.Millisecond,
			}

			cluster := testCluster.DeepCopy()
			if tc.clusterDeleted {
				cluster.DeletionTimestamp = &metav1.Time{Time: deletionTime.Add(time.Hour)}
			}

			_, err := r.reconcileDelete(context.Background(), cluster, m)

			if tc.resultErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
				if tc.expectNodeDeletion {
					// The Get must fail: the node was deleted.
					n := &corev1.Node{}
					g.Expect(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(node), n)).NotTo(Succeed())
				}
			}
		})
	}
}

// addConditionsToExternal adds a condition list to an external object.
2391 func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { 2392 existingConditions := clusterv1.Conditions{} 2393 if cs := conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { 2394 existingConditions = cs 2395 } 2396 existingConditions = append(existingConditions, newConditions...) 2397 conditions.UnstructuredSetter(u).SetConditions(existingConditions) 2398 } 2399 2400 // asserts the conditions set on the Getter object. 2401 // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). 2402 func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clusterv1.Condition) { 2403 t.Helper() 2404 2405 for _, condition := range conditions { 2406 assertCondition(t, from, condition) 2407 } 2408 } 2409 2410 // asserts whether a condition of type is set on the Getter object 2411 // when the condition is true, asserting the reason/severity/message 2412 // for the condition are avoided. 2413 func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1.Condition) { 2414 t.Helper() 2415 2416 g := NewWithT(t) 2417 g.Expect(conditions.Has(from, condition.Type)).To(BeTrue()) 2418 2419 if condition.Status == corev1.ConditionTrue { 2420 conditions.IsTrue(from, condition.Type) 2421 } else { 2422 conditionToBeAsserted := conditions.Get(from, condition.Type) 2423 g.Expect(conditionToBeAsserted.Status).To(Equal(condition.Status)) 2424 g.Expect(conditionToBeAsserted.Severity).To(Equal(condition.Severity)) 2425 g.Expect(conditionToBeAsserted.Reason).To(Equal(condition.Reason)) 2426 if condition.Message != "" { 2427 g.Expect(conditionToBeAsserted.Message).To(Equal(condition.Message)) 2428 } 2429 } 2430 }