sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/machine/machine_controller_test.go (about) 1 /* 2 Copyright 2019 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package machine 18 19 import ( 20 "context" 21 "fmt" 22 "testing" 23 "time" 24 25 "github.com/go-logr/logr" 26 . "github.com/onsi/gomega" 27 corev1 "k8s.io/api/core/v1" 28 apierrors "k8s.io/apimachinery/pkg/api/errors" 29 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 31 "k8s.io/client-go/kubernetes/scheme" 32 "k8s.io/client-go/tools/record" 33 "k8s.io/utils/ptr" 34 ctrl "sigs.k8s.io/controller-runtime" 35 "sigs.k8s.io/controller-runtime/pkg/client" 36 "sigs.k8s.io/controller-runtime/pkg/client/apiutil" 37 "sigs.k8s.io/controller-runtime/pkg/client/fake" 38 "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 39 "sigs.k8s.io/controller-runtime/pkg/log" 40 "sigs.k8s.io/controller-runtime/pkg/reconcile" 41 42 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 43 "sigs.k8s.io/cluster-api/api/v1beta1/index" 44 "sigs.k8s.io/cluster-api/controllers/remote" 45 "sigs.k8s.io/cluster-api/internal/test/builder" 46 "sigs.k8s.io/cluster-api/internal/util/ssa" 47 "sigs.k8s.io/cluster-api/util" 48 "sigs.k8s.io/cluster-api/util/conditions" 49 "sigs.k8s.io/cluster-api/util/patch" 50 ) 51 52 func TestWatches(t *testing.T) { 53 g := NewWithT(t) 54 ns, err := env.CreateNamespace(ctx, "test-machine-watches") 55 g.Expect(err).ToNot(HaveOccurred()) 56 57 infraMachine := &unstructured.Unstructured{ 58 Object: map[string]interface{}{ 59 "kind": "GenericInfrastructureMachine", 60 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 61 "metadata": map[string]interface{}{ 62 "name": "infra-config1", 63 "namespace": ns.Name, 64 }, 65 "spec": map[string]interface{}{ 66 "providerID": "test://id-1", 67 }, 68 "status": map[string]interface{}{ 69 "ready": true, 70 "addresses": []interface{}{ 71 map[string]interface{}{ 72 "type": "InternalIP", 73 "address": "10.0.0.1", 74 }, 75 }, 76 }, 77 }, 78 } 79 80 defaultBootstrap := &unstructured.Unstructured{ 81 Object: map[string]interface{}{ 82 "kind": "GenericBootstrapConfig", 83 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 84 "metadata": map[string]interface{}{ 85 "name": "bootstrap-config-machinereconcile", 86 "namespace": ns.Name, 87 }, 88 "spec": map[string]interface{}{}, 89 "status": map[string]interface{}{}, 90 }, 91 } 92 93 testCluster := &clusterv1.Cluster{ 94 ObjectMeta: metav1.ObjectMeta{ 95 GenerateName: "machine-reconcile-", 96 Namespace: ns.Name, 97 }, 98 } 99 100 node := &corev1.Node{ 101 ObjectMeta: metav1.ObjectMeta{ 102 Name: "node-1", 103 Namespace: ns.Name, 104 }, 105 Spec: corev1.NodeSpec{ 106 ProviderID: "test://id-1", 107 }, 108 } 109 110 g.Expect(env.Create(ctx, testCluster)).To(Succeed()) 111 g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) 112 g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed()) 113 g.Expect(env.Create(ctx, node)).To(Succeed()) 114 g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) 115 116 defer func(do ...client.Object) { 117 g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) 118 }(ns, testCluster, defaultBootstrap) 119 120 // Patch infra machine ready 121 patchHelper, err := patch.NewHelper(infraMachine, env) 122 g.Expect(err).ShouldNot(HaveOccurred()) 123 g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) 124 g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) 125 126 // Patch bootstrap ready 127 patchHelper, err = patch.NewHelper(defaultBootstrap, env) 128 g.Expect(err).ShouldNot(HaveOccurred()) 129 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed()) 130 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed()) 131 g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed()) 132 133 machine := &clusterv1.Machine{ 134 ObjectMeta: metav1.ObjectMeta{ 135 GenerateName: "machine-created-", 136 Namespace: ns.Name, 137 Labels: map[string]string{ 138 clusterv1.MachineControlPlaneLabel: "", 139 }, 140 }, 141 Spec: clusterv1.MachineSpec{ 142 ClusterName: testCluster.Name, 143 InfrastructureRef: corev1.ObjectReference{ 144 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 145 Kind: "GenericInfrastructureMachine", 146 Name: "infra-config1", 147 }, 148 Bootstrap: clusterv1.Bootstrap{ 149 ConfigRef: &corev1.ObjectReference{ 150 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 151 Kind: "GenericBootstrapConfig", 152 Name: "bootstrap-config-machinereconcile", 153 }, 154 }, 155 }, 156 } 157 158 g.Expect(env.Create(ctx, machine)).To(Succeed()) 159 defer func() { 160 g.Expect(env.Cleanup(ctx, machine)).To(Succeed()) 161 }() 162 163 // Wait for reconciliation to happen. 164 // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. 165 key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} 166 g.Eventually(func() bool { 167 if err := env.Get(ctx, key, machine); err != nil { 168 return false 169 } 170 return machine.Status.NodeRef != nil 171 }, timeout).Should(BeTrue()) 172 173 // Node deletion will trigger node watchers and a request will be added to the queue. 174 g.Expect(env.Delete(ctx, node)).To(Succeed()) 175 // TODO: Once conditions are in place, check if node deletion triggered a reconcile. 176 177 // Delete infra machine, external tracker will trigger reconcile 178 // and machine Status.FailureReason should be non-nil after reconcileInfrastructure 179 g.Expect(env.Delete(ctx, infraMachine)).To(Succeed()) 180 g.Eventually(func() bool { 181 if err := env.Get(ctx, key, machine); err != nil { 182 return false 183 } 184 return machine.Status.FailureMessage != nil 185 }, timeout).Should(BeTrue()) 186 } 187 188 func TestWatchesDelete(t *testing.T) { 189 g := NewWithT(t) 190 ns, err := env.CreateNamespace(ctx, "test-machine-watches-delete") 191 g.Expect(err).ToNot(HaveOccurred()) 192 193 infraMachine := &unstructured.Unstructured{ 194 Object: map[string]interface{}{ 195 "kind": "GenericInfrastructureMachine", 196 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 197 "metadata": map[string]interface{}{ 198 "name": "infra-config1", 199 "namespace": ns.Name, 200 }, 201 "spec": map[string]interface{}{ 202 "providerID": "test://id-1", 203 }, 204 "status": map[string]interface{}{ 205 "ready": true, 206 "addresses": []interface{}{ 207 map[string]interface{}{ 208 "type": "InternalIP", 209 "address": "10.0.0.1", 210 }, 211 }, 212 }, 213 }, 214 } 215 infraMachineFinalizer := "test.infrastructure.cluster.x-k8s.io" 216 controllerutil.AddFinalizer(infraMachine, infraMachineFinalizer) 217 218 defaultBootstrap := &unstructured.Unstructured{ 219 Object: map[string]interface{}{ 220 "kind": "GenericBootstrapConfig", 221 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 222 "metadata": map[string]interface{}{ 223 "name": "bootstrap-config-machinereconcile", 224 "namespace": ns.Name, 225 }, 226 "spec": map[string]interface{}{}, 227 "status": map[string]interface{}{}, 228 }, 229 } 230 bootstrapFinalizer := "test.bootstrap.cluster.x-k8s.io" 231 controllerutil.AddFinalizer(defaultBootstrap, bootstrapFinalizer) 232 233 testCluster := &clusterv1.Cluster{ 234 ObjectMeta: metav1.ObjectMeta{ 235 GenerateName: "machine-reconcile-", 236 Namespace: ns.Name, 237 }, 238 Spec: clusterv1.ClusterSpec{ 239 // we create the cluster in paused state so we don't reconcile 240 // the machine immediately after creation. 241 // This avoids going through reconcileExternal, which adds watches 242 // for the provider machine and the bootstrap config objects. 243 Paused: true, 244 }, 245 } 246 247 g.Expect(env.Create(ctx, testCluster)).To(Succeed()) 248 g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) 249 g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed()) 250 g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) 251 252 defer func(do ...client.Object) { 253 g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) 254 }(ns, testCluster, defaultBootstrap) 255 256 // Patch infra machine ready 257 patchHelper, err := patch.NewHelper(infraMachine, env) 258 g.Expect(err).ShouldNot(HaveOccurred()) 259 g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) 260 g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) 261 262 // Patch bootstrap ready 263 patchHelper, err = patch.NewHelper(defaultBootstrap, env) 264 g.Expect(err).ShouldNot(HaveOccurred()) 265 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed()) 266 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed()) 267 g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed()) 268 269 machine := &clusterv1.Machine{ 270 ObjectMeta: metav1.ObjectMeta{ 271 GenerateName: "machine-created-", 272 Namespace: ns.Name, 273 Labels: map[string]string{ 274 clusterv1.MachineControlPlaneLabel: "", 275 }, 276 }, 277 Spec: clusterv1.MachineSpec{ 278 ClusterName: testCluster.Name, 279 InfrastructureRef: corev1.ObjectReference{ 280 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 281 Kind: "GenericInfrastructureMachine", 282 Name: "infra-config1", 283 }, 284 Bootstrap: clusterv1.Bootstrap{ 285 ConfigRef: &corev1.ObjectReference{ 286 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 287 Kind: "GenericBootstrapConfig", 288 Name: "bootstrap-config-machinereconcile", 289 }, 290 }, 291 }, 292 } 293 // We create the machine with a finalizer so the machine is not deleted immediately. 294 controllerutil.AddFinalizer(machine, clusterv1.MachineFinalizer) 295 296 g.Expect(env.Create(ctx, machine)).To(Succeed()) 297 defer func() { 298 g.Expect(env.Cleanup(ctx, machine)).To(Succeed()) 299 }() 300 301 // We mark the machine for deletion 302 g.Expect(env.Delete(ctx, machine)).To(Succeed()) 303 304 // We unpause the cluster so the machine can be reconciled. 305 testCluster.Spec.Paused = false 306 g.Expect(env.Update(ctx, testCluster)).To(Succeed()) 307 308 // Wait for reconciliation to happen. 309 // The first reconciliation should add the cluster name label. 310 key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} 311 g.Eventually(func() bool { 312 if err := env.Get(ctx, key, machine); err != nil { 313 return false 314 } 315 return machine.Labels[clusterv1.ClusterNameLabel] == testCluster.Name 316 }, timeout).Should(BeTrue()) 317 318 // Deleting the machine should mark the infra machine for deletion 319 infraMachineKey := client.ObjectKey{Name: infraMachine.GetName(), Namespace: infraMachine.GetNamespace()} 320 g.Eventually(func() bool { 321 if err := env.Get(ctx, infraMachineKey, infraMachine); err != nil { 322 return false 323 } 324 return infraMachine.GetDeletionTimestamp() != nil 325 }, timeout).Should(BeTrue(), "infra machine should be marked for deletion") 326 327 // We wait a bit and remove the finalizer, simulating the infra machine controller. 328 time.Sleep(2 * time.Second) 329 infraMachine.SetFinalizers([]string{}) 330 g.Expect(env.Update(ctx, infraMachine)).To(Succeed()) 331 332 // This should delete the infra machine 333 g.Eventually(func() bool { 334 err := env.Get(ctx, infraMachineKey, infraMachine) 335 return apierrors.IsNotFound(err) 336 }, timeout).Should(BeTrue(), "infra machine should be deleted") 337 338 // If the watch on infra machine works, deleting of the infra machine will trigger another 339 // reconcile, which will mark the bootstrap config for deletion 340 bootstrapKey := client.ObjectKey{Name: defaultBootstrap.GetName(), Namespace: defaultBootstrap.GetNamespace()} 341 g.Eventually(func() bool { 342 if err := env.Get(ctx, bootstrapKey, defaultBootstrap); err != nil { 343 return false 344 } 345 return defaultBootstrap.GetDeletionTimestamp() != nil 346 }, timeout).Should(BeTrue(), "bootstrap config should be marked for deletion") 347 348 // We wait a bit a remove the finalizer, simulating the bootstrap config controller. 349 time.Sleep(2 * time.Second) 350 defaultBootstrap.SetFinalizers([]string{}) 351 g.Expect(env.Update(ctx, defaultBootstrap)).To(Succeed()) 352 353 // This should delete the bootstrap config. 354 g.Eventually(func() bool { 355 err := env.Get(ctx, bootstrapKey, defaultBootstrap) 356 return apierrors.IsNotFound(err) 357 }, timeout).Should(BeTrue(), "bootstrap config should be deleted") 358 359 // If the watch on bootstrap config works, the deleting of the bootstrap config will trigger another 360 // reconcile, which will remove the finalizer and delete the machine 361 g.Eventually(func() bool { 362 err := env.Get(ctx, key, machine) 363 return apierrors.IsNotFound(err) 364 }, timeout).Should(BeTrue(), "machine should be deleted") 365 } 366 367 func TestMachine_Reconcile(t *testing.T) { 368 g := NewWithT(t) 369 370 ns, err := env.CreateNamespace(ctx, "test-machine-reconcile") 371 g.Expect(err).ToNot(HaveOccurred()) 372 373 infraMachine := &unstructured.Unstructured{ 374 Object: map[string]interface{}{ 375 "kind": "GenericInfrastructureMachine", 376 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 377 "metadata": map[string]interface{}{ 378 "name": "infra-config1", 379 "namespace": ns.Name, 380 }, 381 "spec": map[string]interface{}{ 382 "providerID": "test://id-1", 383 }, 384 }, 385 } 386 387 defaultBootstrap := &unstructured.Unstructured{ 388 Object: map[string]interface{}{ 389 "kind": "GenericBootstrapConfig", 390 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 391 "metadata": map[string]interface{}{ 392 "name": "bootstrap-config-machinereconcile", 393 "namespace": ns.Name, 394 }, 395 "spec": map[string]interface{}{}, 396 "status": map[string]interface{}{}, 397 }, 398 } 399 400 testCluster := &clusterv1.Cluster{ 401 ObjectMeta: metav1.ObjectMeta{ 402 GenerateName: "machine-reconcile-", 403 Namespace: ns.Name, 404 }, 405 } 406 407 g.Expect(env.Create(ctx, testCluster)).To(Succeed()) 408 g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) 409 g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed()) 410 411 defer func(do ...client.Object) { 412 g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) 413 }(ns, testCluster, defaultBootstrap) 414 415 machine := &clusterv1.Machine{ 416 ObjectMeta: metav1.ObjectMeta{ 417 GenerateName: "machine-created-", 418 Namespace: ns.Name, 419 Finalizers: []string{clusterv1.MachineFinalizer}, 420 }, 421 Spec: clusterv1.MachineSpec{ 422 ClusterName: testCluster.Name, 423 InfrastructureRef: corev1.ObjectReference{ 424 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 425 Kind: "GenericInfrastructureMachine", 426 Name: "infra-config1", 427 }, 428 Bootstrap: clusterv1.Bootstrap{ 429 ConfigRef: &corev1.ObjectReference{ 430 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 431 Kind: "GenericBootstrapConfig", 432 Name: "bootstrap-config-machinereconcile", 433 }, 434 }, 435 }, 436 Status: clusterv1.MachineStatus{ 437 NodeRef: &corev1.ObjectReference{ 438 Name: "test", 439 }, 440 }, 441 } 442 g.Expect(env.Create(ctx, machine)).To(Succeed()) 443 444 key := client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace} 445 446 // Wait for reconciliation to happen when infra and bootstrap objects are not ready. 447 g.Eventually(func() bool { 448 if err := env.Get(ctx, key, machine); err != nil { 449 return false 450 } 451 return len(machine.Finalizers) > 0 452 }, timeout).Should(BeTrue()) 453 454 // Set bootstrap ready. 455 bootstrapPatch := client.MergeFrom(defaultBootstrap.DeepCopy()) 456 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).ToNot(HaveOccurred()) 457 g.Expect(env.Status().Patch(ctx, defaultBootstrap, bootstrapPatch)).To(Succeed()) 458 459 // Set infrastructure ready. 460 infraMachinePatch := client.MergeFrom(infraMachine.DeepCopy()) 461 g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) 462 g.Expect(env.Status().Patch(ctx, infraMachine, infraMachinePatch)).To(Succeed()) 463 464 // Wait for Machine Ready Condition to become True. 465 g.Eventually(func() bool { 466 if err := env.Get(ctx, key, machine); err != nil { 467 return false 468 } 469 if !conditions.Has(machine, clusterv1.InfrastructureReadyCondition) { 470 return false 471 } 472 readyCondition := conditions.Get(machine, clusterv1.ReadyCondition) 473 return readyCondition.Status == corev1.ConditionTrue 474 }, timeout).Should(BeTrue()) 475 476 g.Expect(env.Delete(ctx, machine)).ToNot(HaveOccurred()) 477 // Wait for Machine to be deleted. 478 g.Eventually(func() bool { 479 if err := env.Get(ctx, key, machine); err != nil { 480 if apierrors.IsNotFound(err) { 481 return true 482 } 483 } 484 return false 485 }, timeout).Should(BeTrue()) 486 487 // Check if Machine deletion successfully deleted infrastructure external reference. 488 keyInfra := client.ObjectKey{Name: infraMachine.GetName(), Namespace: infraMachine.GetNamespace()} 489 g.Eventually(func() bool { 490 if err := env.Get(ctx, keyInfra, infraMachine); err != nil { 491 if apierrors.IsNotFound(err) { 492 return true 493 } 494 } 495 return false 496 }, timeout).Should(BeTrue()) 497 498 // Check if Machine deletion successfully deleted bootstrap external reference. 499 keyBootstrap := client.ObjectKey{Name: defaultBootstrap.GetName(), Namespace: defaultBootstrap.GetNamespace()} 500 g.Eventually(func() bool { 501 if err := env.Get(ctx, keyBootstrap, defaultBootstrap); err != nil { 502 if apierrors.IsNotFound(err) { 503 return true 504 } 505 } 506 return false 507 }, timeout).Should(BeTrue()) 508 } 509 510 func TestMachineFinalizer(t *testing.T) { 511 bootstrapData := "some valid data" 512 clusterCorrectMeta := &clusterv1.Cluster{ 513 ObjectMeta: metav1.ObjectMeta{ 514 Name: "valid-cluster", 515 Namespace: metav1.NamespaceDefault, 516 }, 517 } 518 519 machineValidCluster := &clusterv1.Machine{ 520 ObjectMeta: metav1.ObjectMeta{ 521 Name: "machine1", 522 Namespace: metav1.NamespaceDefault, 523 }, 524 Spec: clusterv1.MachineSpec{ 525 Bootstrap: clusterv1.Bootstrap{ 526 DataSecretName: &bootstrapData, 527 }, 528 ClusterName: "valid-cluster", 529 }, 530 } 531 532 machineWithFinalizer := &clusterv1.Machine{ 533 ObjectMeta: metav1.ObjectMeta{ 534 Name: "machine2", 535 Namespace: metav1.NamespaceDefault, 536 Finalizers: []string{"some-other-finalizer"}, 537 }, 538 Spec: clusterv1.MachineSpec{ 539 Bootstrap: clusterv1.Bootstrap{ 540 DataSecretName: &bootstrapData, 541 }, 542 ClusterName: "valid-cluster", 543 }, 544 } 545 546 testCases := []struct { 547 name string 548 request reconcile.Request 549 m *clusterv1.Machine 550 expectedFinalizers []string 551 }{ 552 { 553 name: "should add a machine finalizer to the machine if it doesn't have one", 554 request: reconcile.Request{ 555 NamespacedName: util.ObjectKey(machineValidCluster), 556 }, 557 m: machineValidCluster, 558 expectedFinalizers: []string{clusterv1.MachineFinalizer}, 559 }, 560 { 561 name: "should append the machine finalizer to the machine if it already has a finalizer", 562 request: reconcile.Request{ 563 NamespacedName: util.ObjectKey(machineWithFinalizer), 564 }, 565 m: machineWithFinalizer, 566 expectedFinalizers: []string{"some-other-finalizer", clusterv1.MachineFinalizer}, 567 }, 568 } 569 570 for _, tc := range testCases { 571 t.Run(tc.name, func(t *testing.T) { 572 g := NewWithT(t) 573 574 c := fake.NewClientBuilder().WithObjects( 575 clusterCorrectMeta, 576 machineValidCluster, 577 machineWithFinalizer, 578 ).Build() 579 mr := &Reconciler{ 580 Client: c, 581 UnstructuredCachingClient: c, 582 } 583 584 _, _ = mr.Reconcile(ctx, tc.request) 585 586 key := client.ObjectKey{Namespace: tc.m.Namespace, Name: tc.m.Name} 587 var actual clusterv1.Machine 588 if len(tc.expectedFinalizers) > 0 { 589 g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed()) 590 g.Expect(actual.Finalizers).ToNot(BeEmpty()) 591 g.Expect(actual.Finalizers).To(Equal(tc.expectedFinalizers)) 592 } else { 593 g.Expect(actual.Finalizers).To(BeEmpty()) 594 } 595 }) 596 } 597 } 598 599 func TestMachineOwnerReference(t *testing.T) { 600 bootstrapData := "some valid data" 601 testCluster := &clusterv1.Cluster{ 602 TypeMeta: metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()}, 603 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 604 } 605 606 machineInvalidCluster := &clusterv1.Machine{ 607 ObjectMeta: metav1.ObjectMeta{ 608 Name: "machine1", 609 Namespace: metav1.NamespaceDefault, 610 }, 611 Spec: clusterv1.MachineSpec{ 612 ClusterName: "invalid", 613 }, 614 } 615 616 machineValidCluster := &clusterv1.Machine{ 617 ObjectMeta: metav1.ObjectMeta{ 618 Name: "machine2", 619 Namespace: metav1.NamespaceDefault, 620 }, 621 Spec: clusterv1.MachineSpec{ 622 Bootstrap: clusterv1.Bootstrap{ 623 DataSecretName: &bootstrapData, 624 }, 625 ClusterName: "test-cluster", 626 }, 627 } 628 629 machineValidMachine := &clusterv1.Machine{ 630 ObjectMeta: metav1.ObjectMeta{ 631 Name: "machine3", 632 Namespace: metav1.NamespaceDefault, 633 Labels: map[string]string{ 634 clusterv1.ClusterNameLabel: "valid-cluster", 635 }, 636 OwnerReferences: []metav1.OwnerReference{ 637 { 638 APIVersion: clusterv1.GroupVersion.String(), 639 Kind: "MachineSet", 640 Name: "valid-machineset", 641 Controller: ptr.To(true), 642 }, 643 }, 644 }, 645 Spec: clusterv1.MachineSpec{ 646 Bootstrap: clusterv1.Bootstrap{ 647 DataSecretName: &bootstrapData, 648 }, 649 ClusterName: "test-cluster", 650 }, 651 } 652 653 machineValidControlled := &clusterv1.Machine{ 654 ObjectMeta: metav1.ObjectMeta{ 655 Name: "machine4", 656 Namespace: metav1.NamespaceDefault, 657 Labels: map[string]string{ 658 clusterv1.ClusterNameLabel: "valid-cluster", 659 clusterv1.MachineControlPlaneLabel: "", 660 }, 661 OwnerReferences: []metav1.OwnerReference{ 662 { 663 APIVersion: "test.group", 664 Kind: "KubeadmControlPlane", 665 Name: "valid-controlplane", 666 Controller: ptr.To(true), 667 }, 668 }, 669 }, 670 Spec: clusterv1.MachineSpec{ 671 Bootstrap: clusterv1.Bootstrap{ 672 DataSecretName: &bootstrapData, 673 }, 674 ClusterName: "test-cluster", 675 }, 676 } 677 678 testCases := []struct { 679 name string 680 request reconcile.Request 681 m *clusterv1.Machine 682 expectedOR []metav1.OwnerReference 683 }{ 684 { 685 name: "should add owner reference to machine referencing a cluster with correct type meta", 686 request: reconcile.Request{ 687 NamespacedName: util.ObjectKey(machineValidCluster), 688 }, 689 m: machineValidCluster, 690 expectedOR: []metav1.OwnerReference{ 691 { 692 APIVersion: testCluster.APIVersion, 693 Kind: testCluster.Kind, 694 Name: testCluster.Name, 695 UID: testCluster.UID, 696 }, 697 }, 698 }, 699 { 700 name: "should not add cluster owner reference if machine is owned by a machine set", 701 request: reconcile.Request{ 702 NamespacedName: util.ObjectKey(machineValidMachine), 703 }, 704 m: machineValidMachine, 705 expectedOR: []metav1.OwnerReference{ 706 { 707 APIVersion: clusterv1.GroupVersion.String(), 708 Kind: "MachineSet", 709 Name: "valid-machineset", 710 Controller: ptr.To(true), 711 }, 712 }, 713 }, 714 { 715 name: "should not add cluster owner reference if machine has a controller owner", 716 request: reconcile.Request{ 717 NamespacedName: util.ObjectKey(machineValidControlled), 718 }, 719 m: machineValidControlled, 720 expectedOR: []metav1.OwnerReference{ 721 { 722 APIVersion: "test.group", 723 Kind: "KubeadmControlPlane", 724 Name: "valid-controlplane", 725 Controller: ptr.To(true), 726 }, 727 }, 728 }, 729 } 730 731 for _, tc := range testCases { 732 t.Run(tc.name, func(t *testing.T) { 733 g := NewWithT(t) 734 735 c := fake.NewClientBuilder().WithObjects( 736 testCluster, 737 machineInvalidCluster, 738 machineValidCluster, 739 machineValidMachine, 740 machineValidControlled, 741 ).WithStatusSubresource(&clusterv1.Machine{}).Build() 742 mr := &Reconciler{ 743 Client: c, 744 UnstructuredCachingClient: c, 745 APIReader: c, 746 } 747 748 key := client.ObjectKey{Namespace: tc.m.Namespace, Name: tc.m.Name} 749 var actual clusterv1.Machine 750 751 // this first requeue is to add finalizer 752 result, err := mr.Reconcile(ctx, tc.request) 753 g.Expect(err).ToNot(HaveOccurred()) 754 g.Expect(result).To(BeComparableTo(ctrl.Result{})) 755 g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed()) 756 g.Expect(actual.Finalizers).To(ContainElement(clusterv1.MachineFinalizer)) 757 758 _, _ = mr.Reconcile(ctx, tc.request) 759 760 if len(tc.expectedOR) > 0 { 761 g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed()) 762 g.Expect(actual.OwnerReferences).To(BeComparableTo(tc.expectedOR)) 763 } else { 764 g.Expect(actual.OwnerReferences).To(BeEmpty()) 765 } 766 }) 767 } 768 } 769 770 func TestReconcileRequest(t *testing.T) { 771 infraConfig := unstructured.Unstructured{ 772 Object: map[string]interface{}{ 773 "kind": "GenericInfrastructureMachine", 774 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 775 "metadata": map[string]interface{}{ 776 "name": "infra-config1", 777 "namespace": metav1.NamespaceDefault, 778 }, 779 "spec": map[string]interface{}{ 780 "providerID": "test://id-1", 781 }, 782 "status": map[string]interface{}{ 783 "ready": true, 784 "addresses": []interface{}{ 785 map[string]interface{}{ 786 "type": "InternalIP", 787 "address": "10.0.0.1", 788 }, 789 }, 790 }, 791 }, 792 } 793 794 time := metav1.Now() 795 796 testCluster := clusterv1.Cluster{ 797 ObjectMeta: metav1.ObjectMeta{ 798 Name: "test-cluster", 799 Namespace: metav1.NamespaceDefault, 800 }, 801 } 802 803 node := &corev1.Node{ 804 ObjectMeta: metav1.ObjectMeta{ 805 Name: "test", 806 }, 807 Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, 808 } 809 810 type expected struct { 811 result reconcile.Result 812 err bool 813 } 814 testCases := []struct { 815 machine clusterv1.Machine 816 expected expected 817 }{ 818 { 819 machine: clusterv1.Machine{ 820 ObjectMeta: metav1.ObjectMeta{ 821 Name: "created", 822 Namespace: metav1.NamespaceDefault, 823 Finalizers: []string{clusterv1.MachineFinalizer}, 824 }, 825 Spec: clusterv1.MachineSpec{ 826 ClusterName: "test-cluster", 827 InfrastructureRef: corev1.ObjectReference{ 828 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 829 Kind: "GenericInfrastructureMachine", 830 Name: "infra-config1", 831 }, 832 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 833 }, 834 Status: clusterv1.MachineStatus{ 835 NodeRef: &corev1.ObjectReference{ 836 Name: "test", 837 }, 838 ObservedGeneration: 1, 839 }, 840 }, 841 expected: expected{ 842 result: reconcile.Result{}, 843 err: false, 844 }, 845 }, 846 { 847 machine: clusterv1.Machine{ 848 ObjectMeta: metav1.ObjectMeta{ 849 Name: "updated", 850 Namespace: metav1.NamespaceDefault, 851 Finalizers: []string{clusterv1.MachineFinalizer}, 852 }, 853 Spec: clusterv1.MachineSpec{ 854 ClusterName: "test-cluster", 855 InfrastructureRef: corev1.ObjectReference{ 856 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 857 Kind: "GenericInfrastructureMachine", 858 Name: "infra-config1", 859 }, 860 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 861 }, 862 Status: clusterv1.MachineStatus{ 863 NodeRef: &corev1.ObjectReference{ 864 Name: "test", 865 }, 866 ObservedGeneration: 1, 867 }, 868 }, 869 expected: expected{ 870 result: reconcile.Result{}, 871 err: false, 872 }, 873 }, 874 { 875 machine: clusterv1.Machine{ 876 ObjectMeta: metav1.ObjectMeta{ 877 Name: "deleted", 878 Namespace: metav1.NamespaceDefault, 879 Labels: map[string]string{ 880 clusterv1.MachineControlPlaneLabel: "", 881 }, 882 Finalizers: []string{clusterv1.MachineFinalizer}, 883 DeletionTimestamp: &time, 884 }, 885 Spec: clusterv1.MachineSpec{ 886 ClusterName: "test-cluster", 887 InfrastructureRef: corev1.ObjectReference{ 888 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 889 Kind: "GenericInfrastructureMachine", 890 Name: "infra-config1", 891 }, 892 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 893 }, 894 }, 895 expected: expected{ 896 result: reconcile.Result{}, 897 err: false, 898 }, 899 }, 900 } 901 902 for i := range testCases { 903 tc := testCases[i] 904 t.Run("machine should be "+tc.machine.Name, func(t *testing.T) { 905 g := NewWithT(t) 906 907 clientFake := fake.NewClientBuilder().WithObjects( 908 node, 909 &testCluster, 910 &tc.machine, 911 builder.GenericInfrastructureMachineCRD.DeepCopy(), 912 &infraConfig, 913 ).WithStatusSubresource(&clusterv1.Machine{}).WithIndex(&corev1.Node{}, index.NodeProviderIDField, index.NodeByProviderID).Build() 914 915 r := &Reconciler{ 916 Client: clientFake, 917 UnstructuredCachingClient: clientFake, 918 Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), 919 ssaCache: ssa.NewCache(), 920 } 921 922 result, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&tc.machine)}) 923 if tc.expected.err { 924 g.Expect(err).To(HaveOccurred()) 925 } else { 926 g.Expect(err).ToNot(HaveOccurred()) 927 } 928 929 g.Expect(result).To(BeComparableTo(tc.expected.result)) 930 }) 931 } 932 } 933 934 func TestMachineConditions(t *testing.T) { 935 infraConfig := func(ready bool) *unstructured.Unstructured { 936 return &unstructured.Unstructured{ 937 Object: map[string]interface{}{ 938 "kind": "GenericInfrastructureMachine", 939 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 940 "metadata": map[string]interface{}{ 941 "name": "infra-config1", 942 "namespace": metav1.NamespaceDefault, 943 }, 944 "spec": map[string]interface{}{ 945 "providerID": "test://id-1", 946 }, 947 "status": map[string]interface{}{ 948 "ready": ready, 949 "addresses": []interface{}{ 950 map[string]interface{}{ 951 "type": "InternalIP", 952 "address": "10.0.0.1", 953 }, 954 }, 955 }, 956 }, 957 } 958 } 959 960 boostrapConfig := func(ready bool) *unstructured.Unstructured { 961 status := map[string]interface{}{ 962 "ready": ready, 963 } 964 if ready { 965 status["dataSecretName"] = "data" 966 } 967 return &unstructured.Unstructured{ 968 Object: map[string]interface{}{ 969 "kind": "GenericBootstrapConfig", 970 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 971 "metadata": map[string]interface{}{ 972 "name": "bootstrap-config1", 973 "namespace": metav1.NamespaceDefault, 974 }, 975 "status": status, 976 }, 977 } 978 } 979 980 testCluster := clusterv1.Cluster{ 981 ObjectMeta: metav1.ObjectMeta{ 982 Name: "test-cluster", 983 Namespace: metav1.NamespaceDefault, 984 }, 985 } 986 987 machine := clusterv1.Machine{ 988 ObjectMeta: metav1.ObjectMeta{ 989 Name: "blah", 990 Namespace: metav1.NamespaceDefault, 991 Labels: map[string]string{ 992 clusterv1.MachineControlPlaneLabel: "", 993 }, 994 Finalizers: []string{clusterv1.MachineFinalizer}, 995 }, 996 Spec: clusterv1.MachineSpec{ 997 ProviderID: ptr.To("test://id-1"), 998 ClusterName: "test-cluster", 999 InfrastructureRef: corev1.ObjectReference{ 1000 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 1001 Kind: "GenericInfrastructureMachine", 1002 Name: "infra-config1", 1003 }, 1004 Bootstrap: clusterv1.Bootstrap{ 1005 ConfigRef: &corev1.ObjectReference{ 1006 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 1007 Kind: "GenericBootstrapConfig", 1008 Name: "bootstrap-config1", 1009 }, 1010 }, 1011 }, 1012 Status: clusterv1.MachineStatus{ 1013 NodeRef: &corev1.ObjectReference{ 1014 Name: "test", 1015 }, 1016 ObservedGeneration: 1, 1017 }, 1018 } 1019 1020 node := &corev1.Node{ 1021 ObjectMeta: metav1.ObjectMeta{ 1022 Name: "test", 1023 }, 1024 Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, 1025 } 1026 1027 testcases := []struct { 1028 name string 1029 infraReady bool 1030 bootstrapReady bool 1031 beforeFunc func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) 1032 additionalObjects []client.Object 1033 conditionsToAssert []*clusterv1.Condition 1034 wantErr bool 1035 }{ 1036 { 1037 name: "all conditions true", 1038 infraReady: true, 1039 bootstrapReady: true, 1040 beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { 1041 // since these conditions are set by an external controller 1042 conditions.MarkTrue(m, clusterv1.MachineHealthCheckSucceededCondition) 1043 conditions.MarkTrue(m, clusterv1.MachineOwnerRemediatedCondition) 1044 }, 1045 conditionsToAssert: []*clusterv1.Condition{ 1046 conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), 1047 conditions.TrueCondition(clusterv1.BootstrapReadyCondition), 1048 conditions.TrueCondition(clusterv1.MachineOwnerRemediatedCondition), 1049 conditions.TrueCondition(clusterv1.MachineHealthCheckSucceededCondition), 1050 conditions.TrueCondition(clusterv1.ReadyCondition), 1051 }, 1052 }, 1053 { 1054 name: "infra condition consumes reason from the infra config", 1055 infraReady: false, 1056 bootstrapReady: true, 1057 beforeFunc: func(_, infra *unstructured.Unstructured, _ *clusterv1.Machine) { 1058 addConditionsToExternal(infra, clusterv1.Conditions{ 1059 { 1060 Type: clusterv1.ReadyCondition, 1061 Status: corev1.ConditionFalse, 1062 Severity: clusterv1.ConditionSeverityInfo, 1063 Reason: "Custom reason", 1064 }, 1065 }) 1066 }, 1067 conditionsToAssert: []*clusterv1.Condition{ 1068 conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), 1069 }, 1070 }, 1071 { 1072 name: "infra condition consumes the fallback reason", 1073 infraReady: false, 1074 bootstrapReady: true, 1075 conditionsToAssert: []*clusterv1.Condition{ 1076 conditions.FalseCondition(clusterv1.InfrastructureReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), 1077 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), 1078 }, 1079 }, 1080 { 1081 name: "bootstrap condition consumes reason from the bootstrap config", 1082 infraReady: true, 1083 bootstrapReady: false, 1084 beforeFunc: func(bootstrap, _ *unstructured.Unstructured, _ *clusterv1.Machine) { 1085 addConditionsToExternal(bootstrap, clusterv1.Conditions{ 1086 { 1087 Type: clusterv1.ReadyCondition, 1088 Status: corev1.ConditionFalse, 1089 Severity: clusterv1.ConditionSeverityInfo, 1090 Reason: "Custom reason", 1091 }, 1092 }) 1093 }, 1094 conditionsToAssert: []*clusterv1.Condition{ 1095 conditions.FalseCondition(clusterv1.BootstrapReadyCondition, "Custom reason", clusterv1.ConditionSeverityInfo, ""), 1096 }, 1097 }, 1098 { 1099 name: "bootstrap condition consumes the fallback reason", 1100 infraReady: true, 1101 bootstrapReady: false, 1102 conditionsToAssert: []*clusterv1.Condition{ 1103 conditions.FalseCondition(clusterv1.BootstrapReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), 1104 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""), 1105 }, 1106 }, 1107 // Assert summary conditions 1108 // infra condition takes precedence over bootstrap condition in generating summary 1109 { 1110 name: "ready condition summary consumes reason from the infra condition", 1111 infraReady: false, 1112 bootstrapReady: false, 1113 conditionsToAssert: []*clusterv1.Condition{ 1114 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), 1115 }, 1116 }, 1117 { 1118 name: "ready condition summary consumes reason from the machine owner remediated condition", 1119 infraReady: true, 1120 bootstrapReady: true, 1121 beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { 1122 conditions.MarkFalse(m, clusterv1.MachineOwnerRemediatedCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed") 1123 }, 1124 conditionsToAssert: []*clusterv1.Condition{ 1125 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.WaitingForRemediationReason, clusterv1.ConditionSeverityWarning, "MHC failed"), 1126 }, 1127 }, 1128 { 1129 name: "ready condition summary consumes reason from the MHC succeeded condition", 1130 infraReady: true, 1131 bootstrapReady: true, 1132 beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { 1133 conditions.MarkFalse(m, clusterv1.MachineHealthCheckSucceededCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, "") 1134 }, 1135 conditionsToAssert: []*clusterv1.Condition{ 1136 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.NodeNotFoundReason, clusterv1.ConditionSeverityWarning, ""), 1137 }, 1138 }, 1139 { 1140 name: "machine ready and MachineNodeHealthy unknown", 1141 infraReady: true, 1142 bootstrapReady: true, 1143 additionalObjects: []client.Object{&corev1.Node{ 1144 // This is a duplicate node with the same providerID 1145 // This should lead to an error when trying to get the Node for a Machine. 1146 ObjectMeta: metav1.ObjectMeta{ 1147 Name: "test-duplicate", 1148 }, 1149 Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, 1150 }}, 1151 wantErr: true, 1152 conditionsToAssert: []*clusterv1.Condition{ 1153 conditions.TrueCondition(clusterv1.InfrastructureReadyCondition), 1154 conditions.TrueCondition(clusterv1.BootstrapReadyCondition), 1155 conditions.TrueCondition(clusterv1.ReadyCondition), 1156 conditions.UnknownCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID"), 1157 }, 1158 }, 1159 { 1160 name: "ready condition summary consumes reason from the draining succeeded condition", 1161 infraReady: true, 1162 bootstrapReady: true, 1163 beforeFunc: func(_, _ *unstructured.Unstructured, m *clusterv1.Machine) { 1164 conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "") 1165 }, 1166 conditionsToAssert: []*clusterv1.Condition{ 1167 conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, ""), 1168 }, 1169 }, 1170 } 1171 1172 for _, tt := range testcases { 1173 t.Run(tt.name, func(t *testing.T) { 1174 g := NewWithT(t) 1175 1176 // setup objects 1177 bootstrap := boostrapConfig(tt.bootstrapReady) 1178 infra := infraConfig(tt.infraReady) 1179 m := machine.DeepCopy() 1180 if tt.beforeFunc != nil { 1181 tt.beforeFunc(bootstrap, infra, m) 1182 } 1183 1184 objs := []client.Object{&testCluster, m, node, 1185 builder.GenericInfrastructureMachineCRD.DeepCopy(), infra, 1186 builder.GenericBootstrapConfigCRD.DeepCopy(), bootstrap, 1187 } 1188 objs = append(objs, tt.additionalObjects...) 1189 1190 clientFake := fake.NewClientBuilder().WithObjects(objs...). 1191 WithIndex(&corev1.Node{}, index.NodeProviderIDField, index.NodeByProviderID). 1192 WithStatusSubresource(&clusterv1.Machine{}). 1193 Build() 1194 1195 r := &Reconciler{ 1196 Client: clientFake, 1197 UnstructuredCachingClient: clientFake, 1198 recorder: record.NewFakeRecorder(10), 1199 Tracker: remote.NewTestClusterCacheTracker(logr.New(log.NullLogSink{}), clientFake, clientFake, scheme.Scheme, client.ObjectKey{Name: testCluster.Name, Namespace: testCluster.Namespace}), 1200 ssaCache: ssa.NewCache(), 1201 } 1202 1203 _, err := r.Reconcile(ctx, reconcile.Request{NamespacedName: util.ObjectKey(&machine)}) 1204 if tt.wantErr { 1205 g.Expect(err).To(HaveOccurred()) 1206 } else { 1207 g.Expect(err).ToNot(HaveOccurred()) 1208 } 1209 1210 m = &clusterv1.Machine{} 1211 g.Expect(r.Client.Get(ctx, client.ObjectKeyFromObject(&machine), m)).ToNot(HaveOccurred()) 1212 1213 assertConditions(t, m, tt.conditionsToAssert...) 1214 }) 1215 } 1216 } 1217 1218 func TestReconcileDeleteExternal(t *testing.T) { 1219 testCluster := &clusterv1.Cluster{ 1220 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 1221 } 1222 1223 bootstrapConfig := &unstructured.Unstructured{ 1224 Object: map[string]interface{}{ 1225 "kind": "BootstrapConfig", 1226 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 1227 "metadata": map[string]interface{}{ 1228 "name": "delete-bootstrap", 1229 "namespace": metav1.NamespaceDefault, 1230 }, 1231 }, 1232 } 1233 1234 machine := &clusterv1.Machine{ 1235 ObjectMeta: metav1.ObjectMeta{ 1236 Name: "delete", 1237 Namespace: metav1.NamespaceDefault, 1238 }, 1239 Spec: clusterv1.MachineSpec{ 1240 ClusterName: "test-cluster", 1241 Bootstrap: clusterv1.Bootstrap{ 1242 ConfigRef: &corev1.ObjectReference{ 1243 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 1244 Kind: "BootstrapConfig", 1245 Name: "delete-bootstrap", 1246 }, 1247 }, 1248 }, 1249 } 1250 1251 testCases := []struct { 1252 name string 1253 bootstrapExists bool 1254 expectError bool 1255 expected *unstructured.Unstructured 1256 }{ 1257 { 1258 name: "should continue to reconcile delete of external refs if exists", 1259 bootstrapExists: true, 1260 expected: &unstructured.Unstructured{ 1261 Object: map[string]interface{}{ 1262 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 1263 "kind": "BootstrapConfig", 1264 "metadata": map[string]interface{}{ 1265 "name": "delete-bootstrap", 1266 "namespace": metav1.NamespaceDefault, 1267 "resourceVersion": "999", 1268 }, 1269 }, 1270 }, 1271 expectError: false, 1272 }, 1273 { 1274 name: "should no longer reconcile deletion of external refs since it doesn't exist", 1275 bootstrapExists: false, 1276 expected: nil, 1277 expectError: false, 1278 }, 1279 } 1280 1281 for _, tc := range testCases { 1282 t.Run(tc.name, func(t *testing.T) { 1283 g := NewWithT(t) 1284 1285 objs := []client.Object{testCluster, machine} 1286 1287 if tc.bootstrapExists { 1288 objs = append(objs, bootstrapConfig) 1289 } 1290 1291 c := fake.NewClientBuilder().WithObjects(objs...).Build() 1292 r := &Reconciler{ 1293 Client: c, 1294 UnstructuredCachingClient: c, 1295 } 1296 1297 obj, err := r.reconcileDeleteExternal(ctx, testCluster, machine, machine.Spec.Bootstrap.ConfigRef) 1298 if tc.expectError { 1299 g.Expect(err).To(HaveOccurred()) 1300 } else { 1301 g.Expect(err).ToNot(HaveOccurred()) 1302 } 1303 g.Expect(obj).To(BeComparableTo(tc.expected)) 1304 }) 1305 } 1306 } 1307 1308 func TestRemoveMachineFinalizerAfterDeleteReconcile(t *testing.T) { 1309 g := NewWithT(t) 1310 1311 dt := metav1.Now() 1312 1313 testCluster := &clusterv1.Cluster{ 1314 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 1315 } 1316 1317 m := &clusterv1.Machine{ 1318 ObjectMeta: metav1.ObjectMeta{ 1319 Name: "delete123", 1320 Namespace: metav1.NamespaceDefault, 1321 Finalizers: []string{clusterv1.MachineFinalizer, "test"}, 1322 DeletionTimestamp: &dt, 1323 }, 1324 Spec: clusterv1.MachineSpec{ 1325 ClusterName: "test-cluster", 1326 InfrastructureRef: corev1.ObjectReference{ 1327 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 1328 Kind: "GenericInfrastructureMachine", 1329 Name: "infra-config1", 1330 }, 1331 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1332 }, 1333 } 1334 key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name} 1335 c := fake.NewClientBuilder().WithObjects(testCluster, m).WithStatusSubresource(&clusterv1.Machine{}).Build() 1336 mr := &Reconciler{ 1337 Client: c, 1338 UnstructuredCachingClient: c, 1339 } 1340 _, err := mr.Reconcile(ctx, reconcile.Request{NamespacedName: key}) 1341 g.Expect(err).ToNot(HaveOccurred()) 1342 1343 var actual clusterv1.Machine 1344 g.Expect(mr.Client.Get(ctx, key, &actual)).To(Succeed()) 1345 g.Expect(actual.ObjectMeta.Finalizers).To(Equal([]string{"test"})) 1346 } 1347 1348 func TestIsNodeDrainedAllowed(t *testing.T) { 1349 testCluster := &clusterv1.Cluster{ 1350 TypeMeta: metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()}, 1351 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 1352 } 1353 1354 tests := []struct { 1355 name string 1356 machine *clusterv1.Machine 1357 expected bool 1358 }{ 1359 { 1360 name: "Exclude node draining annotation exists", 1361 machine: &clusterv1.Machine{ 1362 ObjectMeta: metav1.ObjectMeta{ 1363 Name: "test-machine", 1364 Namespace: metav1.NamespaceDefault, 1365 Finalizers: []string{clusterv1.MachineFinalizer}, 1366 Annotations: map[string]string{clusterv1.ExcludeNodeDrainingAnnotation: "existed!!"}, 1367 }, 1368 Spec: clusterv1.MachineSpec{ 1369 ClusterName: "test-cluster", 1370 InfrastructureRef: corev1.ObjectReference{}, 1371 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1372 }, 1373 Status: clusterv1.MachineStatus{}, 1374 }, 1375 expected: false, 1376 }, 1377 { 1378 name: "Node draining timeout is over", 1379 machine: &clusterv1.Machine{ 1380 ObjectMeta: metav1.ObjectMeta{ 1381 Name: "test-machine", 1382 Namespace: metav1.NamespaceDefault, 1383 Finalizers: []string{clusterv1.MachineFinalizer}, 1384 }, 1385 Spec: clusterv1.MachineSpec{ 1386 ClusterName: "test-cluster", 1387 InfrastructureRef: corev1.ObjectReference{}, 1388 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1389 NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, 1390 }, 1391 1392 Status: clusterv1.MachineStatus{ 1393 Conditions: clusterv1.Conditions{ 1394 { 1395 Type: clusterv1.DrainingSucceededCondition, 1396 Status: corev1.ConditionFalse, 1397 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 70)).UTC()}, 1398 }, 1399 }, 1400 }, 1401 }, 1402 expected: false, 1403 }, 1404 { 1405 name: "Node draining timeout is not yet over", 1406 machine: &clusterv1.Machine{ 1407 ObjectMeta: metav1.ObjectMeta{ 1408 Name: "test-machine", 1409 Namespace: metav1.NamespaceDefault, 1410 Finalizers: []string{clusterv1.MachineFinalizer}, 1411 }, 1412 Spec: clusterv1.MachineSpec{ 1413 ClusterName: "test-cluster", 1414 InfrastructureRef: corev1.ObjectReference{}, 1415 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1416 NodeDrainTimeout: &metav1.Duration{Duration: time.Second * 60}, 1417 }, 1418 Status: clusterv1.MachineStatus{ 1419 Conditions: clusterv1.Conditions{ 1420 { 1421 Type: clusterv1.DrainingSucceededCondition, 1422 Status: corev1.ConditionFalse, 1423 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, 1424 }, 1425 }, 1426 }, 1427 }, 1428 expected: true, 1429 }, 1430 { 1431 name: "NodeDrainTimeout option is set to its default value 0", 1432 machine: &clusterv1.Machine{ 1433 ObjectMeta: metav1.ObjectMeta{ 1434 Name: "test-machine", 1435 Namespace: metav1.NamespaceDefault, 1436 Finalizers: []string{clusterv1.MachineFinalizer}, 1437 }, 1438 Spec: clusterv1.MachineSpec{ 1439 ClusterName: "test-cluster", 1440 InfrastructureRef: corev1.ObjectReference{}, 1441 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1442 }, 1443 Status: clusterv1.MachineStatus{ 1444 Conditions: clusterv1.Conditions{ 1445 { 1446 Type: clusterv1.DrainingSucceededCondition, 1447 Status: corev1.ConditionFalse, 1448 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, 1449 }, 1450 }, 1451 }, 1452 }, 1453 expected: true, 1454 }, 1455 } 1456 for _, tt := range tests { 1457 t.Run(tt.name, func(t *testing.T) { 1458 g := NewWithT(t) 1459 1460 var objs []client.Object 1461 objs = append(objs, testCluster, tt.machine) 1462 1463 c := fake.NewClientBuilder().WithObjects(objs...).Build() 1464 r := &Reconciler{ 1465 Client: c, 1466 UnstructuredCachingClient: c, 1467 } 1468 1469 got := r.isNodeDrainAllowed(tt.machine) 1470 g.Expect(got).To(Equal(tt.expected)) 1471 }) 1472 } 1473 } 1474 1475 func TestIsNodeVolumeDetachingAllowed(t *testing.T) { 1476 testCluster := &clusterv1.Cluster{ 1477 TypeMeta: metav1.TypeMeta{Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String()}, 1478 ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault, Name: "test-cluster"}, 1479 } 1480 1481 tests := []struct { 1482 name string 1483 machine *clusterv1.Machine 1484 expected bool 1485 }{ 1486 { 1487 name: "Exclude wait node volume detaching annotation exists", 1488 machine: &clusterv1.Machine{ 1489 ObjectMeta: metav1.ObjectMeta{ 1490 Name: "test-machine", 1491 Namespace: metav1.NamespaceDefault, 1492 Finalizers: []string{clusterv1.MachineFinalizer}, 1493 Annotations: map[string]string{clusterv1.ExcludeWaitForNodeVolumeDetachAnnotation: "existed!!"}, 1494 }, 1495 Spec: clusterv1.MachineSpec{ 1496 ClusterName: "test-cluster", 1497 InfrastructureRef: corev1.ObjectReference{}, 1498 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1499 }, 1500 Status: clusterv1.MachineStatus{}, 1501 }, 1502 expected: false, 1503 }, 1504 { 1505 name: "Volume detach timeout is over", 1506 machine: &clusterv1.Machine{ 1507 ObjectMeta: metav1.ObjectMeta{ 1508 Name: "test-machine", 1509 Namespace: metav1.NamespaceDefault, 1510 Finalizers: []string{clusterv1.MachineFinalizer}, 1511 }, 1512 Spec: clusterv1.MachineSpec{ 1513 ClusterName: "test-cluster", 1514 InfrastructureRef: corev1.ObjectReference{}, 1515 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1516 NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 30}, 1517 }, 1518 1519 Status: clusterv1.MachineStatus{ 1520 Conditions: clusterv1.Conditions{ 1521 { 1522 Type: clusterv1.VolumeDetachSucceededCondition, 1523 Status: corev1.ConditionFalse, 1524 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 60)).UTC()}, 1525 }, 1526 }, 1527 }, 1528 }, 1529 expected: false, 1530 }, 1531 { 1532 name: "Volume detach timeout is not yet over", 1533 machine: &clusterv1.Machine{ 1534 ObjectMeta: metav1.ObjectMeta{ 1535 Name: "test-machine", 1536 Namespace: metav1.NamespaceDefault, 1537 Finalizers: []string{clusterv1.MachineFinalizer}, 1538 }, 1539 Spec: clusterv1.MachineSpec{ 1540 ClusterName: "test-cluster", 1541 InfrastructureRef: corev1.ObjectReference{}, 1542 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1543 NodeVolumeDetachTimeout: &metav1.Duration{Duration: time.Second * 60}, 1544 }, 1545 Status: clusterv1.MachineStatus{ 1546 Conditions: clusterv1.Conditions{ 1547 { 1548 Type: clusterv1.VolumeDetachSucceededCondition, 1549 Status: corev1.ConditionFalse, 1550 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 30)).UTC()}, 1551 }, 1552 }, 1553 }, 1554 }, 1555 expected: true, 1556 }, 1557 { 1558 name: "Volume detach timeout option is set to it's default value 0", 1559 machine: &clusterv1.Machine{ 1560 ObjectMeta: metav1.ObjectMeta{ 1561 Name: "test-machine", 1562 Namespace: metav1.NamespaceDefault, 1563 Finalizers: []string{clusterv1.MachineFinalizer}, 1564 }, 1565 Spec: clusterv1.MachineSpec{ 1566 ClusterName: "test-cluster", 1567 InfrastructureRef: corev1.ObjectReference{}, 1568 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1569 }, 1570 Status: clusterv1.MachineStatus{ 1571 Conditions: clusterv1.Conditions{ 1572 { 1573 Type: clusterv1.VolumeDetachSucceededCondition, 1574 Status: corev1.ConditionFalse, 1575 LastTransitionTime: metav1.Time{Time: time.Now().Add(-(time.Second * 1000)).UTC()}, 1576 }, 1577 }, 1578 }, 1579 }, 1580 expected: true, 1581 }, 1582 } 1583 for _, tt := range tests { 1584 t.Run(tt.name, func(t *testing.T) { 1585 g := NewWithT(t) 1586 1587 var objs []client.Object 1588 objs = append(objs, testCluster, tt.machine) 1589 1590 c := fake.NewClientBuilder().WithObjects(objs...).Build() 1591 r := &Reconciler{ 1592 Client: c, 1593 UnstructuredCachingClient: c, 1594 } 1595 1596 got := r.isNodeVolumeDetachingAllowed(tt.machine) 1597 g.Expect(got).To(Equal(tt.expected)) 1598 }) 1599 } 1600 } 1601 1602 func TestIsDeleteNodeAllowed(t *testing.T) { 1603 deletionts := metav1.Now() 1604 1605 testCases := []struct { 1606 name string 1607 cluster *clusterv1.Cluster 1608 machine *clusterv1.Machine 1609 expectedError error 1610 }{ 1611 { 1612 name: "machine without nodeRef", 1613 cluster: &clusterv1.Cluster{ 1614 ObjectMeta: metav1.ObjectMeta{ 1615 Name: "test-cluster", 1616 Namespace: metav1.NamespaceDefault, 1617 }, 1618 }, 1619 machine: &clusterv1.Machine{ 1620 ObjectMeta: metav1.ObjectMeta{ 1621 Name: "created", 1622 Namespace: metav1.NamespaceDefault, 1623 Labels: map[string]string{ 1624 clusterv1.ClusterNameLabel: "test-cluster", 1625 }, 1626 Finalizers: []string{clusterv1.MachineFinalizer}, 1627 }, 1628 Spec: clusterv1.MachineSpec{ 1629 ClusterName: "test-cluster", 1630 InfrastructureRef: corev1.ObjectReference{}, 1631 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1632 }, 1633 Status: clusterv1.MachineStatus{}, 1634 }, 1635 expectedError: errNilNodeRef, 1636 }, 1637 { 1638 name: "no control plane members", 1639 cluster: &clusterv1.Cluster{ 1640 ObjectMeta: metav1.ObjectMeta{ 1641 Name: "test-cluster", 1642 Namespace: metav1.NamespaceDefault, 1643 }, 1644 }, 1645 machine: &clusterv1.Machine{ 1646 ObjectMeta: metav1.ObjectMeta{ 1647 Name: "created", 1648 Namespace: metav1.NamespaceDefault, 1649 Labels: map[string]string{ 1650 clusterv1.ClusterNameLabel: "test-cluster", 1651 }, 1652 Finalizers: []string{clusterv1.MachineFinalizer}, 1653 }, 1654 Spec: clusterv1.MachineSpec{ 1655 ClusterName: "test-cluster", 1656 InfrastructureRef: corev1.ObjectReference{}, 1657 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1658 }, 1659 Status: clusterv1.MachineStatus{ 1660 NodeRef: &corev1.ObjectReference{ 1661 Name: "test", 1662 }, 1663 }, 1664 }, 1665 expectedError: errNoControlPlaneNodes, 1666 }, 1667 { 1668 name: "is last control plane member", 1669 cluster: &clusterv1.Cluster{ 1670 ObjectMeta: metav1.ObjectMeta{ 1671 Name: "test-cluster", 1672 Namespace: metav1.NamespaceDefault, 1673 }, 1674 }, 1675 machine: &clusterv1.Machine{ 1676 ObjectMeta: metav1.ObjectMeta{ 1677 Name: "created", 1678 Namespace: metav1.NamespaceDefault, 1679 Labels: map[string]string{ 1680 clusterv1.ClusterNameLabel: "test-cluster", 1681 clusterv1.MachineControlPlaneLabel: "", 1682 }, 1683 Finalizers: []string{clusterv1.MachineFinalizer}, 1684 DeletionTimestamp: &metav1.Time{Time: time.Now().UTC()}, 1685 }, 1686 Spec: clusterv1.MachineSpec{ 1687 ClusterName: "test-cluster", 1688 InfrastructureRef: corev1.ObjectReference{}, 1689 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1690 }, 1691 Status: clusterv1.MachineStatus{ 1692 NodeRef: &corev1.ObjectReference{ 1693 Name: "test", 1694 }, 1695 }, 1696 }, 1697 expectedError: errNoControlPlaneNodes, 1698 }, 1699 { 1700 name: "has nodeRef and control plane is healthy", 1701 cluster: &clusterv1.Cluster{ 1702 ObjectMeta: metav1.ObjectMeta{ 1703 Name: "test-cluster", 1704 Namespace: metav1.NamespaceDefault, 1705 }, 1706 }, 1707 machine: &clusterv1.Machine{ 1708 ObjectMeta: metav1.ObjectMeta{ 1709 Name: "created", 1710 Namespace: metav1.NamespaceDefault, 1711 Labels: map[string]string{ 1712 clusterv1.ClusterNameLabel: "test-cluster", 1713 }, 1714 Finalizers: []string{clusterv1.MachineFinalizer}, 1715 }, 1716 Spec: clusterv1.MachineSpec{ 1717 ClusterName: "test-cluster", 1718 InfrastructureRef: corev1.ObjectReference{}, 1719 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1720 }, 1721 Status: clusterv1.MachineStatus{ 1722 NodeRef: &corev1.ObjectReference{ 1723 Name: "test", 1724 }, 1725 }, 1726 }, 1727 expectedError: nil, 1728 }, 1729 { 1730 name: "has nodeRef and cluster is being deleted", 1731 cluster: &clusterv1.Cluster{ 1732 ObjectMeta: metav1.ObjectMeta{ 1733 Name: "test-cluster", 1734 Namespace: metav1.NamespaceDefault, 1735 DeletionTimestamp: &deletionts, 1736 Finalizers: []string{clusterv1.ClusterFinalizer}, 1737 }, 1738 }, 1739 machine: &clusterv1.Machine{}, 1740 expectedError: errClusterIsBeingDeleted, 1741 }, 1742 { 1743 name: "has nodeRef and control plane is healthy and externally managed", 1744 cluster: &clusterv1.Cluster{ 1745 ObjectMeta: metav1.ObjectMeta{ 1746 Name: "test-cluster", 1747 Namespace: metav1.NamespaceDefault, 1748 }, 1749 Spec: clusterv1.ClusterSpec{ 1750 ControlPlaneRef: &corev1.ObjectReference{ 1751 APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", 1752 Kind: "AWSManagedControlPlane", 1753 Name: "test-cluster", 1754 Namespace: "test-cluster", 1755 }, 1756 }, 1757 }, 1758 machine: &clusterv1.Machine{ 1759 ObjectMeta: metav1.ObjectMeta{ 1760 Name: "created", 1761 Namespace: metav1.NamespaceDefault, 1762 Labels: map[string]string{ 1763 clusterv1.ClusterNameLabel: "test-cluster", 1764 }, 1765 Finalizers: []string{clusterv1.MachineFinalizer}, 1766 }, 1767 Spec: clusterv1.MachineSpec{ 1768 ClusterName: "test-cluster", 1769 InfrastructureRef: corev1.ObjectReference{}, 1770 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1771 }, 1772 Status: clusterv1.MachineStatus{ 1773 NodeRef: &corev1.ObjectReference{ 1774 Name: "test", 1775 }, 1776 }, 1777 }, 1778 expectedError: nil, 1779 }, 1780 { 1781 name: "has nodeRef, control plane is being deleted and not externally managed", 1782 cluster: &clusterv1.Cluster{ 1783 ObjectMeta: metav1.ObjectMeta{ 1784 Name: "test-cluster", 1785 Namespace: metav1.NamespaceDefault, 1786 }, 1787 Spec: clusterv1.ClusterSpec{ 1788 ControlPlaneRef: &corev1.ObjectReference{ 1789 APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", 1790 Kind: "AWSManagedControlPlane", 1791 Name: "test-cluster-2", 1792 Namespace: "test-cluster", 1793 }, 1794 }, 1795 }, 1796 machine: &clusterv1.Machine{ 1797 ObjectMeta: metav1.ObjectMeta{ 1798 Name: "created", 1799 Namespace: metav1.NamespaceDefault, 1800 Labels: map[string]string{ 1801 clusterv1.ClusterNameLabel: "test-cluster", 1802 }, 1803 Finalizers: []string{clusterv1.MachineFinalizer}, 1804 }, 1805 Spec: clusterv1.MachineSpec{ 1806 ClusterName: "test-cluster", 1807 InfrastructureRef: corev1.ObjectReference{}, 1808 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1809 }, 1810 Status: clusterv1.MachineStatus{ 1811 NodeRef: &corev1.ObjectReference{ 1812 Name: "test", 1813 }, 1814 }, 1815 }, 1816 expectedError: errControlPlaneIsBeingDeleted, 1817 }, 1818 { 1819 name: "has nodeRef, control plane is being deleted and is externally managed", 1820 cluster: &clusterv1.Cluster{ 1821 ObjectMeta: metav1.ObjectMeta{ 1822 Name: "test-cluster", 1823 Namespace: metav1.NamespaceDefault, 1824 }, 1825 Spec: clusterv1.ClusterSpec{ 1826 ControlPlaneRef: &corev1.ObjectReference{ 1827 APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", 1828 Kind: "AWSManagedControlPlane", 1829 Name: "test-cluster-3", 1830 Namespace: "test-cluster", 1831 }, 1832 }, 1833 }, 1834 machine: &clusterv1.Machine{ 1835 ObjectMeta: metav1.ObjectMeta{ 1836 Name: "created", 1837 Namespace: metav1.NamespaceDefault, 1838 Labels: map[string]string{ 1839 clusterv1.ClusterNameLabel: "test-cluster", 1840 }, 1841 Finalizers: []string{clusterv1.MachineFinalizer}, 1842 }, 1843 Spec: clusterv1.MachineSpec{ 1844 ClusterName: "test-cluster", 1845 InfrastructureRef: corev1.ObjectReference{}, 1846 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1847 }, 1848 Status: clusterv1.MachineStatus{ 1849 NodeRef: &corev1.ObjectReference{ 1850 Name: "test", 1851 }, 1852 }, 1853 }, 1854 expectedError: errControlPlaneIsBeingDeleted, 1855 }, 1856 } 1857 1858 emp := &unstructured.Unstructured{ 1859 Object: map[string]interface{}{ 1860 "status": map[string]interface{}{ 1861 "externalManagedControlPlane": true, 1862 }, 1863 }, 1864 } 1865 emp.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1") 1866 emp.SetKind("AWSManagedControlPlane") 1867 emp.SetName("test-cluster") 1868 emp.SetNamespace("test-cluster") 1869 1870 mcpBeingDeleted := &unstructured.Unstructured{ 1871 Object: map[string]interface{}{}, 1872 } 1873 mcpBeingDeleted.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1") 1874 mcpBeingDeleted.SetKind("AWSManagedControlPlane") 1875 mcpBeingDeleted.SetName("test-cluster-2") 1876 mcpBeingDeleted.SetNamespace("test-cluster") 1877 mcpBeingDeleted.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) 1878 mcpBeingDeleted.SetFinalizers([]string{"block-deletion"}) 1879 1880 empBeingDeleted := &unstructured.Unstructured{ 1881 Object: map[string]interface{}{ 1882 "status": map[string]interface{}{ 1883 "externalManagedControlPlane": true, 1884 }, 1885 }, 1886 } 1887 empBeingDeleted.SetAPIVersion("controlplane.cluster.x-k8s.io/v1beta1") 1888 empBeingDeleted.SetKind("AWSManagedControlPlane") 1889 empBeingDeleted.SetName("test-cluster-3") 1890 empBeingDeleted.SetNamespace("test-cluster") 1891 empBeingDeleted.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) 1892 empBeingDeleted.SetFinalizers([]string{"block-deletion"}) 1893 1894 for _, tc := range testCases { 1895 t.Run(tc.name, func(t *testing.T) { 1896 g := NewWithT(t) 1897 1898 m1 := &clusterv1.Machine{ 1899 ObjectMeta: metav1.ObjectMeta{ 1900 Name: "cp1", 1901 Namespace: metav1.NamespaceDefault, 1902 Labels: map[string]string{ 1903 clusterv1.ClusterNameLabel: "test-cluster", 1904 }, 1905 Finalizers: []string{clusterv1.MachineFinalizer}, 1906 }, 1907 Spec: clusterv1.MachineSpec{ 1908 ClusterName: "test-cluster", 1909 InfrastructureRef: corev1.ObjectReference{}, 1910 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1911 }, 1912 Status: clusterv1.MachineStatus{ 1913 NodeRef: &corev1.ObjectReference{ 1914 Name: "test1", 1915 }, 1916 }, 1917 } 1918 m2 := &clusterv1.Machine{ 1919 ObjectMeta: metav1.ObjectMeta{ 1920 Name: "cp2", 1921 Namespace: metav1.NamespaceDefault, 1922 Labels: map[string]string{ 1923 clusterv1.ClusterNameLabel: "test-cluster", 1924 }, 1925 Finalizers: []string{clusterv1.MachineFinalizer}, 1926 }, 1927 Spec: clusterv1.MachineSpec{ 1928 ClusterName: "test-cluster", 1929 InfrastructureRef: corev1.ObjectReference{}, 1930 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 1931 }, 1932 Status: clusterv1.MachineStatus{ 1933 NodeRef: &corev1.ObjectReference{ 1934 Name: "test2", 1935 }, 1936 }, 1937 } 1938 // For isDeleteNodeAllowed to be true we assume a healthy control plane. 1939 if tc.expectedError == nil { 1940 m1.Labels[clusterv1.MachineControlPlaneLabel] = "" 1941 m2.Labels[clusterv1.MachineControlPlaneLabel] = "" 1942 } 1943 1944 c := fake.NewClientBuilder().WithObjects( 1945 tc.cluster, 1946 tc.machine, 1947 m1, 1948 m2, 1949 emp, 1950 mcpBeingDeleted, 1951 empBeingDeleted, 1952 ).Build() 1953 mr := &Reconciler{ 1954 Client: c, 1955 UnstructuredCachingClient: c, 1956 } 1957 1958 err := mr.isDeleteNodeAllowed(ctx, tc.cluster, tc.machine) 1959 if tc.expectedError == nil { 1960 g.Expect(err).ToNot(HaveOccurred()) 1961 } else { 1962 g.Expect(err).To(Equal(tc.expectedError)) 1963 } 1964 }) 1965 } 1966 } 1967 1968 func TestNodeToMachine(t *testing.T) { 1969 g := NewWithT(t) 1970 ns, err := env.CreateNamespace(ctx, "test-node-to-machine") 1971 g.Expect(err).ToNot(HaveOccurred()) 1972 1973 // Set up cluster, machines and nodes to test against. 1974 infraMachine := &unstructured.Unstructured{ 1975 Object: map[string]interface{}{ 1976 "kind": "GenericInfrastructureMachine", 1977 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 1978 "metadata": map[string]interface{}{ 1979 "name": "infra-config1", 1980 "namespace": ns.Name, 1981 }, 1982 "spec": map[string]interface{}{ 1983 "providerID": "test://id-1", 1984 }, 1985 "status": map[string]interface{}{ 1986 "ready": true, 1987 "addresses": []interface{}{ 1988 map[string]interface{}{ 1989 "type": "InternalIP", 1990 "address": "10.0.0.1", 1991 }, 1992 }, 1993 }, 1994 }, 1995 } 1996 1997 infraMachine2 := &unstructured.Unstructured{ 1998 Object: map[string]interface{}{ 1999 "kind": "GenericInfrastructureMachine", 2000 "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", 2001 "metadata": map[string]interface{}{ 2002 "name": "infra-config2", 2003 "namespace": ns.Name, 2004 }, 2005 "spec": map[string]interface{}{ 2006 "providerID": "test://id-2", 2007 }, 2008 "status": map[string]interface{}{ 2009 "ready": true, 2010 "addresses": []interface{}{ 2011 map[string]interface{}{ 2012 "type": "InternalIP", 2013 "address": "10.0.0.1", 2014 }, 2015 }, 2016 }, 2017 }, 2018 } 2019 2020 defaultBootstrap := &unstructured.Unstructured{ 2021 Object: map[string]interface{}{ 2022 "kind": "GenericBootstrapConfig", 2023 "apiVersion": "bootstrap.cluster.x-k8s.io/v1beta1", 2024 "metadata": map[string]interface{}{ 2025 "name": "bootstrap-config-machinereconcile", 2026 "namespace": ns.Name, 2027 }, 2028 "spec": map[string]interface{}{}, 2029 "status": map[string]interface{}{}, 2030 }, 2031 } 2032 2033 testCluster := &clusterv1.Cluster{ 2034 ObjectMeta: metav1.ObjectMeta{ 2035 GenerateName: "machine-reconcile-", 2036 Namespace: ns.Name, 2037 }, 2038 } 2039 2040 targetNode := &corev1.Node{ 2041 ObjectMeta: metav1.ObjectMeta{ 2042 Name: "test-node-to-machine-1", 2043 }, 2044 Spec: corev1.NodeSpec{ 2045 ProviderID: "test://id-1", 2046 }, 2047 } 2048 2049 randomNode := &corev1.Node{ 2050 ObjectMeta: metav1.ObjectMeta{ 2051 Name: "test-node-to-machine-node-2", 2052 }, 2053 Spec: corev1.NodeSpec{ 2054 ProviderID: "test://id-2", 2055 }, 2056 } 2057 2058 g.Expect(env.Create(ctx, testCluster)).To(Succeed()) 2059 g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed()) 2060 g.Expect(env.Create(ctx, defaultBootstrap)).To(Succeed()) 2061 g.Expect(env.Create(ctx, targetNode)).To(Succeed()) 2062 g.Expect(env.Create(ctx, randomNode)).To(Succeed()) 2063 g.Expect(env.Create(ctx, infraMachine)).To(Succeed()) 2064 g.Expect(env.Create(ctx, infraMachine2)).To(Succeed()) 2065 2066 defer func(do ...client.Object) { 2067 g.Expect(env.Cleanup(ctx, do...)).To(Succeed()) 2068 }(ns, testCluster, defaultBootstrap) 2069 2070 // Patch infra expectedMachine ready 2071 patchHelper, err := patch.NewHelper(infraMachine, env) 2072 g.Expect(err).ShouldNot(HaveOccurred()) 2073 g.Expect(unstructured.SetNestedField(infraMachine.Object, true, "status", "ready")).To(Succeed()) 2074 g.Expect(patchHelper.Patch(ctx, infraMachine, patch.WithStatusObservedGeneration{})).To(Succeed()) 2075 2076 // Patch infra randomMachine ready 2077 patchHelper, err = patch.NewHelper(infraMachine2, env) 2078 g.Expect(err).ShouldNot(HaveOccurred()) 2079 g.Expect(unstructured.SetNestedField(infraMachine2.Object, true, "status", "ready")).To(Succeed()) 2080 g.Expect(patchHelper.Patch(ctx, infraMachine2, patch.WithStatusObservedGeneration{})).To(Succeed()) 2081 2082 // Patch bootstrap ready 2083 patchHelper, err = patch.NewHelper(defaultBootstrap, env) 2084 g.Expect(err).ShouldNot(HaveOccurred()) 2085 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, true, "status", "ready")).To(Succeed()) 2086 g.Expect(unstructured.SetNestedField(defaultBootstrap.Object, "secretData", "status", "dataSecretName")).To(Succeed()) 2087 g.Expect(patchHelper.Patch(ctx, defaultBootstrap, patch.WithStatusObservedGeneration{})).To(Succeed()) 2088 2089 expectedMachine := &clusterv1.Machine{ 2090 ObjectMeta: metav1.ObjectMeta{ 2091 GenerateName: "machine-created-", 2092 Namespace: ns.Name, 2093 Labels: map[string]string{ 2094 clusterv1.MachineControlPlaneLabel: "", 2095 }, 2096 }, 2097 Spec: clusterv1.MachineSpec{ 2098 ClusterName: testCluster.Name, 2099 InfrastructureRef: corev1.ObjectReference{ 2100 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 2101 Kind: "GenericInfrastructureMachine", 2102 Name: "infra-config1", 2103 }, 2104 Bootstrap: clusterv1.Bootstrap{ 2105 ConfigRef: &corev1.ObjectReference{ 2106 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 2107 Kind: "GenericBootstrapConfig", 2108 Name: "bootstrap-config-machinereconcile", 2109 }, 2110 }, 2111 }, 2112 } 2113 2114 g.Expect(env.Create(ctx, expectedMachine)).To(Succeed()) 2115 defer func() { 2116 g.Expect(env.Cleanup(ctx, expectedMachine)).To(Succeed()) 2117 }() 2118 2119 // Wait for reconciliation to happen. 2120 // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. 2121 key := client.ObjectKey{Name: expectedMachine.Name, Namespace: expectedMachine.Namespace} 2122 g.Eventually(func() bool { 2123 if err := env.Get(ctx, key, expectedMachine); err != nil { 2124 return false 2125 } 2126 return expectedMachine.Status.NodeRef != nil 2127 }, timeout).Should(BeTrue()) 2128 2129 randomMachine := &clusterv1.Machine{ 2130 ObjectMeta: metav1.ObjectMeta{ 2131 GenerateName: "machine-created-", 2132 Namespace: ns.Name, 2133 Labels: map[string]string{ 2134 clusterv1.MachineControlPlaneLabel: "", 2135 }, 2136 }, 2137 Spec: clusterv1.MachineSpec{ 2138 ClusterName: testCluster.Name, 2139 InfrastructureRef: corev1.ObjectReference{ 2140 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 2141 Kind: "GenericInfrastructureMachine", 2142 Name: "infra-config2", 2143 }, 2144 Bootstrap: clusterv1.Bootstrap{ 2145 ConfigRef: &corev1.ObjectReference{ 2146 APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", 2147 Kind: "GenericBootstrapConfig", 2148 Name: "bootstrap-config-machinereconcile", 2149 }, 2150 }, 2151 }, 2152 } 2153 2154 g.Expect(env.Create(ctx, randomMachine)).To(Succeed()) 2155 defer func() { 2156 g.Expect(env.Cleanup(ctx, randomMachine)).To(Succeed()) 2157 }() 2158 2159 // Wait for reconciliation to happen. 2160 // Since infra and bootstrap objects are ready, a nodeRef will be assigned during node reconciliation. 2161 key = client.ObjectKey{Name: randomMachine.Name, Namespace: randomMachine.Namespace} 2162 g.Eventually(func() bool { 2163 if err := env.Get(ctx, key, randomMachine); err != nil { 2164 return false 2165 } 2166 return randomMachine.Status.NodeRef != nil 2167 }, timeout).Should(BeTrue()) 2168 2169 // Fake nodes for actual test of nodeToMachine. 2170 fakeNodes := []*corev1.Node{ 2171 // None annotations. 2172 { 2173 ObjectMeta: metav1.ObjectMeta{ 2174 Name: targetNode.GetName(), 2175 }, 2176 Spec: corev1.NodeSpec{ 2177 ProviderID: targetNode.Spec.ProviderID, 2178 }, 2179 }, 2180 // ClusterNameAnnotation annotation. 2181 { 2182 ObjectMeta: metav1.ObjectMeta{ 2183 Name: targetNode.GetName(), 2184 Annotations: map[string]string{ 2185 clusterv1.ClusterNameAnnotation: testCluster.GetName(), 2186 }, 2187 }, 2188 Spec: corev1.NodeSpec{ 2189 ProviderID: targetNode.Spec.ProviderID, 2190 }, 2191 }, 2192 // ClusterNamespaceAnnotation annotation. 2193 { 2194 ObjectMeta: metav1.ObjectMeta{ 2195 Name: targetNode.GetName(), 2196 Annotations: map[string]string{ 2197 clusterv1.ClusterNamespaceAnnotation: ns.GetName(), 2198 }, 2199 }, 2200 Spec: corev1.NodeSpec{ 2201 ProviderID: targetNode.Spec.ProviderID, 2202 }, 2203 }, 2204 // Both annotations. 2205 { 2206 ObjectMeta: metav1.ObjectMeta{ 2207 Name: targetNode.GetName(), 2208 Annotations: map[string]string{ 2209 clusterv1.ClusterNameAnnotation: testCluster.GetName(), 2210 clusterv1.ClusterNamespaceAnnotation: ns.GetName(), 2211 }, 2212 }, 2213 Spec: corev1.NodeSpec{ 2214 ProviderID: targetNode.Spec.ProviderID, 2215 }, 2216 }, 2217 } 2218 2219 r := &Reconciler{ 2220 Client: env, 2221 UnstructuredCachingClient: env, 2222 } 2223 for _, node := range fakeNodes { 2224 request := r.nodeToMachine(ctx, node) 2225 g.Expect(request).To(BeEquivalentTo([]reconcile.Request{ 2226 { 2227 NamespacedName: client.ObjectKeyFromObject(expectedMachine), 2228 }, 2229 })) 2230 } 2231 } 2232 2233 type fakeClientWithNodeDeletionErr struct { 2234 client.Client 2235 } 2236 2237 func (fc fakeClientWithNodeDeletionErr) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { 2238 gvk, err := apiutil.GVKForObject(obj, fakeScheme) 2239 if err == nil && gvk.Kind == "Node" { 2240 return fmt.Errorf("fake error") 2241 } 2242 return fc.Client.Delete(ctx, obj, opts...) 2243 } 2244 2245 func TestNodeDeletion(t *testing.T) { 2246 g := NewWithT(t) 2247 2248 deletionTime := metav1.Now().Add(-1 * time.Second) 2249 2250 testCluster := clusterv1.Cluster{ 2251 ObjectMeta: metav1.ObjectMeta{ 2252 Name: "test-cluster", 2253 Namespace: metav1.NamespaceDefault, 2254 }, 2255 } 2256 2257 node := &corev1.Node{ 2258 ObjectMeta: metav1.ObjectMeta{ 2259 Name: "test", 2260 }, 2261 Spec: corev1.NodeSpec{ProviderID: "test://id-1"}, 2262 } 2263 2264 testMachine := clusterv1.Machine{ 2265 ObjectMeta: metav1.ObjectMeta{ 2266 Name: "test", 2267 Namespace: metav1.NamespaceDefault, 2268 Labels: map[string]string{ 2269 clusterv1.MachineControlPlaneLabel: "", 2270 }, 2271 Annotations: map[string]string{ 2272 "machine.cluster.x-k8s.io/exclude-node-draining": "", 2273 }, 2274 Finalizers: []string{clusterv1.MachineFinalizer}, 2275 DeletionTimestamp: &metav1.Time{Time: deletionTime}, 2276 }, 2277 Spec: clusterv1.MachineSpec{ 2278 ClusterName: "test-cluster", 2279 InfrastructureRef: corev1.ObjectReference{ 2280 APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", 2281 Kind: "GenericInfrastructureMachine", 2282 Name: "infra-config1", 2283 }, 2284 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 2285 }, 2286 Status: clusterv1.MachineStatus{ 2287 NodeRef: &corev1.ObjectReference{ 2288 Name: "test", 2289 }, 2290 }, 2291 } 2292 2293 cpmachine1 := &clusterv1.Machine{ 2294 ObjectMeta: metav1.ObjectMeta{ 2295 Name: "cp1", 2296 Namespace: metav1.NamespaceDefault, 2297 Labels: map[string]string{ 2298 clusterv1.ClusterNameLabel: "test-cluster", 2299 clusterv1.MachineControlPlaneLabel: "", 2300 }, 2301 Finalizers: []string{clusterv1.MachineFinalizer}, 2302 }, 2303 Spec: clusterv1.MachineSpec{ 2304 ClusterName: "test-cluster", 2305 InfrastructureRef: corev1.ObjectReference{}, 2306 Bootstrap: clusterv1.Bootstrap{DataSecretName: ptr.To("data")}, 2307 }, 2308 Status: clusterv1.MachineStatus{ 2309 NodeRef: &corev1.ObjectReference{ 2310 Name: "cp1", 2311 }, 2312 }, 2313 } 2314 2315 testCases := []struct { 2316 name string 2317 deletionTimeout *metav1.Duration 2318 resultErr bool 2319 clusterDeleted bool 2320 expectNodeDeletion bool 2321 createFakeClient func(...client.Object) client.Client 2322 }{ 2323 { 2324 name: "should return no error when deletion is successful", 2325 deletionTimeout: &metav1.Duration{Duration: time.Second}, 2326 resultErr: false, 2327 expectNodeDeletion: true, 2328 createFakeClient: func(initObjs ...client.Object) client.Client { 2329 return fake.NewClientBuilder(). 2330 WithObjects(initObjs...). 2331 WithStatusSubresource(&clusterv1.Machine{}). 2332 Build() 2333 }, 2334 }, 2335 { 2336 name: "should return an error when timeout is not expired and node deletion fails", 2337 deletionTimeout: &metav1.Duration{Duration: time.Hour}, 2338 resultErr: true, 2339 expectNodeDeletion: false, 2340 createFakeClient: func(initObjs ...client.Object) client.Client { 2341 fc := fake.NewClientBuilder(). 2342 WithObjects(initObjs...). 2343 WithStatusSubresource(&clusterv1.Machine{}). 2344 Build() 2345 return fakeClientWithNodeDeletionErr{fc} 2346 }, 2347 }, 2348 { 2349 name: "should return an error when timeout is infinite and node deletion fails", 2350 deletionTimeout: &metav1.Duration{Duration: 0}, // should lead to infinite timeout 2351 resultErr: true, 2352 expectNodeDeletion: false, 2353 createFakeClient: func(initObjs ...client.Object) client.Client { 2354 fc := fake.NewClientBuilder(). 2355 WithObjects(initObjs...). 2356 WithStatusSubresource(&clusterv1.Machine{}). 2357 Build() 2358 return fakeClientWithNodeDeletionErr{fc} 2359 }, 2360 }, 2361 { 2362 name: "should not return an error when timeout is expired and node deletion fails", 2363 deletionTimeout: &metav1.Duration{Duration: time.Millisecond}, 2364 resultErr: false, 2365 expectNodeDeletion: false, 2366 createFakeClient: func(initObjs ...client.Object) client.Client { 2367 fc := fake.NewClientBuilder(). 2368 WithObjects(initObjs...). 2369 WithStatusSubresource(&clusterv1.Machine{}). 2370 Build() 2371 return fakeClientWithNodeDeletionErr{fc} 2372 }, 2373 }, 2374 { 2375 name: "should not delete the node or return an error when the cluster is marked for deletion", 2376 deletionTimeout: nil, // should lead to infinite timeout 2377 resultErr: false, 2378 clusterDeleted: true, 2379 expectNodeDeletion: false, 2380 createFakeClient: func(initObjs ...client.Object) client.Client { 2381 fc := fake.NewClientBuilder(). 2382 WithObjects(initObjs...). 2383 WithStatusSubresource(&clusterv1.Machine{}). 2384 Build() 2385 return fakeClientWithNodeDeletionErr{fc} 2386 }, 2387 }, 2388 } 2389 2390 for _, tc := range testCases { 2391 t.Run(tc.name, func(*testing.T) { 2392 m := testMachine.DeepCopy() 2393 m.Spec.NodeDeletionTimeout = tc.deletionTimeout 2394 2395 fakeClient := tc.createFakeClient(node, m, cpmachine1) 2396 tracker := remote.NewTestClusterCacheTracker(ctrl.Log, fakeClient, fakeClient, fakeScheme, client.ObjectKeyFromObject(&testCluster)) 2397 2398 r := &Reconciler{ 2399 Client: fakeClient, 2400 UnstructuredCachingClient: fakeClient, 2401 Tracker: tracker, 2402 recorder: record.NewFakeRecorder(10), 2403 nodeDeletionRetryTimeout: 10 * time.Millisecond, 2404 } 2405 2406 cluster := testCluster.DeepCopy() 2407 if tc.clusterDeleted { 2408 cluster.DeletionTimestamp = &metav1.Time{Time: deletionTime.Add(time.Hour)} 2409 } 2410 2411 _, err := r.reconcileDelete(context.Background(), cluster, m) 2412 2413 if tc.resultErr { 2414 g.Expect(err).To(HaveOccurred()) 2415 } else { 2416 g.Expect(err).ToNot(HaveOccurred()) 2417 if tc.expectNodeDeletion { 2418 n := &corev1.Node{} 2419 g.Expect(fakeClient.Get(context.Background(), client.ObjectKeyFromObject(node), n)).NotTo(Succeed()) 2420 } 2421 } 2422 }) 2423 } 2424 } 2425 2426 // adds a condition list to an external object. 2427 func addConditionsToExternal(u *unstructured.Unstructured, newConditions clusterv1.Conditions) { 2428 existingConditions := clusterv1.Conditions{} 2429 if cs := conditions.UnstructuredGetter(u).GetConditions(); len(cs) != 0 { 2430 existingConditions = cs 2431 } 2432 existingConditions = append(existingConditions, newConditions...) 2433 conditions.UnstructuredSetter(u).SetConditions(existingConditions) 2434 } 2435 2436 // asserts the conditions set on the Getter object. 2437 // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). 2438 func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clusterv1.Condition) { 2439 t.Helper() 2440 2441 for _, condition := range conditions { 2442 assertCondition(t, from, condition) 2443 } 2444 } 2445 2446 // asserts whether a condition of type is set on the Getter object 2447 // when the condition is true, asserting the reason/severity/message 2448 // for the condition are avoided. 2449 func assertCondition(t *testing.T, from conditions.Getter, condition *clusterv1.Condition) { 2450 t.Helper() 2451 2452 g := NewWithT(t) 2453 g.Expect(conditions.Has(from, condition.Type)).To(BeTrue()) 2454 2455 if condition.Status == corev1.ConditionTrue { 2456 g.Expect(conditions.IsTrue(from, condition.Type)).To(BeTrue()) 2457 } else { 2458 conditionToBeAsserted := conditions.Get(from, condition.Type) 2459 g.Expect(conditionToBeAsserted.Status).To(Equal(condition.Status)) 2460 g.Expect(conditionToBeAsserted.Severity).To(Equal(condition.Severity)) 2461 g.Expect(conditionToBeAsserted.Reason).To(Equal(condition.Reason)) 2462 if condition.Message != "" { 2463 g.Expect(conditionToBeAsserted.Message).To(Equal(condition.Message)) 2464 } 2465 } 2466 }