sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/topology/cluster/cluster_controller_test.go (about) 1 /* 2 Copyright 2021 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package cluster 18 19 import ( 20 "fmt" 21 "reflect" 22 "testing" 23 "time" 24 25 "github.com/google/go-cmp/cmp" 26 . "github.com/onsi/gomega" 27 corev1 "k8s.io/api/core/v1" 28 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 29 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 31 "k8s.io/apimachinery/pkg/runtime/schema" 32 utilfeature "k8s.io/component-base/featuregate/testing" 33 ctrl "sigs.k8s.io/controller-runtime" 34 "sigs.k8s.io/controller-runtime/pkg/client" 35 "sigs.k8s.io/controller-runtime/pkg/client/fake" 36 "sigs.k8s.io/controller-runtime/pkg/reconcile" 37 38 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 39 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" 40 runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" 41 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" 42 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" 43 "sigs.k8s.io/cluster-api/exp/topology/scope" 44 "sigs.k8s.io/cluster-api/feature" 45 "sigs.k8s.io/cluster-api/internal/contract" 46 "sigs.k8s.io/cluster-api/internal/hooks" 47 fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" 48 "sigs.k8s.io/cluster-api/internal/test/builder" 49 
"sigs.k8s.io/cluster-api/util/conditions" 50 "sigs.k8s.io/cluster-api/util/kubeconfig" 51 "sigs.k8s.io/cluster-api/util/patch" 52 ) 53 54 var ( 55 clusterName1 = "cluster1" 56 clusterName2 = "cluster2" 57 clusterClassName1 = "class1" 58 clusterClassName2 = "class2" 59 infrastructureMachineTemplateName1 = "inframachinetemplate1" 60 infrastructureMachineTemplateName2 = "inframachinetemplate2" 61 infrastructureMachinePoolTemplateName1 = "inframachinepooltemplate1" 62 infrastructureMachinePoolTemplateName2 = "inframachinepooltemplate2" 63 ) 64 65 func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) { 66 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 67 g := NewWithT(t) 68 timeout := 5 * time.Second 69 70 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 71 g.Expect(err).ToNot(HaveOccurred()) 72 73 // Create the objects needed for the integration test: 74 // - a ClusterClass with all the related templates 75 // - a Cluster using the above ClusterClass 76 cleanup, err := setupTestEnvForIntegrationTests(ns) 77 g.Expect(err).ToNot(HaveOccurred()) 78 79 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 80 defer func() { 81 g.Expect(cleanup()).To(Succeed()) 82 }() 83 84 g.Eventually(func(g Gomega) error { 85 // Get the cluster object. 86 actualCluster := &clusterv1.Cluster{} 87 if err := env.GetAPIReader().Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 88 return err 89 } 90 91 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 92 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 93 94 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 
95 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 96 97 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 98 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 99 100 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 101 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 102 103 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 104 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 105 106 // Check if the Cluster has the relevant TopologyReconciledCondition. 107 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 108 109 return nil 110 }, timeout).Should(Succeed()) 111 } 112 113 func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) { 114 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 115 116 g := NewWithT(t) 117 timeout := 5 * time.Second 118 119 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 120 g.Expect(err).ToNot(HaveOccurred()) 121 122 // Create the objects needed for the integration test: 123 // - a ClusterClass with all the related templates 124 // - a Cluster using the above ClusterClass 125 // - a second Cluster using the same ClusterClass 126 cleanup, err := setupTestEnvForIntegrationTests(ns) 127 g.Expect(err).ToNot(HaveOccurred()) 128 129 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 130 defer func() { 131 g.Expect(cleanup()).To(Succeed()) 132 }() 133 134 // Check to see that both clusters were correctly created and reconciled using the existing clusterClass. 
135 g.Eventually(func(g Gomega) error { 136 for _, name := range []string{clusterName1, clusterName2} { 137 // Get the cluster object. 138 actualCluster := &clusterv1.Cluster{} 139 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 140 return err 141 } 142 143 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 144 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 145 146 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 147 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 148 149 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 150 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 151 152 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 153 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 154 155 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 156 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 157 158 // Check if the Cluster has the relevant TopologyReconciledCondition. 
159 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 160 } 161 return nil 162 }, timeout).Should(Succeed()) 163 } 164 165 func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { 166 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 167 g := NewWithT(t) 168 timeout := 300 * time.Second 169 170 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 171 g.Expect(err).ToNot(HaveOccurred()) 172 173 // Create the objects needed for the integration test: 174 // - a ClusterClass with all the related templates 175 // - a Cluster using the above ClusterClass 176 cleanup, err := setupTestEnvForIntegrationTests(ns) 177 g.Expect(err).ToNot(HaveOccurred()) 178 179 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 180 defer func() { 181 g.Expect(cleanup()).To(Succeed()) 182 }() 183 184 actualCluster := &clusterv1.Cluster{} 185 // First ensure that the initial cluster and other objects are created and populated as expected. 186 g.Eventually(func(g Gomega) error { 187 // Get the cluster object now including the updated replica number for the Machine deployment. 188 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 189 return err 190 } 191 192 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 193 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 194 195 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 196 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 197 198 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
199 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 200 201 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 202 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 203 204 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 205 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 206 207 // Check if the Cluster has the relevant TopologyReconciledCondition. 208 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 209 return nil 210 }, timeout).Should(Succeed()) 211 212 // Change the replicas field in the managed topology of our cluster and update the object in the API. 213 replicas := int32(100) 214 patchHelper, err := patch.NewHelper(actualCluster, env.Client) 215 g.Expect(err).ToNot(HaveOccurred()) 216 clusterWithTopologyChange := actualCluster.DeepCopy() 217 clusterWithTopologyChange.Spec.Topology.Workers.MachineDeployments[0].Replicas = &replicas 218 clusterWithTopologyChange.Spec.Topology.Workers.MachinePools[0].Replicas = &replicas 219 g.Expect(patchHelper.Patch(ctx, clusterWithTopologyChange)).Should(Succeed()) 220 221 // Check to ensure all objects are correctly reconciled with the new MachineDeployment and MachinePool replica count in Topology. 222 g.Eventually(func(g Gomega) error { 223 // Get the cluster object. 224 updatedCluster := &clusterv1.Cluster{} 225 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, updatedCluster); err != nil { 226 return err 227 } 228 229 // Check to ensure the replica count has been successfully updated in the API server and cache. 230 g.Expect(updatedCluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(&replicas)) 231 232 // Check to ensure the replica count has been successfully updated in the API server and cache. 
233 g.Expect(updatedCluster.Spec.Topology.Workers.MachinePools[0].Replicas).To(Equal(&replicas)) 234 235 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 236 g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed()) 237 238 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 239 g.Expect(assertInfrastructureClusterReconcile(updatedCluster)).Should(Succeed()) 240 241 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 242 g.Expect(assertControlPlaneReconcile(updatedCluster)).Should(Succeed()) 243 244 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 245 g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) 246 247 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 248 g.Expect(assertMachinePoolsReconcile(updatedCluster)).Should(Succeed()) 249 250 // Check if the Cluster has the relevant TopologyReconciledCondition. 
251 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 252 return nil 253 }, timeout).Should(Succeed()) 254 } 255 256 func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { 257 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 258 g := NewWithT(t) 259 timeout := 5 * time.Second 260 261 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 262 g.Expect(err).ToNot(HaveOccurred()) 263 264 // Create the objects needed for the integration test: 265 // - a ClusterClass with all the related templates 266 // - a Cluster using the above ClusterClass 267 // - a second Cluster using the same ClusterClass 268 cleanup, err := setupTestEnvForIntegrationTests(ns) 269 g.Expect(err).ToNot(HaveOccurred()) 270 271 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 272 defer func() { 273 g.Expect(cleanup()).To(Succeed()) 274 }() 275 276 actualCluster := &clusterv1.Cluster{} 277 278 g.Eventually(func(g Gomega) error { 279 for _, name := range []string{clusterName1, clusterName2} { 280 // Get the cluster object. 281 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 282 return err 283 } 284 285 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 286 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 287 288 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 289 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 290 291 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 292 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 293 294 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 
295 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 296 297 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 298 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 299 300 // Check if the Cluster has the relevant TopologyReconciledCondition. 301 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 302 } 303 return nil 304 }, timeout).Should(Succeed()) 305 306 // Get the clusterClass to update and check if clusterClass updates are being correctly reconciled. 307 clusterClass := &clusterv1.ClusterClass{} 308 g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, clusterClass)).To(Succeed()) 309 310 patchHelper, err := patch.NewHelper(clusterClass, env.Client) 311 g.Expect(err).ToNot(HaveOccurred()) 312 // Change the infrastructureMachineTemplateName for the first of our MachineDeployments and update in the API. 313 clusterClass.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name = infrastructureMachineTemplateName2 314 // Change the infrastructureMachinePoolTemplateName for the first of our MachinePools and update in the API. 315 clusterClass.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name = infrastructureMachinePoolTemplateName2 316 g.Expect(patchHelper.Patch(ctx, clusterClass)).To(Succeed()) 317 318 g.Eventually(func(g Gomega) error { 319 // Check that the clusterClass has been correctly updated to use the new infrastructure template. 320 // This is necessary as sometimes the cache can take a little time to update. 
321 class := &clusterv1.ClusterClass{} 322 g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, class)).To(Succeed()) 323 g.Expect(class.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachineTemplateName2)) 324 g.Expect(class.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachinePoolTemplateName2)) 325 326 // For each cluster check that the clusterClass changes have been correctly reconciled. 327 for _, name := range []string{clusterName1, clusterName2} { 328 // Get the cluster object. 329 actualCluster = &clusterv1.Cluster{} 330 331 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 332 return err 333 } 334 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 335 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 336 337 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 338 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 339 340 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 341 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 342 343 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 344 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 345 346 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 347 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 348 349 // Check if the Cluster has the relevant TopologyReconciledCondition. 
350 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 351 } 352 return nil 353 }, timeout).Should(Succeed()) 354 } 355 356 func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) { 357 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 358 g := NewWithT(t) 359 timeout := 30 * time.Second 360 361 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 362 g.Expect(err).ToNot(HaveOccurred()) 363 364 // Create the objects needed for the integration test: 365 // - a ClusterClass with all the related templates 366 // - a Cluster using the first ClusterClass 367 // - a compatible ClusterClass to rebase the Cluster to 368 cleanup, err := setupTestEnvForIntegrationTests(ns) 369 g.Expect(err).ToNot(HaveOccurred()) 370 371 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 372 defer func() { 373 g.Expect(cleanup()).To(Succeed()) 374 }() 375 376 actualCluster := &clusterv1.Cluster{} 377 // First ensure that the initial cluster and other objects are created and populated as expected. 378 g.Eventually(func(g Gomega) error { 379 // Get the cluster object. 380 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 381 return err 382 } 383 384 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 385 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 386 387 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 388 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 389 390 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
391 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 392 393 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 394 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 395 396 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 397 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 398 399 return nil 400 }, timeout).Should(Succeed()) 401 402 patchHelper, err := patch.NewHelper(actualCluster, env.Client) 403 g.Expect(err).ToNot(HaveOccurred()) 404 // Change the ClusterClass pointed to in the Cluster's Topology. This is a ClusterClass rebase operation. 405 clusterWithRebase := actualCluster.DeepCopy() 406 clusterWithRebase.Spec.Topology.Class = clusterClassName2 407 g.Expect(patchHelper.Patch(ctx, clusterWithRebase)).Should(Succeed()) 408 409 // Check to ensure all objects are correctly reconciled with the new ClusterClass. 410 g.Eventually(func(g Gomega) error { 411 // Get the cluster object. 412 updatedCluster := &clusterv1.Cluster{} 413 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, updatedCluster); err != nil { 414 return err 415 } 416 // Check to ensure the spec.topology.class has been successfully updated in the API server and cache. 417 g.Expect(updatedCluster.Spec.Topology.Class).To(Equal(clusterClassName2)) 418 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 419 g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed()) 420 421 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 422 g.Expect(assertInfrastructureClusterReconcile(updatedCluster)).Should(Succeed()) 423 424 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
425 g.Expect(assertControlPlaneReconcile(updatedCluster)).Should(Succeed()) 426 427 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 428 g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) 429 430 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 431 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 432 433 return nil 434 }, timeout).Should(Succeed()) 435 } 436 437 func TestClusterReconciler_reconcileDelete(t *testing.T) { 438 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() 439 440 catalog := runtimecatalog.New() 441 _ = runtimehooksv1.AddToCatalog(catalog) 442 443 beforeClusterDeleteGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete) 444 if err != nil { 445 panic(err) 446 } 447 448 blockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 449 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 450 RetryAfterSeconds: int32(10), 451 CommonResponse: runtimehooksv1.CommonResponse{ 452 Status: runtimehooksv1.ResponseStatusSuccess, 453 }, 454 }, 455 } 456 nonBlockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 457 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 458 RetryAfterSeconds: int32(0), 459 CommonResponse: runtimehooksv1.CommonResponse{ 460 Status: runtimehooksv1.ResponseStatusSuccess, 461 }, 462 }, 463 } 464 failureResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 465 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 466 CommonResponse: runtimehooksv1.CommonResponse{ 467 Status: runtimehooksv1.ResponseStatusFailure, 468 }, 469 }, 470 } 471 472 tests := []struct { 473 name string 474 cluster *clusterv1.Cluster 475 hookResponse *runtimehooksv1.BeforeClusterDeleteResponse 476 wantHookToBeCalled bool 477 wantResult ctrl.Result 478 wantOkToDelete bool 479 wantErr bool 480 }{ 481 { 482 
name: "should apply the ok-to-delete annotation if the BeforeClusterDelete hook returns a non-blocking response", 483 cluster: &clusterv1.Cluster{ 484 ObjectMeta: metav1.ObjectMeta{ 485 Name: "test-cluster", 486 Namespace: "test-ns", 487 }, 488 Spec: clusterv1.ClusterSpec{ 489 Topology: &clusterv1.Topology{}, 490 }, 491 }, 492 hookResponse: nonBlockingResponse, 493 wantResult: ctrl.Result{}, 494 wantHookToBeCalled: true, 495 wantOkToDelete: true, 496 wantErr: false, 497 }, 498 { 499 name: "should requeue if the BeforeClusterDelete hook returns a blocking response", 500 cluster: &clusterv1.Cluster{ 501 ObjectMeta: metav1.ObjectMeta{ 502 Name: "test-cluster", 503 Namespace: "test-ns", 504 }, 505 Spec: clusterv1.ClusterSpec{ 506 Topology: &clusterv1.Topology{}, 507 }, 508 }, 509 hookResponse: blockingResponse, 510 wantResult: ctrl.Result{RequeueAfter: time.Duration(10) * time.Second}, 511 wantHookToBeCalled: true, 512 wantOkToDelete: false, 513 wantErr: false, 514 }, 515 { 516 name: "should fail if the BeforeClusterDelete hook returns a failure response", 517 cluster: &clusterv1.Cluster{ 518 ObjectMeta: metav1.ObjectMeta{ 519 Name: "test-cluster", 520 Namespace: "test-ns", 521 }, 522 Spec: clusterv1.ClusterSpec{ 523 Topology: &clusterv1.Topology{}, 524 }, 525 }, 526 hookResponse: failureResponse, 527 wantResult: ctrl.Result{}, 528 wantHookToBeCalled: true, 529 wantOkToDelete: false, 530 wantErr: true, 531 }, 532 { 533 name: "should succeed if the ok-to-delete annotation is already present", 534 cluster: &clusterv1.Cluster{ 535 ObjectMeta: metav1.ObjectMeta{ 536 Name: "test-cluster", 537 Namespace: "test-ns", 538 Annotations: map[string]string{ 539 // If the hook is already marked the hook should not be called during cluster delete. 540 runtimev1.OkToDeleteAnnotation: "", 541 }, 542 }, 543 Spec: clusterv1.ClusterSpec{ 544 Topology: &clusterv1.Topology{}, 545 }, 546 }, 547 // Using a blocking response here should not matter as the hook should never be called. 
548 // Using a blocking response to enforce the point. 549 hookResponse: blockingResponse, 550 wantResult: ctrl.Result{}, 551 wantHookToBeCalled: false, 552 wantOkToDelete: true, 553 wantErr: false, 554 }, 555 } 556 557 for _, tt := range tests { 558 t.Run(tt.name, func(t *testing.T) { 559 g := NewWithT(t) 560 561 fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build() 562 fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). 563 WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ 564 beforeClusterDeleteGVH: tt.hookResponse, 565 }). 566 WithCatalog(catalog). 567 Build() 568 569 r := &Reconciler{ 570 Client: fakeClient, 571 APIReader: fakeClient, 572 RuntimeClient: fakeRuntimeClient, 573 } 574 575 res, err := r.reconcileDelete(ctx, tt.cluster) 576 if tt.wantErr { 577 g.Expect(err).To(HaveOccurred()) 578 } else { 579 g.Expect(err).ToNot(HaveOccurred()) 580 g.Expect(res).To(BeComparableTo(tt.wantResult)) 581 g.Expect(hooks.IsOkToDelete(tt.cluster)).To(Equal(tt.wantOkToDelete)) 582 g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.BeforeClusterDelete) == 1).To(Equal(tt.wantHookToBeCalled)) 583 } 584 }) 585 } 586 } 587 588 // TestClusterReconciler_deleteClusterClass tests the correct deletion behaviour for a ClusterClass with references in existing Clusters. 589 // In this case deletion of the ClusterClass should be blocked by the webhook. 
func TestClusterReconciler_deleteClusterClass(t *testing.T) {
	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
	g := NewWithT(t)
	timeout := 5 * time.Second

	ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile")
	g.Expect(err).ToNot(HaveOccurred())

	// Create the objects needed for the integration test:
	// - a ClusterClass with all the related templates
	// - a Cluster using the above ClusterClass
	cleanup, err := setupTestEnvForIntegrationTests(ns)
	g.Expect(err).ToNot(HaveOccurred())

	// Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests.
	defer func() {
		g.Expect(cleanup()).To(Succeed())
	}()

	actualCluster := &clusterv1.Cluster{}

	// Wait until both clusters referencing the ClusterClass are fully reconciled, so the
	// ClusterClass is demonstrably in use before the deletion attempt below.
	g.Eventually(func(g Gomega) error {
		for _, name := range []string{clusterName1, clusterName2} {
			// Get the cluster object.
			if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil {
				return err
			}

			// Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations.
			g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed())

			// Check if InfrastructureCluster has been created and has the correct labels and annotations.
			g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed())

			// Check if ControlPlane has been created and has the correct version, replicas, labels and annotations.
			g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed())

			// Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates.
			g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed())

			// Check if MachinePools are created and have the correct version, replicas, labels annotations and templates.
			g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed())
		}
		return nil
	}, timeout).Should(Succeed())

	// Ensure the clusterClass is available in the API server .
	clusterClass := &clusterv1.ClusterClass{}
	g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, clusterClass)).To(Succeed())

	// Attempt to delete the ClusterClass. Expect an error here as the ClusterClass deletion is blocked by the webhook.
	g.Expect(env.Delete(ctx, clusterClass)).NotTo(Succeed())
}

// TestReconciler_callBeforeClusterCreateHook verifies that callBeforeClusterCreateHook
// translates the BeforeClusterCreate runtime-hook response into the expected reconcile
// result: requeue on a blocking response, empty result on a non-blocking one, and an
// error on a failure response.
func TestReconciler_callBeforeClusterCreateHook(t *testing.T) {
	catalog := runtimecatalog.New()
	_ = runtimehooksv1.AddToCatalog(catalog)
	gvh, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterCreate)
	if err != nil {
		panic(err)
	}

	// Blocking: success status with a non-zero RetryAfterSeconds.
	blockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{
		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
			CommonResponse: runtimehooksv1.CommonResponse{
				Status: runtimehooksv1.ResponseStatusSuccess,
			},
			RetryAfterSeconds: int32(10),
		},
	}
	// Non-blocking: success status with RetryAfterSeconds set to zero.
	nonBlockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{
		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
			CommonResponse: runtimehooksv1.CommonResponse{
				Status: runtimehooksv1.ResponseStatusSuccess,
			},
			RetryAfterSeconds: int32(0),
		},
	}
	failingResponse := &runtimehooksv1.BeforeClusterCreateResponse{
		CommonRetryResponse: runtimehooksv1.CommonRetryResponse{
			CommonResponse: runtimehooksv1.CommonResponse{
				Status: runtimehooksv1.ResponseStatusFailure,
			},
		},
	}

	tests := []struct {
		name         string
		hookResponse *runtimehooksv1.BeforeClusterCreateResponse
		wantResult   reconcile.Result
		wantErr      bool
	}{
		{
			name:         "should return a requeue response when the BeforeClusterCreate hook is blocking",
			hookResponse: blockingResponse,
			wantResult:   ctrl.Result{RequeueAfter: time.Duration(10) * time.Second},
			wantErr:      false,
		},
		{
			name:         "should return an empty response when the BeforeClusterCreate hook is not blocking",
			hookResponse: nonBlockingResponse,
			wantResult:   ctrl.Result{},
			wantErr:      false,
		},
		{
			name:         "should error when the BeforeClusterCreate hook returns a failure response",
			hookResponse: failingResponse,
			wantResult:   ctrl.Result{},
			wantErr:      true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
				WithCatalog(catalog).
				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
					gvh: tt.hookResponse,
				}).
				Build()

			r := &Reconciler{
				RuntimeClient: runtimeClient,
			}
			s := &scope.Scope{
				Current: &scope.ClusterState{
					Cluster: &clusterv1.Cluster{},
				},
				HookResponseTracker: scope.NewHookResponseTracker(),
			}
			res, err := r.callBeforeClusterCreateHook(ctx, s)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
			} else {
				g.Expect(err).ToNot(HaveOccurred())
				g.Expect(res).To(BeComparableTo(tt.wantResult))
			}
		})
	}
}

// setupTestEnvForIntegrationTests builds and then creates in the envtest API server all objects required at init time for each of the
// integration tests in this file. This includes:
// - a first clusterClass with all the related templates
// - a second clusterClass, compatible with the first, used to test a ClusterClass rebase
// - a first Cluster using the above ClusterClass
// - a second Cluster using the above ClusterClass, but with different version/Machine deployment definition
// NOTE: The objects are created for every test, though some may not be used in every test.
func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) {
	workerClassName1 := "linux-worker"
	workerClassName2 := "windows-worker"
	workerClassName3 := "solaris-worker"

	// The below objects are created in order to feed the reconcile loop all the information it needs to create a full
	// Cluster given a skeletal Cluster object and a ClusterClass. The objects include:

	// 1) Templates for Machine, Cluster, ControlPlane and Bootstrap.
	infrastructureMachineTemplate1 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName1).Build()
	infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName2).
		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
		Build()
	infrastructureMachinePoolTemplate1 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName1).Build()
	infrastructureMachinePoolTemplate2 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName2).
		WithSpecFields(map[string]interface{}{"spec.template.fakeSetting": true}).
		Build()
	infrastructureClusterTemplate1 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate1").
		Build()
	infrastructureClusterTemplate2 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate2").
		WithSpecFields(map[string]interface{}{"spec.template.spec.alteredSetting": true}).
		Build()
	controlPlaneTemplate := builder.TestControlPlaneTemplate(ns.Name, "cp1").
		WithInfrastructureMachineTemplate(infrastructureMachineTemplate1).
		Build()
	bootstrapTemplate := builder.TestBootstrapTemplate(ns.Name, "bootstraptemplate").Build()

	// 2) ClusterClass definitions including definitions of MachineDeploymentClasses and MachinePoolClasses used inside the ClusterClass.
	machineDeploymentClass1 := builder.MachineDeploymentClass(workerClassName1 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		WithLabels(map[string]string{"foo": "bar"}).
		WithAnnotations(map[string]string{"foo": "bar"}).
		Build()
	machineDeploymentClass2 := builder.MachineDeploymentClass(workerClassName2 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machineDeploymentClass3 := builder.MachineDeploymentClass(workerClassName3 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate2).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machinePoolClass1 := builder.MachinePoolClass(workerClassName1 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		WithLabels(map[string]string{"foo": "bar"}).
		WithAnnotations(map[string]string{"foo": "bar"}).
		Build()
	machinePoolClass2 := builder.MachinePoolClass(workerClassName2 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machinePoolClass3 := builder.MachinePoolClass(workerClassName3 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate2).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	clusterClass := builder.ClusterClass(ns.Name, clusterClassName1).
		WithInfrastructureClusterTemplate(infrastructureClusterTemplate1).
		WithControlPlaneTemplate(controlPlaneTemplate).
		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate1).
		WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2).
		WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2).
		Build()

	// This ClusterClass changes a number of things in a ClusterClass in a way that is compatible for a ClusterClass rebase operation.
	// 1) It changes the controlPlaneMachineInfrastructureTemplate to a new template.
	// 2) It adds a new machineDeploymentClass and machinePoolClass
	// 3) It changes the infrastructureClusterTemplate.
	clusterClassForRebase := builder.ClusterClass(ns.Name, clusterClassName2).
		WithInfrastructureClusterTemplate(infrastructureClusterTemplate2).
		WithControlPlaneTemplate(controlPlaneTemplate).
		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate2).
		WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2, *machineDeploymentClass3).
		WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2, *machinePoolClass3).
		Build()

	// 3) Two Clusters including a Cluster Topology objects and the MachineDeploymentTopology and MachinePoolTopology objects used in the
	// Cluster Topology. The second cluster differs from the first both in version and in its MachineDeployment and MachinePool definitions.
	machineDeploymentTopology1 := builder.MachineDeploymentTopology("mdm1").
		WithClass(workerClassName1 + "-md").
		WithReplicas(3).
		Build()
	machineDeploymentTopology2 := builder.MachineDeploymentTopology("mdm2").
		WithClass(workerClassName2 + "-md").
		WithReplicas(1).
		Build()
	machinePoolTopology1 := builder.MachinePoolTopology("mp1").
		WithClass(workerClassName1 + "-mp").
		WithReplicas(3).
		Build()
	machinePoolTopology2 := builder.MachinePoolTopology("mp2").
		WithClass(workerClassName2 + "-mp").
		WithReplicas(1).
		Build()

	cluster1 := builder.Cluster(ns.Name, clusterName1).
		WithTopology(
			builder.ClusterTopology().
				WithClass(clusterClass.Name).
				WithMachineDeployment(machineDeploymentTopology1).
				WithMachineDeployment(machineDeploymentTopology2).
				WithMachinePool(machinePoolTopology1).
				WithMachinePool(machinePoolTopology2).
				WithVersion("1.22.2").
				WithControlPlaneReplicas(3).
				Build()).
		Build()

	cluster2 := builder.Cluster(ns.Name, clusterName2).
		WithTopology(
			builder.ClusterTopology().
				WithClass(clusterClass.Name).
				WithMachineDeployment(machineDeploymentTopology2).
				WithMachinePool(machinePoolTopology2).
				WithVersion("1.21.0").
				WithControlPlaneReplicas(1).
				Build()).
		Build()

	// Setup kubeconfig secrets for the clusters, so the ClusterCacheTracker works.
	cluster1Secret := kubeconfig.GenerateSecret(cluster1, kubeconfig.FromEnvTestConfig(env.Config, cluster1))
	cluster2Secret := kubeconfig.GenerateSecret(cluster2, kubeconfig.FromEnvTestConfig(env.Config, cluster2))
	// Unset the ownerrefs otherwise they are invalid because they contain an empty uid.
	cluster1Secret.ObjectMeta.OwnerReferences = nil
	cluster2Secret.ObjectMeta.OwnerReferences = nil

	// Create a list of all the objects above to add to the API server when the test environment starts.
	// The objects are created for every test, though some e.g. infrastructureMachineTemplate2 may not be used in every test.
	initObjs := []client.Object{
		infrastructureClusterTemplate1,
		infrastructureClusterTemplate2,
		infrastructureMachineTemplate1,
		infrastructureMachineTemplate2,
		infrastructureMachinePoolTemplate1,
		infrastructureMachinePoolTemplate2,
		bootstrapTemplate,
		controlPlaneTemplate,
		clusterClass,
		clusterClassForRebase,
		cluster1,
		cluster2,
		cluster1Secret,
		cluster2Secret,
	}
	cleanup := func() error {
		// Delete Objects in reverse, because we cannot delete a ClusterClass if it is still used by a Cluster.
		for i := len(initObjs) - 1; i >= 0; i-- {
			if err := env.CleanupAndWait(ctx, initObjs[i]); err != nil {
				return err
			}
		}
		return nil
	}

	for _, obj := range initObjs {
		if err := env.CreateAndWait(ctx, obj); err != nil {
			// Return the cleanup func even on error so the caller can remove whatever was created.
			return cleanup, err
		}
	}
	return cleanup, nil
}

// assertClusterTopologyReconciledCondition asserts the Cluster has the TopologyReconciled condition.
func assertClusterTopologyReconciledCondition(cluster *clusterv1.Cluster) error {
	if !conditions.Has(cluster, clusterv1.TopologyReconciledCondition) {
		return fmt.Errorf("cluster should have the TopologyReconciled condition set")
	}
	return nil
}

// assertClusterReconcile checks if the Cluster object:
// 1) Has its InfrastructureReference and ControlPlane reference added correctly.
// 2) InfrastructureReference and ControlPlaneRef have the expected Group, Version and Kind.
func assertClusterReconcile(cluster *clusterv1.Cluster) error {
	// Check if relevant managed topology labels are present.
	if err := assertClusterTopologyOwnedLabel(cluster); err != nil {
		return err
	}

	// Check if InfrastructureRef exists and is of the expected Kind and APIVersion.
	if err := referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.InfrastructureRef,
		builder.TestInfrastructureClusterKind,
		builder.InfrastructureGroupVersion); err != nil {
		return err
	}

	// Check if ControlPlaneRef exists and is of the expected Kind and APIVersion.
	return referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.ControlPlaneRef,
		builder.TestControlPlaneKind,
		builder.ControlPlaneGroupVersion)
}

// assertInfrastructureClusterReconcile checks if the infrastructureCluster object:
// 1) Is created.
// 2) Has the correct labels and annotations.
934 func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) error { 935 _, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.InfrastructureRef, cluster.Name) 936 return err 937 } 938 939 // assertControlPlaneReconcile checks if the ControlPlane object: 940 // 1. Is created. 941 // 2. Has the correct labels and annotations. 942 // 3. If it requires ControlPlane Infrastructure and if so: 943 // i) That the infrastructureMachineTemplate is created correctly. 944 // ii) That the infrastructureMachineTemplate has the correct labels and annotations 945 func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error { 946 cp, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name) 947 if err != nil { 948 return err 949 } 950 // Check if the ControlPlane Version matches the version in the Cluster's managed topology spec. 951 version, err := contract.ControlPlane().Version().Get(cp) 952 if err != nil { 953 return err 954 } 955 956 if *version != cluster.Spec.Topology.Version { 957 return fmt.Errorf("version %v does not match expected %v", *version, cluster.Spec.Topology.Version) 958 } 959 960 // Check for Control Plane replicase if it's set in the Cluster.Spec.Topology 961 if cluster.Spec.Topology.ControlPlane.Replicas != nil { 962 replicas, err := contract.ControlPlane().Replicas().Get(cp) 963 if err != nil { 964 return err 965 } 966 967 // Check for Control Plane replicase if it's set in the Cluster.Spec.Topology 968 if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas { 969 return fmt.Errorf("replicas %v do not match expected %v", int32(*replicas), *cluster.Spec.Topology.ControlPlane.Replicas) 970 } 971 } 972 clusterClass := &clusterv1.ClusterClass{} 973 if err := env.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Spec.Topology.Class}, clusterClass); err != nil { 974 return err 975 } 976 // Check for the ControlPlaneInfrastructure if it's referenced in the clusterClass. 
977 if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil && clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil { 978 cpInfra, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(cp) 979 if err != nil { 980 return err 981 } 982 if err := referenceExistsWithCorrectKindAndAPIVersion(cpInfra, 983 builder.TestInfrastructureMachineTemplateKind, 984 builder.InfrastructureGroupVersion); err != nil { 985 return err 986 } 987 if _, err := getAndAssertLabelsAndAnnotations(*cpInfra, cluster.Name); err != nil { 988 return err 989 } 990 } 991 return nil 992 } 993 994 // assertMachineDeploymentsReconcile checks if the MachineDeployments: 995 // 1) Are created in the correct number. 996 // 2) Have the correct labels (TopologyOwned, ClusterName, MachineDeploymentName). 997 // 3) Have the correct replicas and version. 998 // 4) Have the correct Kind/APIVersion and Labels/Annotations for BoostrapRef and InfrastructureRef templates. 999 func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error { 1000 // List all created machine deployments to assert the expected numbers are created. 1001 machineDeployments := &clusterv1.MachineDeploymentList{} 1002 if err := env.List(ctx, machineDeployments, client.InNamespace(cluster.Namespace)); err != nil { 1003 return err 1004 } 1005 1006 // clusterMDs will hold the MachineDeployments that have labels associating them with the cluster. 1007 clusterMDs := []clusterv1.MachineDeployment{} 1008 1009 // Run through all machine deployments and add only those with the TopologyOwnedLabel and the correct 1010 // ClusterNameLabel to the items for further testing. 1011 for _, m := range machineDeployments.Items { 1012 // If the machineDeployment doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel ignore. 
1013 md := m 1014 if err := assertClusterTopologyOwnedLabel(&md); err != nil { 1015 continue 1016 } 1017 if err := assertClusterNameLabel(&md, cluster.Name); err != nil { 1018 continue 1019 } 1020 clusterMDs = append(clusterMDs, md) 1021 } 1022 1023 // If the total number of machine deployments is not as expected return false. 1024 if len(clusterMDs) != len(cluster.Spec.Topology.Workers.MachineDeployments) { 1025 return fmt.Errorf("number of MachineDeployments %v does not match number expected %v", len(clusterMDs), len(cluster.Spec.Topology.Workers.MachineDeployments)) 1026 } 1027 for _, m := range clusterMDs { 1028 for _, topologyMD := range cluster.Spec.Topology.Workers.MachineDeployments { 1029 md := m 1030 // use the ClusterTopologyMachineDeploymentLabel to get the specific machineDeployment to compare to. 1031 if topologyMD.Name != md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentNameLabel] { 1032 continue 1033 } 1034 1035 // Check if the ClusterTopologyLabelName and ClusterTopologyOwnedLabel are set correctly. 1036 if err := assertClusterTopologyOwnedLabel(&md); err != nil { 1037 return err 1038 } 1039 1040 if err := assertClusterNameLabel(&md, cluster.Name); err != nil { 1041 return err 1042 } 1043 1044 // Check replicas and version for the MachineDeployment. 1045 if *md.Spec.Replicas != *topologyMD.Replicas { 1046 return fmt.Errorf("replicas %v does not match expected %v", md.Spec.Replicas, topologyMD.Replicas) 1047 } 1048 if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { 1049 return fmt.Errorf("version %v does not match expected %v", *md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) 1050 } 1051 1052 // Check if the InfrastructureReference exists. 
1053 if err := referenceExistsWithCorrectKindAndAPIVersion(&md.Spec.Template.Spec.InfrastructureRef, 1054 builder.TestInfrastructureMachineTemplateKind, 1055 builder.InfrastructureGroupVersion); err != nil { 1056 return err 1057 } 1058 1059 // Check if the InfrastructureReference has the expected labels and annotations. 1060 if _, err := getAndAssertLabelsAndAnnotations(md.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil { 1061 return err 1062 } 1063 1064 // Check if the Bootstrap reference has the expected Kind and APIVersion. 1065 if err := referenceExistsWithCorrectKindAndAPIVersion(md.Spec.Template.Spec.Bootstrap.ConfigRef, 1066 builder.TestBootstrapConfigTemplateKind, 1067 builder.BootstrapGroupVersion); err != nil { 1068 return err 1069 } 1070 1071 // Check if the Bootstrap reference has the expected labels and annotations. 1072 if _, err := getAndAssertLabelsAndAnnotations(*md.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil { 1073 return err 1074 } 1075 } 1076 } 1077 return nil 1078 } 1079 1080 // assertMachinePoolsReconcile checks if the MachinePools: 1081 // 1) Are created in the correct number. 1082 // 2) Have the correct labels (TopologyOwned, ClusterName, MachinePoolName). 1083 // 3) Have the correct replicas and version. 1084 // 4) Have the correct Kind/APIVersion and Labels/Annotations for BoostrapRef and InfrastructureRef templates. 1085 func assertMachinePoolsReconcile(cluster *clusterv1.Cluster) error { 1086 // List all created machine pools to assert the expected numbers are created. 1087 machinePools := &expv1.MachinePoolList{} 1088 if err := env.List(ctx, machinePools, client.InNamespace(cluster.Namespace)); err != nil { 1089 return err 1090 } 1091 1092 // clusterMPs will hold the MachinePools that have labels associating them with the cluster. 
1093 clusterMPs := []expv1.MachinePool{} 1094 1095 // Run through all machine pools and add only those with the TopologyOwnedLabel and the correct 1096 // ClusterNameLabel to the items for further testing. 1097 for _, m := range machinePools.Items { 1098 // If the machinePool doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel ignore. 1099 mp := m 1100 if err := assertClusterTopologyOwnedLabel(&mp); err != nil { 1101 continue 1102 } 1103 if err := assertClusterNameLabel(&mp, cluster.Name); err != nil { 1104 continue 1105 } 1106 clusterMPs = append(clusterMPs, mp) 1107 } 1108 1109 // If the total number of machine pools is not as expected return false. 1110 if len(clusterMPs) != len(cluster.Spec.Topology.Workers.MachinePools) { 1111 return fmt.Errorf("number of MachinePools %v does not match number expected %v", len(clusterMPs), len(cluster.Spec.Topology.Workers.MachinePools)) 1112 } 1113 for _, m := range clusterMPs { 1114 for _, topologyMP := range cluster.Spec.Topology.Workers.MachinePools { 1115 mp := m 1116 // use the ClusterTopologyMachinePoolLabel to get the specific machinePool to compare to. 1117 if topologyMP.Name != mp.GetLabels()[clusterv1.ClusterTopologyMachinePoolNameLabel] { 1118 continue 1119 } 1120 1121 // Check if the ClusterTopologyLabelName and ClusterTopologyOwnedLabel are set correctly. 1122 if err := assertClusterTopologyOwnedLabel(&mp); err != nil { 1123 return err 1124 } 1125 1126 if err := assertClusterNameLabel(&mp, cluster.Name); err != nil { 1127 return err 1128 } 1129 1130 // Check replicas and version for the MachinePool. 
1131 if *mp.Spec.Replicas != *topologyMP.Replicas { 1132 return fmt.Errorf("replicas %v does not match expected %v", mp.Spec.Replicas, topologyMP.Replicas) 1133 } 1134 if *mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version { 1135 return fmt.Errorf("version %v does not match expected %v", *mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version) 1136 } 1137 1138 // Check if the InfrastructureReference exists. 1139 if err := referenceExistsWithCorrectKindAndAPIVersion(&mp.Spec.Template.Spec.InfrastructureRef, 1140 builder.TestInfrastructureMachinePoolKind, 1141 builder.InfrastructureGroupVersion); err != nil { 1142 return err 1143 } 1144 1145 // Check if the InfrastructureReference has the expected labels and annotations. 1146 if _, err := getAndAssertLabelsAndAnnotations(mp.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil { 1147 return err 1148 } 1149 1150 // Check if the Bootstrap reference has the expected Kind and APIVersion. 1151 if err := referenceExistsWithCorrectKindAndAPIVersion(mp.Spec.Template.Spec.Bootstrap.ConfigRef, 1152 builder.TestBootstrapConfigKind, 1153 builder.BootstrapGroupVersion); err != nil { 1154 return err 1155 } 1156 1157 // Check if the Bootstrap reference has the expected labels and annotations. 1158 if _, err := getAndAssertLabelsAndAnnotations(*mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil { 1159 return err 1160 } 1161 } 1162 } 1163 return nil 1164 } 1165 1166 // getAndAssertLabelsAndAnnotations pulls the template referenced in the ObjectReference from the API server, checks for: 1167 // 1) The ClusterTopologyOwnedLabel. 1168 // 2) The correct ClusterNameLabel. 1169 // 3) The annotation stating where the template was cloned from. 1170 // The function returns the unstructured object and a bool indicating if it passed all tests. 
1171 func getAndAssertLabelsAndAnnotations(template corev1.ObjectReference, clusterName string) (*unstructured.Unstructured, error) { 1172 got := &unstructured.Unstructured{} 1173 got.SetKind(template.Kind) 1174 got.SetAPIVersion(template.APIVersion) 1175 1176 if err := env.Get(ctx, client.ObjectKey{Name: template.Name, Namespace: template.Namespace}, got); err != nil { 1177 return nil, err 1178 } 1179 1180 if err := assertLabelsAndAnnotations(got, clusterName); err != nil { 1181 return nil, err 1182 } 1183 return got, nil 1184 } 1185 1186 // assertLabelsAndAnnotations runs the specific label checks required to assert that an unstructured object has been 1187 // correctly created by a clusterClass reconciliation. 1188 func assertLabelsAndAnnotations(got client.Object, clusterName string) error { 1189 if err := assertClusterTopologyOwnedLabel(got); err != nil { 1190 return err 1191 } 1192 if err := assertClusterNameLabel(got, clusterName); err != nil { 1193 return err 1194 } 1195 return assertTemplateClonedFromNameAnnotation(got) 1196 } 1197 1198 // assertClusterTopologyOwnedLabel asserts the label exists. 1199 func assertClusterTopologyOwnedLabel(got client.Object) error { 1200 _, ok := got.GetLabels()[clusterv1.ClusterTopologyOwnedLabel] 1201 if !ok { 1202 return fmt.Errorf("%v not found on %v: %v", clusterv1.ClusterTopologyOwnedLabel, got.GetObjectKind().GroupVersionKind().Kind, got.GetName()) 1203 } 1204 return nil 1205 } 1206 1207 // assertClusterNameLabel asserts the label exists and is set to the correct value. 
1208 func assertClusterNameLabel(got client.Object, clusterName string) error { 1209 v, ok := got.GetLabels()[clusterv1.ClusterNameLabel] 1210 if !ok { 1211 return fmt.Errorf("%v not found in %v: %v", clusterv1.ClusterNameLabel, got.GetObjectKind().GroupVersionKind().Kind, got.GetName()) 1212 } 1213 if v != clusterName { 1214 return fmt.Errorf("%v %v does not match expected %v", clusterv1.ClusterNameLabel, v, clusterName) 1215 } 1216 return nil 1217 } 1218 1219 // assertTemplateClonedFromNameAnnotation asserts the annotation exists. This check does not assert that the template 1220 // named in the annotation is as expected. 1221 func assertTemplateClonedFromNameAnnotation(got client.Object) error { 1222 _, ok := got.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation] 1223 if !ok { 1224 return fmt.Errorf("%v not found in %v; %v", clusterv1.TemplateClonedFromNameAnnotation, got.GetObjectKind().GroupVersionKind().Kind, got.GetName()) 1225 } 1226 return nil 1227 } 1228 1229 // referenceExistsWithCorrectKindAndAPIVersion asserts that the passed ObjectReference is not nil and that it has the correct kind and apiVersion. 1230 func referenceExistsWithCorrectKindAndAPIVersion(reference *corev1.ObjectReference, kind string, apiVersion schema.GroupVersion) error { 1231 if reference == nil { 1232 return fmt.Errorf("object reference passed was nil") 1233 } 1234 if reference.Kind != kind { 1235 return fmt.Errorf("object reference kind %v does not match expected %v", reference.Kind, kind) 1236 } 1237 if reference.APIVersion != apiVersion.String() { 1238 return fmt.Errorf("apiVersion %v does not match expected %v", reference.APIVersion, apiVersion.String()) 1239 } 1240 return nil 1241 } 1242 1243 func TestReconciler_DefaultCluster(t *testing.T) { 1244 g := NewWithT(t) 1245 classBuilder := builder.ClusterClass(metav1.NamespaceDefault, clusterClassName1) 1246 topologyBase := builder.ClusterTopology(). 1247 WithClass(clusterClassName1). 1248 WithVersion("1.22.2"). 
1249 WithControlPlaneReplicas(3) 1250 mdClass1 := builder.MachineDeploymentClass("worker1"). 1251 Build() 1252 mdTopologyBase := builder.MachineDeploymentTopology("md1"). 1253 WithClass("worker1"). 1254 WithReplicas(3) 1255 mpClass1 := builder.MachinePoolClass("worker1"). 1256 Build() 1257 mpTopologyBase := builder.MachinePoolTopology("mp1"). 1258 WithClass("worker1"). 1259 WithReplicas(3) 1260 clusterBuilder := builder.Cluster(metav1.NamespaceDefault, clusterName1). 1261 WithTopology(topologyBase.DeepCopy().Build()) 1262 1263 tests := []struct { 1264 name string 1265 clusterClass *clusterv1.ClusterClass 1266 initialCluster *clusterv1.Cluster 1267 wantCluster *clusterv1.Cluster 1268 }{ 1269 { 1270 name: "Default Cluster variables with values from ClusterClass", 1271 clusterClass: classBuilder.DeepCopy(). 1272 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1273 Name: "location", 1274 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1275 { 1276 Required: true, 1277 From: clusterv1.VariableDefinitionFromInline, 1278 Schema: clusterv1.VariableSchema{ 1279 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1280 Type: "string", 1281 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1282 }, 1283 }, 1284 }, 1285 }, 1286 }). 1287 Build(), 1288 initialCluster: clusterBuilder.DeepCopy(). 1289 Build(), 1290 wantCluster: clusterBuilder.DeepCopy(). 1291 WithTopology(topologyBase.DeepCopy().WithVariables( 1292 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, DefinitionFrom: ""}). 1293 Build()). 1294 Build(), 1295 }, 1296 { 1297 name: "Do not default variable if a value is defined in the Cluster", 1298 clusterClass: classBuilder.DeepCopy(). 
1299 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1300 Name: "location", 1301 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1302 { 1303 Required: true, 1304 From: clusterv1.VariableDefinitionFromInline, 1305 Schema: clusterv1.VariableSchema{ 1306 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1307 Type: "string", 1308 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1309 }, 1310 }, 1311 }, 1312 }, 1313 }). 1314 Build(), 1315 initialCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( 1316 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). 1317 Build()). 1318 Build(), 1319 wantCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( 1320 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). 1321 Build()). 1322 Build(), 1323 }, 1324 { 1325 name: "Default nested values of Cluster variables with values from ClusterClass", 1326 clusterClass: classBuilder.DeepCopy(). 1327 WithWorkerMachineDeploymentClasses(*mdClass1). 1328 WithWorkerMachinePoolClasses(*mpClass1). 
1329 WithStatusVariables([]clusterv1.ClusterClassStatusVariable{ 1330 { 1331 Name: "location", 1332 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1333 { 1334 Required: true, 1335 From: clusterv1.VariableDefinitionFromInline, 1336 Schema: clusterv1.VariableSchema{ 1337 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1338 Type: "string", 1339 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1340 }, 1341 }, 1342 }, 1343 }, 1344 }, 1345 { 1346 Name: "httpProxy", 1347 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1348 { 1349 Required: true, 1350 From: clusterv1.VariableDefinitionFromInline, 1351 Schema: clusterv1.VariableSchema{ 1352 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1353 Type: "object", 1354 Properties: map[string]clusterv1.JSONSchemaProps{ 1355 "enabled": { 1356 Type: "boolean", 1357 }, 1358 "url": { 1359 Type: "string", 1360 Default: &apiextensionsv1.JSON{Raw: []byte(`"http://localhost:3128"`)}, 1361 }, 1362 }, 1363 }, 1364 }, 1365 }, 1366 }, 1367 }}...). 1368 Build(), 1369 initialCluster: clusterBuilder.DeepCopy(). 1370 WithTopology(topologyBase.DeepCopy(). 1371 WithVariables( 1372 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}, 1373 clusterv1.ClusterVariable{Name: "httpProxy", Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}}). 1374 WithMachineDeployment(mdTopologyBase.DeepCopy(). 1375 WithVariables(clusterv1.ClusterVariable{ 1376 Name: "httpProxy", 1377 Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, 1378 }).Build()). 1379 WithMachinePool(mpTopologyBase.DeepCopy(). 1380 WithVariables(clusterv1.ClusterVariable{ 1381 Name: "httpProxy", 1382 Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, 1383 }).Build()). 1384 Build()). 1385 Build(), 1386 wantCluster: clusterBuilder.DeepCopy().WithTopology( 1387 topologyBase.DeepCopy(). 
1388 WithVariables( 1389 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}, 1390 clusterv1.ClusterVariable{Name: "httpProxy", Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`)}}). 1391 WithMachineDeployment( 1392 mdTopologyBase.DeepCopy().WithVariables( 1393 clusterv1.ClusterVariable{ 1394 Name: "httpProxy", 1395 Value: apiextensionsv1.JSON{ 1396 // url has been added by defaulting. 1397 Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`), 1398 }, 1399 }). 1400 Build()). 1401 WithMachinePool( 1402 mpTopologyBase.DeepCopy().WithVariables( 1403 clusterv1.ClusterVariable{ 1404 Name: "httpProxy", 1405 Value: apiextensionsv1.JSON{ 1406 // url has been added by defaulting. 1407 Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`), 1408 }, 1409 }). 1410 Build()). 1411 Build()). 1412 Build(), 1413 }, 1414 } 1415 for _, tt := range tests { 1416 t.Run(tt.name, func(*testing.T) { 1417 initObjects := []client.Object{tt.initialCluster, tt.clusterClass} 1418 fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() 1419 r := &Reconciler{ 1420 Client: fakeClient, 1421 APIReader: fakeClient, 1422 } 1423 // Ignore the error here as we expect the ClusterClass to fail in reconciliation as its references do not exist. 1424 var _, _ = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKey{Name: tt.initialCluster.Name, Namespace: tt.initialCluster.Namespace}}) 1425 got := &clusterv1.Cluster{} 1426 g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: tt.initialCluster.Name, Namespace: tt.initialCluster.Namespace}, got)).To(Succeed()) 1427 // Compare the spec of the two clusters to ensure that variables are defaulted correctly. 
1428 g.Expect(reflect.DeepEqual(got.Spec, tt.wantCluster.Spec)).To(BeTrue(), cmp.Diff(got.Spec, tt.wantCluster.Spec)) 1429 }) 1430 } 1431 } 1432 1433 func TestReconciler_ValidateCluster(t *testing.T) { 1434 g := NewWithT(t) 1435 mdTopologyBase := builder.MachineDeploymentTopology("md1"). 1436 WithClass("worker1"). 1437 WithReplicas(3) 1438 mpTopologyBase := builder.MachinePoolTopology("mp1"). 1439 WithClass("worker1"). 1440 WithReplicas(3) 1441 classBuilder := builder.ClusterClass(metav1.NamespaceDefault, clusterClassName1) 1442 topologyBase := builder.ClusterTopology(). 1443 WithClass(clusterClassName1). 1444 WithVersion("1.22.2"). 1445 WithControlPlaneReplicas(3) 1446 clusterBuilder := builder.Cluster(metav1.NamespaceDefault, clusterName1). 1447 WithTopology( 1448 topologyBase.Build()) 1449 tests := []struct { 1450 name string 1451 clusterClass *clusterv1.ClusterClass 1452 cluster *clusterv1.Cluster 1453 wantValidationErr bool 1454 }{ 1455 { 1456 name: "Valid cluster should not throw validation error", 1457 clusterClass: classBuilder.DeepCopy(). 1458 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1459 Name: "httpProxy", 1460 }). 1461 Build(), 1462 cluster: clusterBuilder.DeepCopy(). 1463 Build(), 1464 wantValidationErr: false, 1465 }, 1466 { 1467 name: "Cluster invalid as it does not define a required variable", 1468 clusterClass: classBuilder.DeepCopy(). 1469 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1470 Name: "httpProxy", 1471 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1472 { 1473 Required: true, 1474 From: clusterv1.VariableDefinitionFromInline, 1475 }, 1476 }, 1477 }). 1478 Build(), 1479 cluster: clusterBuilder. 1480 Build(), 1481 wantValidationErr: true, 1482 }, 1483 { 1484 name: "Cluster invalid as it defines an MDTopology without a corresponding MDClass", 1485 clusterClass: classBuilder.DeepCopy(). 
1486 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1487 Name: "httpProxy", 1488 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1489 { 1490 Required: true, 1491 From: clusterv1.VariableDefinitionFromInline, 1492 }, 1493 }, 1494 }). 1495 Build(), 1496 cluster: clusterBuilder.WithTopology( 1497 builder.ClusterTopology().DeepCopy(). 1498 WithClass(clusterClassName1). 1499 WithVersion("1.22.2"). 1500 WithControlPlaneReplicas(3). 1501 WithMachineDeployment(mdTopologyBase.Build()). 1502 WithMachinePool(mpTopologyBase.Build()).Build(), 1503 ). 1504 Build(), 1505 wantValidationErr: true, 1506 }, 1507 } 1508 for _, tt := range tests { 1509 t.Run(tt.name, func(*testing.T) { 1510 initObjects := []client.Object{tt.cluster, tt.clusterClass} 1511 fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() 1512 r := &Reconciler{ 1513 Client: fakeClient, 1514 APIReader: fakeClient, 1515 } 1516 var _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKey{Name: tt.cluster.Name, Namespace: tt.cluster.Namespace}}) 1517 // Reconcile will always return an error here as the topology is incomplete. This test checks specifically for 1518 // validation errors. 1519 validationErrMessage := fmt.Sprintf("Cluster.cluster.x-k8s.io %q is invalid:", tt.cluster.Name) 1520 if tt.wantValidationErr { 1521 g.Expect(err.Error()).To(ContainSubstring(validationErrMessage)) 1522 return 1523 } 1524 g.Expect(err.Error()).ToNot(ContainSubstring(validationErrMessage)) 1525 }) 1526 } 1527 }