sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/topology/cluster/cluster_controller_test.go (about) 1 /* 2 Copyright 2021 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package cluster 18 19 import ( 20 "fmt" 21 "reflect" 22 "testing" 23 "time" 24 25 "github.com/google/go-cmp/cmp" 26 . "github.com/onsi/gomega" 27 corev1 "k8s.io/api/core/v1" 28 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 29 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 31 "k8s.io/apimachinery/pkg/runtime/schema" 32 utilfeature "k8s.io/component-base/featuregate/testing" 33 ctrl "sigs.k8s.io/controller-runtime" 34 "sigs.k8s.io/controller-runtime/pkg/client" 35 "sigs.k8s.io/controller-runtime/pkg/client/fake" 36 "sigs.k8s.io/controller-runtime/pkg/reconcile" 37 38 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 39 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" 40 runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" 41 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" 42 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" 43 "sigs.k8s.io/cluster-api/feature" 44 "sigs.k8s.io/cluster-api/internal/contract" 45 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" 46 "sigs.k8s.io/cluster-api/internal/hooks" 47 fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" 48 "sigs.k8s.io/cluster-api/internal/test/builder" 49 
"sigs.k8s.io/cluster-api/util/conditions" 50 "sigs.k8s.io/cluster-api/util/kubeconfig" 51 "sigs.k8s.io/cluster-api/util/patch" 52 ) 53 54 var ( 55 clusterName1 = "cluster1" 56 clusterName2 = "cluster2" 57 clusterClassName1 = "class1" 58 clusterClassName2 = "class2" 59 infrastructureMachineTemplateName1 = "inframachinetemplate1" 60 infrastructureMachineTemplateName2 = "inframachinetemplate2" 61 infrastructureMachinePoolTemplateName1 = "inframachinepooltemplate1" 62 infrastructureMachinePoolTemplateName2 = "inframachinepooltemplate2" 63 ) 64 65 func TestClusterReconciler_reconcileNewlyCreatedCluster(t *testing.T) { 66 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 67 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 68 g := NewWithT(t) 69 timeout := 5 * time.Second 70 71 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 72 g.Expect(err).ToNot(HaveOccurred()) 73 74 // Create the objects needed for the integration test: 75 // - a ClusterClass with all the related templates 76 // - a Cluster using the above ClusterClass 77 cleanup, err := setupTestEnvForIntegrationTests(ns) 78 g.Expect(err).ToNot(HaveOccurred()) 79 80 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 81 defer func() { 82 g.Expect(cleanup()).To(Succeed()) 83 }() 84 85 g.Eventually(func(g Gomega) error { 86 // Get the cluster object. 87 actualCluster := &clusterv1.Cluster{} 88 if err := env.GetAPIReader().Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 89 return err 90 } 91 92 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 93 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 94 95 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 
96 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 97 98 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 99 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 100 101 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 102 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 103 104 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 105 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 106 107 // Check if the Cluster has the relevant TopologyReconciledCondition. 108 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 109 110 return nil 111 }, timeout).Should(Succeed()) 112 } 113 114 func TestClusterReconciler_reconcileMultipleClustersFromOneClass(t *testing.T) { 115 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 116 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 117 118 g := NewWithT(t) 119 timeout := 5 * time.Second 120 121 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 122 g.Expect(err).ToNot(HaveOccurred()) 123 124 // Create the objects needed for the integration test: 125 // - a ClusterClass with all the related templates 126 // - a Cluster using the above ClusterClass 127 // - a second Cluster using the same ClusterClass 128 cleanup, err := setupTestEnvForIntegrationTests(ns) 129 g.Expect(err).ToNot(HaveOccurred()) 130 131 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 132 defer func() { 133 g.Expect(cleanup()).To(Succeed()) 134 }() 135 136 // Check to see that both clusters were correctly created and reconciled using the existing clusterClass. 
137 g.Eventually(func(g Gomega) error { 138 for _, name := range []string{clusterName1, clusterName2} { 139 // Get the cluster object. 140 actualCluster := &clusterv1.Cluster{} 141 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 142 return err 143 } 144 145 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 146 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 147 148 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 149 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 150 151 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 152 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 153 154 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 155 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 156 157 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 158 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 159 160 // Check if the Cluster has the relevant TopologyReconciledCondition. 
161 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 162 } 163 return nil 164 }, timeout).Should(Succeed()) 165 } 166 167 func TestClusterReconciler_reconcileUpdateOnClusterTopology(t *testing.T) { 168 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 169 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 170 g := NewWithT(t) 171 timeout := 300 * time.Second 172 173 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 174 g.Expect(err).ToNot(HaveOccurred()) 175 176 // Create the objects needed for the integration test: 177 // - a ClusterClass with all the related templates 178 // - a Cluster using the above ClusterClass 179 cleanup, err := setupTestEnvForIntegrationTests(ns) 180 g.Expect(err).ToNot(HaveOccurred()) 181 182 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 183 defer func() { 184 g.Expect(cleanup()).To(Succeed()) 185 }() 186 187 actualCluster := &clusterv1.Cluster{} 188 // First ensure that the initial cluster and other objects are created and populated as expected. 189 g.Eventually(func(g Gomega) error { 190 // Get the cluster object now including the updated replica number for the Machine deployment. 191 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 192 return err 193 } 194 195 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 196 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 197 198 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 199 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 200 201 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
202 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 203 204 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 205 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 206 207 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 208 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 209 210 // Check if the Cluster has the relevant TopologyReconciledCondition. 211 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 212 return nil 213 }, timeout).Should(Succeed()) 214 215 // Change the replicas field in the managed topology of our cluster and update the object in the API. 216 replicas := int32(100) 217 patchHelper, err := patch.NewHelper(actualCluster, env.Client) 218 g.Expect(err).ToNot(HaveOccurred()) 219 clusterWithTopologyChange := actualCluster.DeepCopy() 220 clusterWithTopologyChange.Spec.Topology.Workers.MachineDeployments[0].Replicas = &replicas 221 clusterWithTopologyChange.Spec.Topology.Workers.MachinePools[0].Replicas = &replicas 222 g.Expect(patchHelper.Patch(ctx, clusterWithTopologyChange)).Should(Succeed()) 223 224 // Check to ensure all objects are correctly reconciled with the new MachineDeployment and MachinePool replica count in Topology. 225 g.Eventually(func(g Gomega) error { 226 // Get the cluster object. 227 updatedCluster := &clusterv1.Cluster{} 228 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, updatedCluster); err != nil { 229 return err 230 } 231 232 // Check to ensure the replica count has been successfully updated in the API server and cache. 233 g.Expect(updatedCluster.Spec.Topology.Workers.MachineDeployments[0].Replicas).To(Equal(&replicas)) 234 235 // Check to ensure the replica count has been successfully updated in the API server and cache. 
236 g.Expect(updatedCluster.Spec.Topology.Workers.MachinePools[0].Replicas).To(Equal(&replicas)) 237 238 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 239 g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed()) 240 241 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 242 g.Expect(assertInfrastructureClusterReconcile(updatedCluster)).Should(Succeed()) 243 244 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 245 g.Expect(assertControlPlaneReconcile(updatedCluster)).Should(Succeed()) 246 247 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 248 g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) 249 250 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 251 g.Expect(assertMachinePoolsReconcile(updatedCluster)).Should(Succeed()) 252 253 // Check if the Cluster has the relevant TopologyReconciledCondition. 
254 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 255 return nil 256 }, timeout).Should(Succeed()) 257 } 258 259 func TestClusterReconciler_reconcileUpdatesOnClusterClass(t *testing.T) { 260 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 261 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 262 g := NewWithT(t) 263 timeout := 5 * time.Second 264 265 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 266 g.Expect(err).ToNot(HaveOccurred()) 267 268 // Create the objects needed for the integration test: 269 // - a ClusterClass with all the related templates 270 // - a Cluster using the above ClusterClass 271 // - a second Cluster using the same ClusterClass 272 cleanup, err := setupTestEnvForIntegrationTests(ns) 273 g.Expect(err).ToNot(HaveOccurred()) 274 275 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 276 defer func() { 277 g.Expect(cleanup()).To(Succeed()) 278 }() 279 280 actualCluster := &clusterv1.Cluster{} 281 282 g.Eventually(func(g Gomega) error { 283 for _, name := range []string{clusterName1, clusterName2} { 284 // Get the cluster object. 285 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 286 return err 287 } 288 289 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 290 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 291 292 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 293 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 294 295 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
296 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 297 298 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 299 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 300 301 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 302 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 303 304 // Check if the Cluster has the relevant TopologyReconciledCondition. 305 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 306 } 307 return nil 308 }, timeout).Should(Succeed()) 309 310 // Get the clusterClass to update and check if clusterClass updates are being correctly reconciled. 311 clusterClass := &clusterv1.ClusterClass{} 312 g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, clusterClass)).To(Succeed()) 313 314 patchHelper, err := patch.NewHelper(clusterClass, env.Client) 315 g.Expect(err).ToNot(HaveOccurred()) 316 // Change the infrastructureMachineTemplateName for the first of our MachineDeployments and update in the API. 317 clusterClass.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name = infrastructureMachineTemplateName2 318 // Change the infrastructureMachinePoolTemplateName for the first of our MachinePools and update in the API. 319 clusterClass.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name = infrastructureMachinePoolTemplateName2 320 g.Expect(patchHelper.Patch(ctx, clusterClass)).To(Succeed()) 321 322 g.Eventually(func(g Gomega) error { 323 // Check that the clusterClass has been correctly updated to use the new infrastructure template. 324 // This is necessary as sometimes the cache can take a little time to update. 
325 class := &clusterv1.ClusterClass{} 326 g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, class)).To(Succeed()) 327 g.Expect(class.Spec.Workers.MachineDeployments[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachineTemplateName2)) 328 g.Expect(class.Spec.Workers.MachinePools[0].Template.Infrastructure.Ref.Name).To(Equal(infrastructureMachinePoolTemplateName2)) 329 330 // For each cluster check that the clusterClass changes have been correctly reconciled. 331 for _, name := range []string{clusterName1, clusterName2} { 332 // Get the cluster object. 333 actualCluster = &clusterv1.Cluster{} 334 335 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 336 return err 337 } 338 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 339 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 340 341 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 342 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 343 344 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 345 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 346 347 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 348 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 349 350 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 351 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 352 353 // Check if the Cluster has the relevant TopologyReconciledCondition. 
354 g.Expect(assertClusterTopologyReconciledCondition(actualCluster)).Should(Succeed()) 355 } 356 return nil 357 }, timeout).Should(Succeed()) 358 } 359 360 func TestClusterReconciler_reconcileClusterClassRebase(t *testing.T) { 361 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 362 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 363 g := NewWithT(t) 364 timeout := 30 * time.Second 365 366 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 367 g.Expect(err).ToNot(HaveOccurred()) 368 369 // Create the objects needed for the integration test: 370 // - a ClusterClass with all the related templates 371 // - a Cluster using the first ClusterClass 372 // - a compatible ClusterClass to rebase the Cluster to 373 cleanup, err := setupTestEnvForIntegrationTests(ns) 374 g.Expect(err).ToNot(HaveOccurred()) 375 376 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 377 defer func() { 378 g.Expect(cleanup()).To(Succeed()) 379 }() 380 381 actualCluster := &clusterv1.Cluster{} 382 // First ensure that the initial cluster and other objects are created and populated as expected. 383 g.Eventually(func(g Gomega) error { 384 // Get the cluster object. 385 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, actualCluster); err != nil { 386 return err 387 } 388 389 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 390 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 391 392 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 393 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 394 395 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
396 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 397 398 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 399 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 400 401 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 402 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 403 404 return nil 405 }, timeout).Should(Succeed()) 406 407 patchHelper, err := patch.NewHelper(actualCluster, env.Client) 408 g.Expect(err).ToNot(HaveOccurred()) 409 // Change the ClusterClass pointed to in the Cluster's Topology. This is a ClusterClass rebase operation. 410 clusterWithRebase := actualCluster.DeepCopy() 411 clusterWithRebase.Spec.Topology.Class = clusterClassName2 412 g.Expect(patchHelper.Patch(ctx, clusterWithRebase)).Should(Succeed()) 413 414 // Check to ensure all objects are correctly reconciled with the new ClusterClass. 415 g.Eventually(func(g Gomega) error { 416 // Get the cluster object. 417 updatedCluster := &clusterv1.Cluster{} 418 if err := env.Get(ctx, client.ObjectKey{Name: clusterName1, Namespace: ns.Name}, updatedCluster); err != nil { 419 return err 420 } 421 // Check to ensure the spec.topology.class has been successfully updated in the API server and cache. 422 g.Expect(updatedCluster.Spec.Topology.Class).To(Equal(clusterClassName2)) 423 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 424 g.Expect(assertClusterReconcile(updatedCluster)).Should(Succeed()) 425 426 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 427 g.Expect(assertInfrastructureClusterReconcile(updatedCluster)).Should(Succeed()) 428 429 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 
430 g.Expect(assertControlPlaneReconcile(updatedCluster)).Should(Succeed()) 431 432 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 433 g.Expect(assertMachineDeploymentsReconcile(updatedCluster)).Should(Succeed()) 434 435 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 436 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 437 438 return nil 439 }, timeout).Should(Succeed()) 440 } 441 442 func TestClusterReconciler_reconcileDelete(t *testing.T) { 443 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() 444 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 445 446 catalog := runtimecatalog.New() 447 _ = runtimehooksv1.AddToCatalog(catalog) 448 449 beforeClusterDeleteGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterDelete) 450 if err != nil { 451 panic(err) 452 } 453 454 blockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 455 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 456 RetryAfterSeconds: int32(10), 457 CommonResponse: runtimehooksv1.CommonResponse{ 458 Status: runtimehooksv1.ResponseStatusSuccess, 459 }, 460 }, 461 } 462 nonBlockingResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 463 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 464 RetryAfterSeconds: int32(0), 465 CommonResponse: runtimehooksv1.CommonResponse{ 466 Status: runtimehooksv1.ResponseStatusSuccess, 467 }, 468 }, 469 } 470 failureResponse := &runtimehooksv1.BeforeClusterDeleteResponse{ 471 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 472 CommonResponse: runtimehooksv1.CommonResponse{ 473 Status: runtimehooksv1.ResponseStatusFailure, 474 }, 475 }, 476 } 477 478 tests := []struct { 479 name string 480 cluster *clusterv1.Cluster 481 hookResponse *runtimehooksv1.BeforeClusterDeleteResponse 482 
wantHookToBeCalled bool 483 wantResult ctrl.Result 484 wantOkToDelete bool 485 wantErr bool 486 }{ 487 { 488 name: "should apply the ok-to-delete annotation if the BeforeClusterDelete hook returns a non-blocking response", 489 cluster: &clusterv1.Cluster{ 490 ObjectMeta: metav1.ObjectMeta{ 491 Name: "test-cluster", 492 Namespace: "test-ns", 493 }, 494 Spec: clusterv1.ClusterSpec{ 495 Topology: &clusterv1.Topology{}, 496 }, 497 }, 498 hookResponse: nonBlockingResponse, 499 wantResult: ctrl.Result{}, 500 wantHookToBeCalled: true, 501 wantOkToDelete: true, 502 wantErr: false, 503 }, 504 { 505 name: "should requeue if the BeforeClusterDelete hook returns a blocking response", 506 cluster: &clusterv1.Cluster{ 507 ObjectMeta: metav1.ObjectMeta{ 508 Name: "test-cluster", 509 Namespace: "test-ns", 510 }, 511 Spec: clusterv1.ClusterSpec{ 512 Topology: &clusterv1.Topology{}, 513 }, 514 }, 515 hookResponse: blockingResponse, 516 wantResult: ctrl.Result{RequeueAfter: time.Duration(10) * time.Second}, 517 wantHookToBeCalled: true, 518 wantOkToDelete: false, 519 wantErr: false, 520 }, 521 { 522 name: "should fail if the BeforeClusterDelete hook returns a failure response", 523 cluster: &clusterv1.Cluster{ 524 ObjectMeta: metav1.ObjectMeta{ 525 Name: "test-cluster", 526 Namespace: "test-ns", 527 }, 528 Spec: clusterv1.ClusterSpec{ 529 Topology: &clusterv1.Topology{}, 530 }, 531 }, 532 hookResponse: failureResponse, 533 wantResult: ctrl.Result{}, 534 wantHookToBeCalled: true, 535 wantOkToDelete: false, 536 wantErr: true, 537 }, 538 { 539 name: "should succeed if the ok-to-delete annotation is already present", 540 cluster: &clusterv1.Cluster{ 541 ObjectMeta: metav1.ObjectMeta{ 542 Name: "test-cluster", 543 Namespace: "test-ns", 544 Annotations: map[string]string{ 545 // If the hook is already marked the hook should not be called during cluster delete. 
546 runtimev1.OkToDeleteAnnotation: "", 547 }, 548 }, 549 Spec: clusterv1.ClusterSpec{ 550 Topology: &clusterv1.Topology{}, 551 }, 552 }, 553 // Using a blocking response here should not matter as the hook should never be called. 554 // Using a blocking response to enforce the point. 555 hookResponse: blockingResponse, 556 wantResult: ctrl.Result{}, 557 wantHookToBeCalled: false, 558 wantOkToDelete: true, 559 wantErr: false, 560 }, 561 } 562 563 for _, tt := range tests { 564 t.Run(tt.name, func(t *testing.T) { 565 g := NewWithT(t) 566 567 fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build() 568 fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). 569 WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ 570 beforeClusterDeleteGVH: tt.hookResponse, 571 }). 572 WithCatalog(catalog). 573 Build() 574 575 r := &Reconciler{ 576 Client: fakeClient, 577 APIReader: fakeClient, 578 RuntimeClient: fakeRuntimeClient, 579 } 580 581 res, err := r.reconcileDelete(ctx, tt.cluster) 582 if tt.wantErr { 583 g.Expect(err).To(HaveOccurred()) 584 } else { 585 g.Expect(err).ToNot(HaveOccurred()) 586 g.Expect(res).To(BeComparableTo(tt.wantResult)) 587 g.Expect(hooks.IsOkToDelete(tt.cluster)).To(Equal(tt.wantOkToDelete)) 588 g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.BeforeClusterDelete) == 1).To(Equal(tt.wantHookToBeCalled)) 589 } 590 }) 591 } 592 } 593 594 // TestClusterReconciler_deleteClusterClass tests the correct deletion behaviour for a ClusterClass with references in existing Clusters. 595 // In this case deletion of the ClusterClass should be blocked by the webhook. 
596 func TestClusterReconciler_deleteClusterClass(t *testing.T) { 597 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)() 598 defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)() 599 g := NewWithT(t) 600 timeout := 5 * time.Second 601 602 ns, err := env.CreateNamespace(ctx, "test-topology-cluster-reconcile") 603 g.Expect(err).ToNot(HaveOccurred()) 604 605 // Create the objects needed for the integration test: 606 // - a ClusterClass with all the related templates 607 // - a Cluster using the above ClusterClass 608 cleanup, err := setupTestEnvForIntegrationTests(ns) 609 g.Expect(err).ToNot(HaveOccurred()) 610 611 // Defer a cleanup function that deletes each of the objects created during setupTestEnvForIntegrationTests. 612 defer func() { 613 g.Expect(cleanup()).To(Succeed()) 614 }() 615 616 actualCluster := &clusterv1.Cluster{} 617 618 g.Eventually(func(g Gomega) error { 619 for _, name := range []string{clusterName1, clusterName2} { 620 // Get the cluster object. 621 if err := env.Get(ctx, client.ObjectKey{Name: name, Namespace: ns.Name}, actualCluster); err != nil { 622 return err 623 } 624 625 // Check if Cluster has relevant Infrastructure and ControlPlane and labels and annotations. 626 g.Expect(assertClusterReconcile(actualCluster)).Should(Succeed()) 627 628 // Check if InfrastructureCluster has been created and has the correct labels and annotations. 629 g.Expect(assertInfrastructureClusterReconcile(actualCluster)).Should(Succeed()) 630 631 // Check if ControlPlane has been created and has the correct version, replicas, labels and annotations. 632 g.Expect(assertControlPlaneReconcile(actualCluster)).Should(Succeed()) 633 634 // Check if MachineDeployments are created and have the correct version, replicas, labels annotations and templates. 
635 g.Expect(assertMachineDeploymentsReconcile(actualCluster)).Should(Succeed()) 636 637 // Check if MachinePools are created and have the correct version, replicas, labels annotations and templates. 638 g.Expect(assertMachinePoolsReconcile(actualCluster)).Should(Succeed()) 639 } 640 return nil 641 }, timeout).Should(Succeed()) 642 643 // Ensure the clusterClass is available in the API server . 644 clusterClass := &clusterv1.ClusterClass{} 645 g.Expect(env.Get(ctx, client.ObjectKey{Namespace: ns.Name, Name: actualCluster.Spec.Topology.Class}, clusterClass)).To(Succeed()) 646 647 // Attempt to delete the ClusterClass. Expect an error here as the ClusterClass deletion is blocked by the webhook. 648 g.Expect(env.Delete(ctx, clusterClass)).NotTo(Succeed()) 649 } 650 651 func TestReconciler_callBeforeClusterCreateHook(t *testing.T) { 652 catalog := runtimecatalog.New() 653 _ = runtimehooksv1.AddToCatalog(catalog) 654 gvh, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterCreate) 655 if err != nil { 656 panic(err) 657 } 658 659 blockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{ 660 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 661 CommonResponse: runtimehooksv1.CommonResponse{ 662 Status: runtimehooksv1.ResponseStatusSuccess, 663 }, 664 RetryAfterSeconds: int32(10), 665 }, 666 } 667 nonBlockingResponse := &runtimehooksv1.BeforeClusterCreateResponse{ 668 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 669 CommonResponse: runtimehooksv1.CommonResponse{ 670 Status: runtimehooksv1.ResponseStatusSuccess, 671 }, 672 RetryAfterSeconds: int32(0), 673 }, 674 } 675 failingResponse := &runtimehooksv1.BeforeClusterCreateResponse{ 676 CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ 677 CommonResponse: runtimehooksv1.CommonResponse{ 678 Status: runtimehooksv1.ResponseStatusFailure, 679 }, 680 }, 681 } 682 683 tests := []struct { 684 name string 685 hookResponse *runtimehooksv1.BeforeClusterCreateResponse 686 wantResult 
reconcile.Result 687 wantErr bool 688 }{ 689 { 690 name: "should return a requeue response when the BeforeClusterCreate hook is blocking", 691 hookResponse: blockingResponse, 692 wantResult: ctrl.Result{RequeueAfter: time.Duration(10) * time.Second}, 693 wantErr: false, 694 }, 695 { 696 name: "should return an empty response when the BeforeClusterCreate hook is not blocking", 697 hookResponse: nonBlockingResponse, 698 wantResult: ctrl.Result{}, 699 wantErr: false, 700 }, 701 { 702 name: "should error when the BeforeClusterCreate hook returns a failure response", 703 hookResponse: failingResponse, 704 wantResult: ctrl.Result{}, 705 wantErr: true, 706 }, 707 } 708 709 for _, tt := range tests { 710 t.Run(tt.name, func(t *testing.T) { 711 g := NewWithT(t) 712 713 runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). 714 WithCatalog(catalog). 715 WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ 716 gvh: tt.hookResponse, 717 }). 718 Build() 719 720 r := &Reconciler{ 721 RuntimeClient: runtimeClient, 722 } 723 s := &scope.Scope{ 724 Current: &scope.ClusterState{ 725 Cluster: &clusterv1.Cluster{}, 726 }, 727 HookResponseTracker: scope.NewHookResponseTracker(), 728 } 729 res, err := r.callBeforeClusterCreateHook(ctx, s) 730 if tt.wantErr { 731 g.Expect(err).To(HaveOccurred()) 732 } else { 733 g.Expect(err).ToNot(HaveOccurred()) 734 g.Expect(res).To(BeComparableTo(tt.wantResult)) 735 } 736 }) 737 } 738 } 739 740 // setupTestEnvForIntegrationTests builds and then creates in the envtest API server all objects required at init time for each of the 741 // integration tests in this file. 
// - a first clusterClass with all the related templates
// - a second clusterClass, compatible with the first, used to test a ClusterClass rebase
// - a first Cluster using the above ClusterClass
// - a second Cluster using the above ClusterClass, but with different version/Machine deployment definition
// NOTE: The objects are created for every test, though some may not be used in every test.
// The returned func deletes everything that was created; the caller must invoke it even on error.
func setupTestEnvForIntegrationTests(ns *corev1.Namespace) (func() error, error) {
	workerClassName1 := "linux-worker"
	workerClassName2 := "windows-worker"
	workerClassName3 := "solaris-worker"

	// The below objects are created in order to feed the reconcile loop all the information it needs to create a full
	// Cluster given a skeletal Cluster object and a ClusterClass. The objects include:

	// 1) Templates for Machine, Cluster, ControlPlane and Bootstrap.
	infrastructureMachineTemplate1 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName1).Build()
	infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(ns.Name, infrastructureMachineTemplateName2).
		WithSpecFields(map[string]interface{}{"spec.template.spec.fakeSetting": true}).
		Build()
	infrastructureMachinePoolTemplate1 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName1).Build()
	infrastructureMachinePoolTemplate2 := builder.TestInfrastructureMachinePoolTemplate(ns.Name, infrastructureMachinePoolTemplateName2).
		WithSpecFields(map[string]interface{}{"spec.template.fakeSetting": true}).
		Build()
	infrastructureClusterTemplate1 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate1").
		Build()
	infrastructureClusterTemplate2 := builder.TestInfrastructureClusterTemplate(ns.Name, "infraclustertemplate2").
		WithSpecFields(map[string]interface{}{"spec.template.spec.alteredSetting": true}).
		Build()
	controlPlaneTemplate := builder.TestControlPlaneTemplate(ns.Name, "cp1").
		WithInfrastructureMachineTemplate(infrastructureMachineTemplate1).
		Build()
	bootstrapTemplate := builder.TestBootstrapTemplate(ns.Name, "bootstraptemplate").Build()

	// 2) ClusterClass definitions including definitions of MachineDeploymentClasses and MachinePoolClasses used inside the ClusterClass.
	machineDeploymentClass1 := builder.MachineDeploymentClass(workerClassName1 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		WithLabels(map[string]string{"foo": "bar"}).
		WithAnnotations(map[string]string{"foo": "bar"}).
		Build()
	machineDeploymentClass2 := builder.MachineDeploymentClass(workerClassName2 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machineDeploymentClass3 := builder.MachineDeploymentClass(workerClassName3 + "-md").
		WithInfrastructureTemplate(infrastructureMachineTemplate2).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machinePoolClass1 := builder.MachinePoolClass(workerClassName1 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		WithLabels(map[string]string{"foo": "bar"}).
		WithAnnotations(map[string]string{"foo": "bar"}).
		Build()
	machinePoolClass2 := builder.MachinePoolClass(workerClassName2 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate1).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	machinePoolClass3 := builder.MachinePoolClass(workerClassName3 + "-mp").
		WithInfrastructureTemplate(infrastructureMachinePoolTemplate2).
		WithBootstrapTemplate(bootstrapTemplate).
		Build()
	clusterClass := builder.ClusterClass(ns.Name, clusterClassName1).
		WithInfrastructureClusterTemplate(infrastructureClusterTemplate1).
		WithControlPlaneTemplate(controlPlaneTemplate).
		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate1).
		WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2).
		WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2).
		Build()

	// This ClusterClass changes a number of things in a ClusterClass in a way that is compatible for a ClusterClass rebase operation.
	// 1) It changes the controlPlaneMachineInfrastructureTemplate to a new template.
	// 2) It adds a new machineDeploymentClass and machinePoolClass
	// 3) It changes the infrastructureClusterTemplate.
	clusterClassForRebase := builder.ClusterClass(ns.Name, clusterClassName2).
		WithInfrastructureClusterTemplate(infrastructureClusterTemplate2).
		WithControlPlaneTemplate(controlPlaneTemplate).
		WithControlPlaneInfrastructureMachineTemplate(infrastructureMachineTemplate2).
		WithWorkerMachineDeploymentClasses(*machineDeploymentClass1, *machineDeploymentClass2, *machineDeploymentClass3).
		WithWorkerMachinePoolClasses(*machinePoolClass1, *machinePoolClass2, *machinePoolClass3).
		Build()

	// 3) Two Clusters including a Cluster Topology objects and the MachineDeploymentTopology and MachinePoolTopology objects used in the
	// Cluster Topology. The second cluster differs from the first both in version and in its MachineDeployment and MachinePool definitions.
	machineDeploymentTopology1 := builder.MachineDeploymentTopology("mdm1").
		WithClass(workerClassName1 + "-md").
		WithReplicas(3).
		Build()
	machineDeploymentTopology2 := builder.MachineDeploymentTopology("mdm2").
		WithClass(workerClassName2 + "-md").
		WithReplicas(1).
		Build()
	machinePoolTopology1 := builder.MachinePoolTopology("mp1").
		WithClass(workerClassName1 + "-mp").
		WithReplicas(3).
		Build()
	machinePoolTopology2 := builder.MachinePoolTopology("mp2").
		WithClass(workerClassName2 + "-mp").
		WithReplicas(1).
		Build()

	cluster1 := builder.Cluster(ns.Name, clusterName1).
		WithTopology(
			builder.ClusterTopology().
				WithClass(clusterClass.Name).
				WithMachineDeployment(machineDeploymentTopology1).
				WithMachineDeployment(machineDeploymentTopology2).
				WithMachinePool(machinePoolTopology1).
				WithMachinePool(machinePoolTopology2).
				WithVersion("1.22.2").
				WithControlPlaneReplicas(3).
				Build()).
		Build()

	cluster2 := builder.Cluster(ns.Name, clusterName2).
		WithTopology(
			builder.ClusterTopology().
				WithClass(clusterClass.Name).
				WithMachineDeployment(machineDeploymentTopology2).
				WithMachinePool(machinePoolTopology2).
				WithVersion("1.21.0").
				WithControlPlaneReplicas(1).
				Build()).
		Build()

	// Setup kubeconfig secrets for the clusters, so the ClusterCacheTracker works.
	cluster1Secret := kubeconfig.GenerateSecret(cluster1, kubeconfig.FromEnvTestConfig(env.Config, cluster1))
	cluster2Secret := kubeconfig.GenerateSecret(cluster2, kubeconfig.FromEnvTestConfig(env.Config, cluster2))
	// Unset the ownerrefs otherwise they are invalid because they contain an empty uid.
	cluster1Secret.ObjectMeta.OwnerReferences = nil
	cluster2Secret.ObjectMeta.OwnerReferences = nil

	// Create a list of the objects above to add to the API server when the test environment starts.
	// The objects are created for every test, though some e.g. infrastructureMachineTemplate2 may not be used in every test.
	initObjs := []client.Object{
		infrastructureClusterTemplate1,
		infrastructureClusterTemplate2,
		infrastructureMachineTemplate1,
		infrastructureMachineTemplate2,
		infrastructureMachinePoolTemplate1,
		infrastructureMachinePoolTemplate2,
		bootstrapTemplate,
		controlPlaneTemplate,
		clusterClass,
		clusterClassForRebase,
		cluster1,
		cluster2,
		cluster1Secret,
		cluster2Secret,
	}
	cleanup := func() error {
		// Delete objects in reverse, because we cannot delete a ClusterClass if it is still used by a Cluster.
		for i := len(initObjs) - 1; i >= 0; i-- {
			if err := env.CleanupAndWait(ctx, initObjs[i]); err != nil {
				return err
			}
		}
		return nil
	}

	for _, obj := range initObjs {
		if err := env.CreateAndWait(ctx, obj); err != nil {
			// Return the cleanup func as well so already-created objects can be removed.
			return cleanup, err
		}
	}
	return cleanup, nil
}

// assertClusterTopologyReconciledCondition asserts that the TopologyReconciled condition is set on the Cluster.
func assertClusterTopologyReconciledCondition(cluster *clusterv1.Cluster) error {
	if !conditions.Has(cluster, clusterv1.TopologyReconciledCondition) {
		return fmt.Errorf("cluster should have the TopologyReconciled condition set")
	}
	return nil
}

// assertClusterReconcile checks if the Cluster object:
// 1) Has its InfrastructureReference and ControlPlane reference added correctly.
// 2) InfrastructureReference and ControlPlaneRef have the expected Group, Version and Kind.
func assertClusterReconcile(cluster *clusterv1.Cluster) error {
	// Check if relevant managed topology labels are present.
	if err := assertClusterTopologyOwnedLabel(cluster); err != nil {
		return err
	}

	// Check if InfrastructureRef exists and is of the expected Kind and APIVersion.
	if err := referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.InfrastructureRef,
		builder.TestInfrastructureClusterKind,
		builder.InfrastructureGroupVersion); err != nil {
		return err
	}

	// Check if ControlPlaneRef exists and is of the expected Kind and APIVersion.
	return referenceExistsWithCorrectKindAndAPIVersion(cluster.Spec.ControlPlaneRef,
		builder.TestControlPlaneKind,
		builder.ControlPlaneGroupVersion)
}

// assertInfrastructureClusterReconcile checks if the infrastructureCluster object:
// 1) Is created.
// 2) Has the correct labels and annotations.
func assertInfrastructureClusterReconcile(cluster *clusterv1.Cluster) error {
	_, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.InfrastructureRef, cluster.Name)
	return err
}

// assertControlPlaneReconcile checks if the ControlPlane object:
// 1. Is created.
// 2. Has the correct labels and annotations.
// 3. If it requires ControlPlane Infrastructure and if so:
//    i) That the infrastructureMachineTemplate is created correctly.
//    ii) That the infrastructureMachineTemplate has the correct labels and annotations
func assertControlPlaneReconcile(cluster *clusterv1.Cluster) error {
	cp, err := getAndAssertLabelsAndAnnotations(*cluster.Spec.ControlPlaneRef, cluster.Name)
	if err != nil {
		return err
	}
	// Check if the ControlPlane Version matches the version in the Cluster's managed topology spec.
	version, err := contract.ControlPlane().Version().Get(cp)
	if err != nil {
		return err
	}

	if *version != cluster.Spec.Topology.Version {
		return fmt.Errorf("version %v does not match expected %v", *version, cluster.Spec.Topology.Version)
	}

	// Check the Control Plane replicas if they are set in the Cluster.Spec.Topology.
	if cluster.Spec.Topology.ControlPlane.Replicas != nil {
		replicas, err := contract.ControlPlane().Replicas().Get(cp)
		if err != nil {
			return err
		}

		// Compare the replicas on the ControlPlane object against the expected value in the topology.
		if int32(*replicas) != *cluster.Spec.Topology.ControlPlane.Replicas {
			return fmt.Errorf("replicas %v do not match expected %v", int32(*replicas), *cluster.Spec.Topology.ControlPlane.Replicas)
		}
	}
	clusterClass := &clusterv1.ClusterClass{}
	if err := env.Get(ctx, client.ObjectKey{Namespace: cluster.Namespace, Name: cluster.Spec.Topology.Class}, clusterClass); err != nil {
		return err
	}
	// Check for the ControlPlaneInfrastructure if it's referenced in the clusterClass.
	if clusterClass.Spec.ControlPlane.MachineInfrastructure != nil && clusterClass.Spec.ControlPlane.MachineInfrastructure.Ref != nil {
		cpInfra, err := contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(cp)
		if err != nil {
			return err
		}
		if err := referenceExistsWithCorrectKindAndAPIVersion(cpInfra,
			builder.TestInfrastructureMachineTemplateKind,
			builder.InfrastructureGroupVersion); err != nil {
			return err
		}
		if _, err := getAndAssertLabelsAndAnnotations(*cpInfra, cluster.Name); err != nil {
			return err
		}
	}
	return nil
}

// assertMachineDeploymentsReconcile checks if the MachineDeployments:
// 1) Are created in the correct number.
// 2) Have the correct labels (TopologyOwned, ClusterName, MachineDeploymentName).
// 3) Have the correct replicas and version.
// 4) Have the correct Kind/APIVersion and Labels/Annotations for BootstrapRef and InfrastructureRef templates.
func assertMachineDeploymentsReconcile(cluster *clusterv1.Cluster) error {
	// List all created machine deployments to assert the expected numbers are created.
	machineDeployments := &clusterv1.MachineDeploymentList{}
	if err := env.List(ctx, machineDeployments, client.InNamespace(cluster.Namespace)); err != nil {
		return err
	}

	// clusterMDs will hold the MachineDeployments that have labels associating them with the cluster.
	clusterMDs := []clusterv1.MachineDeployment{}

	// Run through all machine deployments and add only those with the TopologyOwnedLabel and the correct
	// ClusterNameLabel to the items for further testing.
	for _, m := range machineDeployments.Items {
		// If the machineDeployment doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel ignore.
		md := m
		if err := assertClusterTopologyOwnedLabel(&md); err != nil {
			continue
		}
		if err := assertClusterNameLabel(&md, cluster.Name); err != nil {
			continue
		}
		clusterMDs = append(clusterMDs, md)
	}

	// If the total number of machine deployments is not as expected return an error.
	if len(clusterMDs) != len(cluster.Spec.Topology.Workers.MachineDeployments) {
		return fmt.Errorf("number of MachineDeployments %v does not match number expected %v", len(clusterMDs), len(cluster.Spec.Topology.Workers.MachineDeployments))
	}
	for _, m := range clusterMDs {
		for _, topologyMD := range cluster.Spec.Topology.Workers.MachineDeployments {
			md := m
			// Use the ClusterTopologyMachineDeploymentNameLabel to get the specific machineDeployment to compare to.
			if topologyMD.Name != md.GetLabels()[clusterv1.ClusterTopologyMachineDeploymentNameLabel] {
				continue
			}

			// Check if the ClusterTopologyLabelName and ClusterTopologyOwnedLabel are set correctly.
			if err := assertClusterTopologyOwnedLabel(&md); err != nil {
				return err
			}

			if err := assertClusterNameLabel(&md, cluster.Name); err != nil {
				return err
			}

			// Check replicas and version for the MachineDeployment.
			if *md.Spec.Replicas != *topologyMD.Replicas {
				return fmt.Errorf("replicas %v does not match expected %v", md.Spec.Replicas, topologyMD.Replicas)
			}
			if *md.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
				return fmt.Errorf("version %v does not match expected %v", *md.Spec.Template.Spec.Version, cluster.Spec.Topology.Version)
			}

			// Check if the InfrastructureReference exists.
			if err := referenceExistsWithCorrectKindAndAPIVersion(&md.Spec.Template.Spec.InfrastructureRef,
				builder.TestInfrastructureMachineTemplateKind,
				builder.InfrastructureGroupVersion); err != nil {
				return err
			}

			// Check if the InfrastructureReference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(md.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected Kind and APIVersion.
			if err := referenceExistsWithCorrectKindAndAPIVersion(md.Spec.Template.Spec.Bootstrap.ConfigRef,
				builder.TestBootstrapConfigTemplateKind,
				builder.BootstrapGroupVersion); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(*md.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil {
				return err
			}
		}
	}
	return nil
}

// assertMachinePoolsReconcile checks if the MachinePools:
// 1) Are created in the correct number.
// 2) Have the correct labels (TopologyOwned, ClusterName, MachinePoolName).
// 3) Have the correct replicas and version.
// 4) Have the correct Kind/APIVersion and Labels/Annotations for BootstrapRef and InfrastructureRef templates.
func assertMachinePoolsReconcile(cluster *clusterv1.Cluster) error {
	// List all created machine pools to assert the expected numbers are created.
	machinePools := &expv1.MachinePoolList{}
	if err := env.List(ctx, machinePools, client.InNamespace(cluster.Namespace)); err != nil {
		return err
	}

	// clusterMPs will hold the MachinePools that have labels associating them with the cluster.
	clusterMPs := []expv1.MachinePool{}

	// Run through all machine pools and add only those with the TopologyOwnedLabel and the correct
	// ClusterNameLabel to the items for further testing.
	for _, m := range machinePools.Items {
		// If the machinePool doesn't have the ClusterTopologyOwnedLabel and the ClusterNameLabel ignore.
		mp := m
		if err := assertClusterTopologyOwnedLabel(&mp); err != nil {
			continue
		}
		if err := assertClusterNameLabel(&mp, cluster.Name); err != nil {
			continue
		}
		clusterMPs = append(clusterMPs, mp)
	}

	// If the total number of machine pools is not as expected return an error.
	if len(clusterMPs) != len(cluster.Spec.Topology.Workers.MachinePools) {
		return fmt.Errorf("number of MachinePools %v does not match number expected %v", len(clusterMPs), len(cluster.Spec.Topology.Workers.MachinePools))
	}
	for _, m := range clusterMPs {
		for _, topologyMP := range cluster.Spec.Topology.Workers.MachinePools {
			mp := m
			// Use the ClusterTopologyMachinePoolNameLabel to get the specific machinePool to compare to.
			if topologyMP.Name != mp.GetLabels()[clusterv1.ClusterTopologyMachinePoolNameLabel] {
				continue
			}

			// Check if the ClusterTopologyLabelName and ClusterTopologyOwnedLabel are set correctly.
			if err := assertClusterTopologyOwnedLabel(&mp); err != nil {
				return err
			}

			if err := assertClusterNameLabel(&mp, cluster.Name); err != nil {
				return err
			}

			// Check replicas and version for the MachinePool.
			if *mp.Spec.Replicas != *topologyMP.Replicas {
				return fmt.Errorf("replicas %v does not match expected %v", mp.Spec.Replicas, topologyMP.Replicas)
			}
			if *mp.Spec.Template.Spec.Version != cluster.Spec.Topology.Version {
				return fmt.Errorf("version %v does not match expected %v", *mp.Spec.Template.Spec.Version, cluster.Spec.Topology.Version)
			}

			// Check if the InfrastructureReference exists.
			if err := referenceExistsWithCorrectKindAndAPIVersion(&mp.Spec.Template.Spec.InfrastructureRef,
				builder.TestInfrastructureMachinePoolKind,
				builder.InfrastructureGroupVersion); err != nil {
				return err
			}

			// Check if the InfrastructureReference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(mp.Spec.Template.Spec.InfrastructureRef, cluster.Name); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected Kind and APIVersion.
			if err := referenceExistsWithCorrectKindAndAPIVersion(mp.Spec.Template.Spec.Bootstrap.ConfigRef,
				builder.TestBootstrapConfigKind,
				builder.BootstrapGroupVersion); err != nil {
				return err
			}

			// Check if the Bootstrap reference has the expected labels and annotations.
			if _, err := getAndAssertLabelsAndAnnotations(*mp.Spec.Template.Spec.Bootstrap.ConfigRef, cluster.Name); err != nil {
				return err
			}
		}
	}
	return nil
}

// getAndAssertLabelsAndAnnotations pulls the template referenced in the ObjectReference from the API server, checks for:
// 1) The ClusterTopologyOwnedLabel.
// 2) The correct ClusterNameLabel.
// 3) The annotation stating where the template was cloned from.
// The function returns the unstructured object and an error if any of the checks fail.
func getAndAssertLabelsAndAnnotations(template corev1.ObjectReference, clusterName string) (*unstructured.Unstructured, error) {
	got := &unstructured.Unstructured{}
	got.SetKind(template.Kind)
	got.SetAPIVersion(template.APIVersion)

	if err := env.Get(ctx, client.ObjectKey{Name: template.Name, Namespace: template.Namespace}, got); err != nil {
		return nil, err
	}

	if err := assertLabelsAndAnnotations(got, clusterName); err != nil {
		return nil, err
	}
	return got, nil
}

// assertLabelsAndAnnotations runs the specific label checks required to assert that an unstructured object has been
// correctly created by a clusterClass reconciliation.
func assertLabelsAndAnnotations(got client.Object, clusterName string) error {
	if err := assertClusterTopologyOwnedLabel(got); err != nil {
		return err
	}
	if err := assertClusterNameLabel(got, clusterName); err != nil {
		return err
	}
	return assertTemplateClonedFromNameAnnotation(got)
}

// assertClusterTopologyOwnedLabel asserts the label exists.
func assertClusterTopologyOwnedLabel(got client.Object) error {
	_, ok := got.GetLabels()[clusterv1.ClusterTopologyOwnedLabel]
	if !ok {
		return fmt.Errorf("%v not found on %v: %v", clusterv1.ClusterTopologyOwnedLabel, got.GetObjectKind().GroupVersionKind().Kind, got.GetName())
	}
	return nil
}

// assertClusterNameLabel asserts the label exists and is set to the correct value.
func assertClusterNameLabel(got client.Object, clusterName string) error {
	v, ok := got.GetLabels()[clusterv1.ClusterNameLabel]
	if !ok {
		return fmt.Errorf("%v not found in %v: %v", clusterv1.ClusterNameLabel, got.GetObjectKind().GroupVersionKind().Kind, got.GetName())
	}
	if v != clusterName {
		return fmt.Errorf("%v %v does not match expected %v", clusterv1.ClusterNameLabel, v, clusterName)
	}
	return nil
}

// assertTemplateClonedFromNameAnnotation asserts the annotation exists. This check does not assert that the template
// named in the annotation is as expected.
func assertTemplateClonedFromNameAnnotation(got client.Object) error {
	_, ok := got.GetAnnotations()[clusterv1.TemplateClonedFromNameAnnotation]
	if !ok {
		return fmt.Errorf("%v not found in %v; %v", clusterv1.TemplateClonedFromNameAnnotation, got.GetObjectKind().GroupVersionKind().Kind, got.GetName())
	}
	return nil
}

// referenceExistsWithCorrectKindAndAPIVersion asserts that the passed ObjectReference is not nil and that it has the correct kind and apiVersion.
1237 func referenceExistsWithCorrectKindAndAPIVersion(reference *corev1.ObjectReference, kind string, apiVersion schema.GroupVersion) error { 1238 if reference == nil { 1239 return fmt.Errorf("object reference passed was nil") 1240 } 1241 if reference.Kind != kind { 1242 return fmt.Errorf("object reference kind %v does not match expected %v", reference.Kind, kind) 1243 } 1244 if reference.APIVersion != apiVersion.String() { 1245 return fmt.Errorf("apiVersion %v does not match expected %v", reference.APIVersion, apiVersion.String()) 1246 } 1247 return nil 1248 } 1249 1250 func TestReconciler_DefaultCluster(t *testing.T) { 1251 g := NewWithT(t) 1252 classBuilder := builder.ClusterClass(metav1.NamespaceDefault, clusterClassName1) 1253 topologyBase := builder.ClusterTopology(). 1254 WithClass(clusterClassName1). 1255 WithVersion("1.22.2"). 1256 WithControlPlaneReplicas(3) 1257 mdClass1 := builder.MachineDeploymentClass("worker1"). 1258 Build() 1259 mdTopologyBase := builder.MachineDeploymentTopology("md1"). 1260 WithClass("worker1"). 1261 WithReplicas(3) 1262 mpClass1 := builder.MachinePoolClass("worker1"). 1263 Build() 1264 mpTopologyBase := builder.MachinePoolTopology("mp1"). 1265 WithClass("worker1"). 1266 WithReplicas(3) 1267 clusterBuilder := builder.Cluster(metav1.NamespaceDefault, clusterName1). 1268 WithTopology(topologyBase.DeepCopy().Build()) 1269 1270 tests := []struct { 1271 name string 1272 clusterClass *clusterv1.ClusterClass 1273 initialCluster *clusterv1.Cluster 1274 wantCluster *clusterv1.Cluster 1275 }{ 1276 { 1277 name: "Default Cluster variables with values from ClusterClass", 1278 clusterClass: classBuilder.DeepCopy(). 
1279 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1280 Name: "location", 1281 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1282 { 1283 Required: true, 1284 From: clusterv1.VariableDefinitionFromInline, 1285 Schema: clusterv1.VariableSchema{ 1286 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1287 Type: "string", 1288 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1289 }, 1290 }, 1291 }, 1292 }, 1293 }). 1294 Build(), 1295 initialCluster: clusterBuilder.DeepCopy(). 1296 Build(), 1297 wantCluster: clusterBuilder.DeepCopy(). 1298 WithTopology(topologyBase.DeepCopy().WithVariables( 1299 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, DefinitionFrom: ""}). 1300 Build()). 1301 Build(), 1302 }, 1303 { 1304 name: "Do not default variable if a value is defined in the Cluster", 1305 clusterClass: classBuilder.DeepCopy(). 1306 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1307 Name: "location", 1308 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1309 { 1310 Required: true, 1311 From: clusterv1.VariableDefinitionFromInline, 1312 Schema: clusterv1.VariableSchema{ 1313 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1314 Type: "string", 1315 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1316 }, 1317 }, 1318 }, 1319 }, 1320 }). 1321 Build(), 1322 initialCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( 1323 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). 1324 Build()). 1325 Build(), 1326 wantCluster: clusterBuilder.DeepCopy().WithTopology(topologyBase.DeepCopy().WithVariables( 1327 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}). 1328 Build()). 1329 Build(), 1330 }, 1331 { 1332 name: "Default nested values of Cluster variables with values from ClusterClass", 1333 clusterClass: classBuilder.DeepCopy(). 
1334 WithWorkerMachineDeploymentClasses(*mdClass1). 1335 WithWorkerMachinePoolClasses(*mpClass1). 1336 WithStatusVariables([]clusterv1.ClusterClassStatusVariable{ 1337 { 1338 Name: "location", 1339 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1340 { 1341 Required: true, 1342 From: clusterv1.VariableDefinitionFromInline, 1343 Schema: clusterv1.VariableSchema{ 1344 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1345 Type: "string", 1346 Default: &apiextensionsv1.JSON{Raw: []byte(`"us-east"`)}, 1347 }, 1348 }, 1349 }, 1350 }, 1351 }, 1352 { 1353 Name: "httpProxy", 1354 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1355 { 1356 Required: true, 1357 From: clusterv1.VariableDefinitionFromInline, 1358 Schema: clusterv1.VariableSchema{ 1359 OpenAPIV3Schema: clusterv1.JSONSchemaProps{ 1360 Type: "object", 1361 Properties: map[string]clusterv1.JSONSchemaProps{ 1362 "enabled": { 1363 Type: "boolean", 1364 }, 1365 "url": { 1366 Type: "string", 1367 Default: &apiextensionsv1.JSON{Raw: []byte(`"http://localhost:3128"`)}, 1368 }, 1369 }, 1370 }, 1371 }, 1372 }, 1373 }, 1374 }}...). 1375 Build(), 1376 initialCluster: clusterBuilder.DeepCopy(). 1377 WithTopology(topologyBase.DeepCopy(). 1378 WithVariables( 1379 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}, 1380 clusterv1.ClusterVariable{Name: "httpProxy", Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}}). 1381 WithMachineDeployment(mdTopologyBase.DeepCopy(). 1382 WithVariables(clusterv1.ClusterVariable{ 1383 Name: "httpProxy", 1384 Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, 1385 }).Build()). 1386 WithMachinePool(mpTopologyBase.DeepCopy(). 1387 WithVariables(clusterv1.ClusterVariable{ 1388 Name: "httpProxy", 1389 Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true}`)}, 1390 }).Build()). 1391 Build()). 1392 Build(), 1393 wantCluster: clusterBuilder.DeepCopy().WithTopology( 1394 topologyBase.DeepCopy(). 
1395 WithVariables( 1396 clusterv1.ClusterVariable{Name: "location", Value: apiextensionsv1.JSON{Raw: []byte(`"us-west"`)}}, 1397 clusterv1.ClusterVariable{Name: "httpProxy", Value: apiextensionsv1.JSON{Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`)}}). 1398 WithMachineDeployment( 1399 mdTopologyBase.DeepCopy().WithVariables( 1400 clusterv1.ClusterVariable{ 1401 Name: "httpProxy", 1402 Value: apiextensionsv1.JSON{ 1403 // url has been added by defaulting. 1404 Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`), 1405 }, 1406 }). 1407 Build()). 1408 WithMachinePool( 1409 mpTopologyBase.DeepCopy().WithVariables( 1410 clusterv1.ClusterVariable{ 1411 Name: "httpProxy", 1412 Value: apiextensionsv1.JSON{ 1413 // url has been added by defaulting. 1414 Raw: []byte(`{"enabled":true,"url":"http://localhost:3128"}`), 1415 }, 1416 }). 1417 Build()). 1418 Build()). 1419 Build(), 1420 }, 1421 } 1422 for _, tt := range tests { 1423 t.Run(tt.name, func(t *testing.T) { 1424 initObjects := []client.Object{tt.initialCluster, tt.clusterClass} 1425 fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() 1426 r := &Reconciler{ 1427 Client: fakeClient, 1428 APIReader: fakeClient, 1429 } 1430 // Ignore the error here as we expect the ClusterClass to fail in reconciliation as its references do not exist. 1431 var _, _ = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKey{Name: tt.initialCluster.Name, Namespace: tt.initialCluster.Namespace}}) 1432 got := &clusterv1.Cluster{} 1433 g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: tt.initialCluster.Name, Namespace: tt.initialCluster.Namespace}, got)).To(Succeed()) 1434 // Compare the spec of the two clusters to ensure that variables are defaulted correctly. 
1435 g.Expect(reflect.DeepEqual(got.Spec, tt.wantCluster.Spec)).To(BeTrue(), cmp.Diff(got.Spec, tt.wantCluster.Spec)) 1436 }) 1437 } 1438 } 1439 1440 func TestReconciler_ValidateCluster(t *testing.T) { 1441 g := NewWithT(t) 1442 mdTopologyBase := builder.MachineDeploymentTopology("md1"). 1443 WithClass("worker1"). 1444 WithReplicas(3) 1445 mpTopologyBase := builder.MachinePoolTopology("mp1"). 1446 WithClass("worker1"). 1447 WithReplicas(3) 1448 classBuilder := builder.ClusterClass(metav1.NamespaceDefault, clusterClassName1) 1449 topologyBase := builder.ClusterTopology(). 1450 WithClass(clusterClassName1). 1451 WithVersion("1.22.2"). 1452 WithControlPlaneReplicas(3) 1453 clusterBuilder := builder.Cluster(metav1.NamespaceDefault, clusterName1). 1454 WithTopology( 1455 topologyBase.Build()) 1456 tests := []struct { 1457 name string 1458 clusterClass *clusterv1.ClusterClass 1459 cluster *clusterv1.Cluster 1460 wantValidationErr bool 1461 }{ 1462 { 1463 name: "Valid cluster should not throw validation error", 1464 clusterClass: classBuilder.DeepCopy(). 1465 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1466 Name: "httpProxy", 1467 }). 1468 Build(), 1469 cluster: clusterBuilder.DeepCopy(). 1470 Build(), 1471 wantValidationErr: false, 1472 }, 1473 { 1474 name: "Cluster invalid as it does not define a required variable", 1475 clusterClass: classBuilder.DeepCopy(). 1476 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1477 Name: "httpProxy", 1478 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1479 { 1480 Required: true, 1481 From: clusterv1.VariableDefinitionFromInline, 1482 }, 1483 }, 1484 }). 1485 Build(), 1486 cluster: clusterBuilder. 1487 Build(), 1488 wantValidationErr: true, 1489 }, 1490 { 1491 name: "Cluster invalid as it defines an MDTopology without a corresponding MDClass", 1492 clusterClass: classBuilder.DeepCopy(). 
1493 WithStatusVariables(clusterv1.ClusterClassStatusVariable{ 1494 Name: "httpProxy", 1495 Definitions: []clusterv1.ClusterClassStatusVariableDefinition{ 1496 { 1497 Required: true, 1498 From: clusterv1.VariableDefinitionFromInline, 1499 }, 1500 }, 1501 }). 1502 Build(), 1503 cluster: clusterBuilder.WithTopology( 1504 builder.ClusterTopology().DeepCopy(). 1505 WithClass(clusterClassName1). 1506 WithVersion("1.22.2"). 1507 WithControlPlaneReplicas(3). 1508 WithMachineDeployment(mdTopologyBase.Build()). 1509 WithMachinePool(mpTopologyBase.Build()).Build(), 1510 ). 1511 Build(), 1512 wantValidationErr: true, 1513 }, 1514 } 1515 for _, tt := range tests { 1516 t.Run(tt.name, func(t *testing.T) { 1517 initObjects := []client.Object{tt.cluster, tt.clusterClass} 1518 fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(initObjects...).Build() 1519 r := &Reconciler{ 1520 Client: fakeClient, 1521 APIReader: fakeClient, 1522 } 1523 var _, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: client.ObjectKey{Name: tt.cluster.Name, Namespace: tt.cluster.Namespace}}) 1524 // Reconcile will always return an error here as the topology is incomplete. This test checks specifically for 1525 // validation errors. 1526 validationErrMessage := fmt.Sprintf("Cluster.cluster.x-k8s.io %q is invalid:", tt.cluster.Name) 1527 if tt.wantValidationErr { 1528 g.Expect(err.Error()).To(ContainSubstring(validationErrMessage)) 1529 return 1530 } 1531 g.Expect(err.Error()).ToNot(ContainSubstring(validationErrMessage)) 1532 }) 1533 } 1534 }