sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/topology/cluster/reconcile_state_test.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"fmt"
    21  	"net/http"
    22  	"regexp"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/google/go-cmp/cmp"
    27  	. "github.com/onsi/gomega"
    28  	"github.com/pkg/errors"
    29  	corev1 "k8s.io/api/core/v1"
    30  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    33  	"k8s.io/apimachinery/pkg/runtime"
    34  	"k8s.io/apimachinery/pkg/types"
    35  	"k8s.io/apimachinery/pkg/util/intstr"
    36  	"k8s.io/utils/ptr"
    37  	"sigs.k8s.io/controller-runtime/pkg/client"
    38  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    39  	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
    40  	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    41  
    42  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    43  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    44  	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
    45  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    46  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    47  	"sigs.k8s.io/cluster-api/exp/topology/desiredstate"
    48  	"sigs.k8s.io/cluster-api/exp/topology/scope"
    49  	"sigs.k8s.io/cluster-api/internal/contract"
    50  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
    51  	"sigs.k8s.io/cluster-api/internal/hooks"
    52  	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
    53  	"sigs.k8s.io/cluster-api/internal/test/builder"
    54  	"sigs.k8s.io/cluster-api/internal/topology/clustershim"
    55  	"sigs.k8s.io/cluster-api/internal/topology/names"
    56  	"sigs.k8s.io/cluster-api/internal/topology/ownerrefs"
    57  	"sigs.k8s.io/cluster-api/internal/topology/selectors"
    58  	"sigs.k8s.io/cluster-api/internal/util/ssa"
    59  	"sigs.k8s.io/cluster-api/internal/webhooks"
    60  )
    61  
    62  var (
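        	// IgnoreNameGenerated ignores the metadata.name path when comparing objects,
        	// so generated names do not produce spurious diffs.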
    63  	IgnoreNameGenerated = IgnorePaths{
    64  		"metadata.name",
    65  	}
    66  )
    67  
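        // TestReconcileShim exercises reconcileClusterShim: the temporary shim Secret is created (and re-used on
        // retries) while the InfrastructureCluster and ControlPlane objects still have to be created, it is kept
        // while those objects are waiting to be reconciled, and it is deleted once the Cluster owns both of them.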
    68  func TestReconcileShim(t *testing.T) {
    69  	infrastructureCluster := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").Build()
    70  	controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "controlplane-cluster1").Build()
    71  	cluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").Build()
    72  	// The cluster requires a UID because reconcileClusterShim will create a cluster shim
    73  	// which has the cluster set as Owner in an OwnerReference.
    74  	// A valid OwnerReference requires a UID.
    75  	cluster.SetUID("foo")
    76  
    77  	t.Run("Shim gets created when InfrastructureCluster and ControlPlane objects have to be created", func(t *testing.T) {
    78  		g := NewWithT(t)
    79  
    80  		// Create namespace and modify input to have correct namespace set
    81  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
    82  		g.Expect(err).ToNot(HaveOccurred())
    83  		cluster1 := cluster.DeepCopy()
    84  		cluster1.SetNamespace(namespace.GetName())
    85  		cluster1Shim := clustershim.New(cluster1)
    86  
    87  		// Create a scope with a cluster and InfrastructureCluster yet to be created.
    88  		s := scope.New(cluster1)
    89  		s.Desired = &scope.ClusterState{
    90  			InfrastructureCluster: infrastructureCluster.DeepCopy(),
    91  			ControlPlane: &scope.ControlPlaneState{
    92  				Object: controlPlane.DeepCopy(),
    93  			},
    94  		}
    95  
    96  		// Run reconcileClusterShim.
    97  		r := Reconciler{
    98  			Client:             env,
    99  			APIReader:          env.GetAPIReader(),
   100  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
   101  		}
   102  		err = r.reconcileClusterShim(ctx, s)
   103  		g.Expect(err).ToNot(HaveOccurred())
   104  
   105  		// Check cluster shim exists.
   106  		shim := cluster1Shim.DeepCopy()
   107  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
   108  		g.Expect(err).ToNot(HaveOccurred())
   109  
   110  		// Check shim is assigned as owner for InfrastructureCluster and ControlPlane objects.
   111  		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()).To(HaveLen(1))
   112  		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
   113  		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()).To(HaveLen(1))
   114  		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
   115  		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
   116  	})
   117  	t.Run("Shim creation is re-entrant", func(t *testing.T) {
   118  		g := NewWithT(t)
   119  
   120  		// Create namespace and modify input to have correct namespace set
   121  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
   122  		g.Expect(err).ToNot(HaveOccurred())
   123  		cluster1 := cluster.DeepCopy()
   124  		cluster1.SetNamespace(namespace.GetName())
   125  		cluster1Shim := clustershim.New(cluster1)
   126  
   127  		// Create a scope with a cluster and InfrastructureCluster yet to be created.
   128  		s := scope.New(cluster1)
   129  		s.Desired = &scope.ClusterState{
   130  			InfrastructureCluster: infrastructureCluster.DeepCopy(),
   131  			ControlPlane: &scope.ControlPlaneState{
   132  				Object: controlPlane.DeepCopy(),
   133  			},
   134  		}
   135  
   136  		// Pre-create a shim
   137  		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())
   138  
   139  		// Run reconcileClusterShim.
   140  		r := Reconciler{
   141  			Client:             env,
   142  			APIReader:          env.GetAPIReader(),
   143  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
   144  		}
   145  		err = r.reconcileClusterShim(ctx, s)
   146  		g.Expect(err).ToNot(HaveOccurred())
   147  
   148  		// Check cluster shim exists.
   149  		shim := cluster1Shim.DeepCopy()
   150  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
   151  		g.Expect(err).ToNot(HaveOccurred())
   152  
   153  		// Check shim is assigned as owner for InfrastructureCluster and ControlPlane objects.
   154  		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()).To(HaveLen(1))
   155  		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
   156  		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()).To(HaveLen(1))
   157  		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
   158  
   159  		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
   160  	})
   161  	t.Run("Shim is not deleted if InfrastructureCluster and ControlPlane objects are waiting to be reconciled", func(t *testing.T) {
   162  		g := NewWithT(t)
   163  
   164  		// Create namespace and modify input to have correct namespace set
   165  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
   166  		g.Expect(err).ToNot(HaveOccurred())
   167  		cluster1 := cluster.DeepCopy()
   168  		cluster1.SetNamespace(namespace.GetName())
   169  		cluster1Shim := clustershim.New(cluster1)
   170  
   171  		// Create a scope with a cluster and InfrastructureCluster created but not yet reconciled.
   172  		s := scope.New(cluster1)
   173  		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
   174  		s.Current.ControlPlane = &scope.ControlPlaneState{
   175  			Object: controlPlane.DeepCopy(),
   176  		}
   177  
   178  		// Add the shim as a temporary owner for the InfrastructureCluster and ControlPlane.
   179  		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
   180  		ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")))
   181  		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
   182  		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
   183  		ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")))
   184  		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)
   185  
   186  		// Pre-create a shim
   187  		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())
   188  
   189  		// Run reconcileClusterShim.
   190  		r := Reconciler{
   191  			Client:             env,
   192  			APIReader:          env.GetAPIReader(),
   193  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
   194  		}
   195  		err = r.reconcileClusterShim(ctx, s)
   196  		g.Expect(err).ToNot(HaveOccurred())
   197  
   198  		// Check cluster shim exists.
   199  		shim := cluster1Shim.DeepCopy()
   200  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
   201  		g.Expect(err).ToNot(HaveOccurred())
   202  
   203  		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
   204  	})
   205  	t.Run("Shim gets deleted when InfrastructureCluster and ControlPlane objects have been reconciled", func(t *testing.T) {
   206  		g := NewWithT(t)
   207  
   208  		// Create namespace and modify input to have correct namespace set
   209  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
   210  		g.Expect(err).ToNot(HaveOccurred())
   211  		cluster1 := cluster.DeepCopy()
   212  		cluster1.SetNamespace(namespace.GetName())
   213  		cluster1Shim := clustershim.New(cluster1)
   214  
   215  		// Create a scope with a cluster and InfrastructureCluster created and reconciled.
   216  		s := scope.New(cluster1)
   217  		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
   218  		s.Current.ControlPlane = &scope.ControlPlaneState{
   219  			Object: controlPlane.DeepCopy(),
   220  		}
   221  
   222  		// Add the shim as a temporary owner for the InfrastructureCluster and ControlPlane.
   223  		// Add the cluster as a final owner for the InfrastructureCluster and ControlPlane (reconciled).
   224  		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
   225  		ownerRefs = append(
   226  			ownerRefs,
   227  			*ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")),
   228  			*ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster")))
   229  		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
   230  		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
   231  		ownerRefs = append(
   232  			ownerRefs,
   233  			*ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")),
   234  			*ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster")))
   235  		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)
   236  
   237  		// Pre-create a shim
   238  		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())
   239  
   240  		// Run reconcileClusterShim.
   241  		r := Reconciler{
   242  			Client:             env,
   243  			APIReader:          env.GetAPIReader(),
   244  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
   245  		}
   246  		err = r.reconcileClusterShim(ctx, s)
   247  		g.Expect(err).ToNot(HaveOccurred())
   248  
   249  		// Check the cluster shim has been deleted.
   250  		shim := cluster1Shim.DeepCopy()
   251  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
   252  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
   253  
   254  		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
   255  	})
   256  	t.Run("No-op if InfrastructureCluster and ControlPlane objects have been reconciled and the shim is gone", func(t *testing.T) {
   257  		g := NewWithT(t)
   258  
   259  		// Create namespace and modify input to have correct namespace set
   260  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
   261  		g.Expect(err).ToNot(HaveOccurred())
   262  		cluster1 := cluster.DeepCopy()
   263  		cluster1.SetNamespace(namespace.GetName())
   264  		cluster1Shim := clustershim.New(cluster1)
   265  
   266  		// Create a scope with a cluster and InfrastructureCluster created and reconciled.
   267  		s := scope.New(cluster1)
   268  		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
   269  		s.Current.ControlPlane = &scope.ControlPlaneState{
   270  			Object: controlPlane.DeepCopy(),
   271  		}
   272  
   273  		// Add the cluster as a final owner for the InfrastructureCluster and ControlPlane (reconciled).
   274  		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
   275  		ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster")))
   276  		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
   277  		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
   278  		ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster")))
   279  		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)
   280  
   281  		// Run reconcileClusterShim using a nil client, so an error will be triggered if any operation is attempted
   282  		r := Reconciler{
   283  			Client:             nil,
   284  			APIReader:          env.GetAPIReader(),
   285  			patchHelperFactory: serverSideApplyPatchHelperFactory(nil, ssa.NewCache()),
   286  		}
   287  		err = r.reconcileClusterShim(ctx, s)
   288  		g.Expect(err).ToNot(HaveOccurred())
   289  
   290  		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
   291  	})
   292  }
   293  
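        // TestReconcile_callAfterControlPlaneInitialized verifies that the AfterControlPlaneInitialized hook is
        // marked as pending when the Cluster is about to be created, called only once the control plane is
        // initialized, and unmarked only when the hook returns a success response.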
   294  func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) {
   295  	catalog := runtimecatalog.New()
   296  	_ = runtimehooksv1.AddToCatalog(catalog)
   297  
   298  	afterControlPlaneInitializedGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneInitialized)
   299  	if err != nil {
   300  		panic(err)
   301  	}
   302  
   303  	successResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{
   304  
   305  		CommonResponse: runtimehooksv1.CommonResponse{
   306  			Status: runtimehooksv1.ResponseStatusSuccess,
   307  		},
   308  	}
   309  	failureResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{
   310  		CommonResponse: runtimehooksv1.CommonResponse{
   311  			Status: runtimehooksv1.ResponseStatusFailure,
   312  		},
   313  	}
   314  
   315  	tests := []struct {
   316  		name               string
   317  		cluster            *clusterv1.Cluster
   318  		hookResponse       *runtimehooksv1.AfterControlPlaneInitializedResponse
   319  		wantMarked         bool
   320  		wantHookToBeCalled bool
   321  		wantError          bool
   322  	}{
   323  		{
   324  			name: "hook should be marked if the cluster is about to be created",
   325  			cluster: &clusterv1.Cluster{
   326  				ObjectMeta: metav1.ObjectMeta{
   327  					Name:      "test-cluster",
   328  					Namespace: "test-ns",
   329  				},
   330  				Spec: clusterv1.ClusterSpec{},
   331  			},
   332  			hookResponse:       successResponse,
   333  			wantMarked:         true,
   334  			wantHookToBeCalled: false,
   335  			wantError:          false,
   336  		},
   337  		{
   338  			name: "hook should be called if it is marked and the control plane is ready - the hook should become unmarked for a success response",
   339  			cluster: &clusterv1.Cluster{
   340  				ObjectMeta: metav1.ObjectMeta{
   341  					Name:      "test-cluster",
   342  					Namespace: "test-ns",
   343  					Annotations: map[string]string{
   344  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   345  					},
   346  				},
   347  				Spec: clusterv1.ClusterSpec{
   348  					ControlPlaneRef:   &corev1.ObjectReference{},
   349  					InfrastructureRef: &corev1.ObjectReference{},
   350  				},
   351  				Status: clusterv1.ClusterStatus{
   352  					Conditions: clusterv1.Conditions{
   353  						clusterv1.Condition{
   354  							Type:   clusterv1.ControlPlaneInitializedCondition,
   355  							Status: corev1.ConditionTrue,
   356  						},
   357  					},
   358  				},
   359  			},
   360  			hookResponse:       successResponse,
   361  			wantMarked:         false,
   362  			wantHookToBeCalled: true,
   363  			wantError:          false,
   364  		},
   365  		{
   366  			name: "hook should be called if it is marked and the control plane is ready - the hook should remain marked for a failure response",
   367  			cluster: &clusterv1.Cluster{
   368  				ObjectMeta: metav1.ObjectMeta{
   369  					Name:      "test-cluster",
   370  					Namespace: "test-ns",
   371  					Annotations: map[string]string{
   372  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   373  					},
   374  				},
   375  				Spec: clusterv1.ClusterSpec{
   376  					ControlPlaneRef:   &corev1.ObjectReference{},
   377  					InfrastructureRef: &corev1.ObjectReference{},
   378  				},
   379  				Status: clusterv1.ClusterStatus{
   380  					Conditions: clusterv1.Conditions{
   381  						clusterv1.Condition{
   382  							Type:   clusterv1.ControlPlaneInitializedCondition,
   383  							Status: corev1.ConditionTrue,
   384  						},
   385  					},
   386  				},
   387  			},
   388  			hookResponse:       failureResponse,
   389  			wantMarked:         true,
   390  			wantHookToBeCalled: true,
   391  			wantError:          true,
   392  		},
   393  		{
   394  			name: "hook should not be called if it is marked and the control plane is not ready - the hook should remain marked",
   395  			cluster: &clusterv1.Cluster{
   396  				ObjectMeta: metav1.ObjectMeta{
   397  					Name:      "test-cluster",
   398  					Namespace: "test-ns",
   399  					Annotations: map[string]string{
   400  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   401  					},
   402  				},
   403  				Spec: clusterv1.ClusterSpec{
   404  					ControlPlaneRef:   &corev1.ObjectReference{},
   405  					InfrastructureRef: &corev1.ObjectReference{},
   406  				},
   407  				Status: clusterv1.ClusterStatus{
   408  					Conditions: clusterv1.Conditions{
   409  						clusterv1.Condition{
   410  							Type:   clusterv1.ControlPlaneInitializedCondition,
   411  							Status: corev1.ConditionFalse,
   412  						},
   413  					},
   414  				},
   415  			},
   416  			hookResponse:       failureResponse,
   417  			wantMarked:         true,
   418  			wantHookToBeCalled: false,
   419  			wantError:          false,
   420  		},
   421  		{
   422  			name: "hook should not be called if it is not marked",
   423  			cluster: &clusterv1.Cluster{
   424  				ObjectMeta: metav1.ObjectMeta{
   425  					Name:      "test-cluster",
   426  					Namespace: "test-ns",
   427  				},
   428  				Spec: clusterv1.ClusterSpec{
   429  					ControlPlaneRef:   &corev1.ObjectReference{},
   430  					InfrastructureRef: &corev1.ObjectReference{},
   431  				},
   432  				Status: clusterv1.ClusterStatus{
   433  					Conditions: clusterv1.Conditions{
   434  						clusterv1.Condition{
   435  							Type:   clusterv1.ControlPlaneInitializedCondition,
   436  							Status: corev1.ConditionTrue,
   437  						},
   438  					},
   439  				},
   440  			},
   441  			hookResponse:       failureResponse,
   442  			wantMarked:         false,
   443  			wantHookToBeCalled: false,
   444  			wantError:          false,
   445  		},
   446  	}
   447  
   448  	for _, tt := range tests {
   449  		t.Run(tt.name, func(t *testing.T) {
   450  			g := NewWithT(t)
   451  
   452  			s := &scope.Scope{
   453  				Current: &scope.ClusterState{
   454  					Cluster: tt.cluster,
   455  				},
   456  				HookResponseTracker: scope.NewHookResponseTracker(),
   457  			}
   458  
   459  			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
   460  				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
   461  					afterControlPlaneInitializedGVH: tt.hookResponse,
   462  				}).
   463  				WithCatalog(catalog).
   464  				Build()
   465  
   466  			fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build()
   467  
   468  			r := &Reconciler{
   469  				Client:        fakeClient,
   470  				APIReader:     fakeClient,
   471  				RuntimeClient: fakeRuntimeClient,
   472  			}
   473  
   474  			err := r.callAfterControlPlaneInitialized(ctx, s)
   475  			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneInitialized) == 1).To(Equal(tt.wantHookToBeCalled))
   476  			g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneInitialized, tt.cluster)).To(Equal(tt.wantMarked))
   477  			g.Expect(err != nil).To(Equal(tt.wantError))
   478  		})
   479  	}
   480  }
   481  
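        // TestReconcile_callAfterClusterUpgrade verifies that the AfterClusterUpgrade hook is called only when the
        // control plane, MachineDeployments, and MachinePools are all stable at the topology version, and that the
        // pending-hook annotation is removed only on a success response.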
   482  func TestReconcile_callAfterClusterUpgrade(t *testing.T) {
   483  	catalog := runtimecatalog.New()
   484  	_ = runtimehooksv1.AddToCatalog(catalog)
   485  
   486  	afterClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterClusterUpgrade)
   487  	if err != nil {
   488  		panic(err)
   489  	}
   490  
   491  	successResponse := &runtimehooksv1.AfterClusterUpgradeResponse{
   492  
   493  		CommonResponse: runtimehooksv1.CommonResponse{
   494  			Status: runtimehooksv1.ResponseStatusSuccess,
   495  		},
   496  	}
   497  	failureResponse := &runtimehooksv1.AfterClusterUpgradeResponse{
   498  		CommonResponse: runtimehooksv1.CommonResponse{
   499  			Status: runtimehooksv1.ResponseStatusFailure,
   500  		},
   501  	}
   502  
   503  	topologyVersion := "v1.2.3"
   504  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
   505  		Build()
   506  
   507  	tests := []struct {
   508  		name               string
   509  		s                  *scope.Scope
   510  		hookResponse       *runtimehooksv1.AfterClusterUpgradeResponse
   511  		wantMarked         bool
   512  		wantHookToBeCalled bool
   513  		wantError          bool
   514  	}{
   515  		{
   516  			name: "hook should not be called if it is not marked",
   517  			s: &scope.Scope{
   518  				Blueprint: &scope.ClusterBlueprint{
   519  					Topology: &clusterv1.Topology{
   520  						ControlPlane: clusterv1.ControlPlaneTopology{
   521  							Replicas: ptr.To[int32](2),
   522  						},
   523  					},
   524  				},
   525  				Current: &scope.ClusterState{
   526  					Cluster: &clusterv1.Cluster{
   527  						ObjectMeta: metav1.ObjectMeta{
   528  							Name:      "test-cluster",
   529  							Namespace: "test-ns",
   530  						},
   531  						Spec: clusterv1.ClusterSpec{},
   532  					},
   533  					ControlPlane: &scope.ControlPlaneState{
   534  						Object: controlPlaneObj,
   535  					},
   536  				},
   537  				HookResponseTracker: scope.NewHookResponseTracker(),
   538  				UpgradeTracker:      scope.NewUpgradeTracker(),
   539  			},
   540  			wantMarked:         false,
   541  			hookResponse:       successResponse,
   542  			wantHookToBeCalled: false,
   543  			wantError:          false,
   544  		},
   545  		{
   546  			name: "hook should not be called if the control plane is starting a new upgrade - hook is marked",
   547  			s: &scope.Scope{
   548  				Blueprint: &scope.ClusterBlueprint{
   549  					Topology: &clusterv1.Topology{
   550  						ControlPlane: clusterv1.ControlPlaneTopology{
   551  							Replicas: ptr.To[int32](2),
   552  						},
   553  					},
   554  				},
   555  				Current: &scope.ClusterState{
   556  					Cluster: &clusterv1.Cluster{
   557  						ObjectMeta: metav1.ObjectMeta{
   558  							Name:      "test-cluster",
   559  							Namespace: "test-ns",
   560  							Annotations: map[string]string{
   561  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   562  							},
   563  						},
   564  						Spec: clusterv1.ClusterSpec{},
   565  					},
   566  					ControlPlane: &scope.ControlPlaneState{
   567  						Object: controlPlaneObj,
   568  					},
   569  				},
   570  				HookResponseTracker: scope.NewHookResponseTracker(),
   571  				UpgradeTracker: func() *scope.UpgradeTracker {
   572  					ut := scope.NewUpgradeTracker()
   573  					ut.ControlPlane.IsStartingUpgrade = true
   574  					return ut
   575  				}(),
   576  			},
   577  			wantMarked:         true,
   578  			hookResponse:       successResponse,
   579  			wantHookToBeCalled: false,
   580  			wantError:          false,
   581  		},
   582  		{
   583  			name: "hook should not be called if the control plane is upgrading - hook is marked",
   584  			s: &scope.Scope{
   585  				Blueprint: &scope.ClusterBlueprint{
   586  					Topology: &clusterv1.Topology{
   587  						ControlPlane: clusterv1.ControlPlaneTopology{
   588  							Replicas: ptr.To[int32](2),
   589  						},
   590  					},
   591  				},
   592  				Current: &scope.ClusterState{
   593  					Cluster: &clusterv1.Cluster{
   594  						ObjectMeta: metav1.ObjectMeta{
   595  							Name:      "test-cluster",
   596  							Namespace: "test-ns",
   597  							Annotations: map[string]string{
   598  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   599  							},
   600  						},
   601  						Spec: clusterv1.ClusterSpec{},
   602  					},
   603  					ControlPlane: &scope.ControlPlaneState{
   604  						Object: controlPlaneObj,
   605  					},
   606  				},
   607  				HookResponseTracker: scope.NewHookResponseTracker(),
   608  				UpgradeTracker: func() *scope.UpgradeTracker {
   609  					ut := scope.NewUpgradeTracker()
   610  					ut.ControlPlane.IsUpgrading = true
   611  					return ut
   612  				}(),
   613  			},
   614  			wantMarked:         true,
   615  			hookResponse:       successResponse,
   616  			wantHookToBeCalled: false,
   617  			wantError:          false,
   618  		},
   619  		{
   620  			name: "hook should not be called if the control plane is scaling - hook is marked",
   621  			s: &scope.Scope{
   622  				Blueprint: &scope.ClusterBlueprint{
   623  					Topology: &clusterv1.Topology{
   624  						ControlPlane: clusterv1.ControlPlaneTopology{
   625  							Replicas: ptr.To[int32](2),
   626  						},
   627  					},
   628  				},
   629  				Current: &scope.ClusterState{
   630  					Cluster: &clusterv1.Cluster{
   631  						ObjectMeta: metav1.ObjectMeta{
   632  							Name:      "test-cluster",
   633  							Namespace: "test-ns",
   634  							Annotations: map[string]string{
   635  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   636  							},
   637  						},
   638  						Spec: clusterv1.ClusterSpec{},
   639  					},
   640  					ControlPlane: &scope.ControlPlaneState{
   641  						Object: controlPlaneObj,
   642  					},
   643  				},
   644  				HookResponseTracker: scope.NewHookResponseTracker(),
   645  				UpgradeTracker: func() *scope.UpgradeTracker {
   646  					ut := scope.NewUpgradeTracker()
   647  					ut.ControlPlane.IsScaling = true
   648  					return ut
   649  				}(),
   650  			},
   651  			wantMarked:         true,
   652  			hookResponse:       successResponse,
   653  			wantHookToBeCalled: false,
   654  			wantError:          false,
   655  		},
   656  		{
   657  			name: "hook should not be called if the control plane is pending an upgrade - hook is marked",
   658  			s: &scope.Scope{
   659  				Blueprint: &scope.ClusterBlueprint{
   660  					Topology: &clusterv1.Topology{
   661  						ControlPlane: clusterv1.ControlPlaneTopology{
   662  							Replicas: ptr.To[int32](2),
   663  						},
   664  					},
   665  				},
   666  				Current: &scope.ClusterState{
   667  					Cluster: &clusterv1.Cluster{
   668  						ObjectMeta: metav1.ObjectMeta{
   669  							Name:      "test-cluster",
   670  							Namespace: "test-ns",
   671  							Annotations: map[string]string{
   672  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   673  							},
   674  						},
   675  						Spec: clusterv1.ClusterSpec{},
   676  					},
   677  					ControlPlane: &scope.ControlPlaneState{
   678  						Object: controlPlaneObj,
   679  					},
   680  				},
   681  				HookResponseTracker: scope.NewHookResponseTracker(),
   682  				UpgradeTracker: func() *scope.UpgradeTracker {
   683  					ut := scope.NewUpgradeTracker()
   684  					ut.ControlPlane.IsPendingUpgrade = true
   685  					return ut
   686  				}(),
   687  			},
   688  			wantMarked:         true,
   689  			hookResponse:       successResponse,
   690  			wantHookToBeCalled: false,
   691  			wantError:          false,
   692  		},
   693  		{
   694  			name: "hook should not be called if the control plane is stable at desired version but MDs are upgrading - hook is marked",
   695  			s: &scope.Scope{
   696  				Blueprint: &scope.ClusterBlueprint{
   697  					Topology: &clusterv1.Topology{
   698  						ControlPlane: clusterv1.ControlPlaneTopology{
   699  							Replicas: ptr.To[int32](2),
   700  						},
   701  					},
   702  				},
   703  				Current: &scope.ClusterState{
   704  					Cluster: &clusterv1.Cluster{
   705  						ObjectMeta: metav1.ObjectMeta{
   706  							Name:      "test-cluster",
   707  							Namespace: "test-ns",
   708  							Annotations: map[string]string{
   709  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   710  							},
   711  						},
   712  						Spec: clusterv1.ClusterSpec{},
   713  					},
   714  					ControlPlane: &scope.ControlPlaneState{
   715  						Object: controlPlaneObj,
   716  					},
   717  				},
   718  				HookResponseTracker: scope.NewHookResponseTracker(),
   719  				UpgradeTracker: func() *scope.UpgradeTracker {
   720  					ut := scope.NewUpgradeTracker()
   721  					ut.ControlPlane.IsPendingUpgrade = false
   722  					ut.MachineDeployments.MarkUpgrading("md1")
   723  					return ut
   724  				}(),
   725  			},
   726  			wantMarked:         true,
   727  			hookResponse:       successResponse,
   728  			wantHookToBeCalled: false,
   729  			wantError:          false,
   730  		},
   731  		{
   732  			name: "hook should not be called if the control plane is stable at desired version but MPs are upgrading - hook is marked",
   733  			s: &scope.Scope{
   734  				Blueprint: &scope.ClusterBlueprint{
   735  					Topology: &clusterv1.Topology{
   736  						ControlPlane: clusterv1.ControlPlaneTopology{
   737  							Replicas: ptr.To[int32](2),
   738  						},
   739  					},
   740  				},
   741  				Current: &scope.ClusterState{
   742  					Cluster: &clusterv1.Cluster{
   743  						ObjectMeta: metav1.ObjectMeta{
   744  							Name:      "test-cluster",
   745  							Namespace: "test-ns",
   746  							Annotations: map[string]string{
   747  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   748  							},
   749  						},
   750  						Spec: clusterv1.ClusterSpec{},
   751  					},
   752  					ControlPlane: &scope.ControlPlaneState{
   753  						Object: controlPlaneObj,
   754  					},
   755  				},
   756  				HookResponseTracker: scope.NewHookResponseTracker(),
   757  				UpgradeTracker: func() *scope.UpgradeTracker {
   758  					ut := scope.NewUpgradeTracker()
   759  					ut.ControlPlane.IsPendingUpgrade = false
   760  					ut.MachinePools.MarkUpgrading("mp1")
   761  					return ut
   762  				}(),
   763  			},
   764  			wantMarked:         true,
   765  			hookResponse:       successResponse,
   766  			wantHookToBeCalled: false,
   767  			wantError:          false,
   768  		},
   769  		{
   770  			name: "hook should not be called if the control plane is stable at desired version but MDs are pending create - hook is marked",
   771  			s: &scope.Scope{
   772  				Blueprint: &scope.ClusterBlueprint{
   773  					Topology: &clusterv1.Topology{
   774  						ControlPlane: clusterv1.ControlPlaneTopology{
   775  							Replicas: ptr.To[int32](2),
   776  						},
   777  					},
   778  				},
   779  				Current: &scope.ClusterState{
   780  					Cluster: &clusterv1.Cluster{
   781  						ObjectMeta: metav1.ObjectMeta{
   782  							Name:      "test-cluster",
   783  							Namespace: "test-ns",
   784  							Annotations: map[string]string{
   785  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   786  							},
   787  						},
   788  						Spec: clusterv1.ClusterSpec{},
   789  					},
   790  					ControlPlane: &scope.ControlPlaneState{
   791  						Object: controlPlaneObj,
   792  					}},
   793  				HookResponseTracker: scope.NewHookResponseTracker(),
   794  				UpgradeTracker: func() *scope.UpgradeTracker {
   795  					ut := scope.NewUpgradeTracker()
   796  					ut.ControlPlane.IsPendingUpgrade = false
   797  					ut.MachineDeployments.MarkPendingCreate("md-topology-1")
   798  					return ut
   799  				}(),
   800  			},
   801  			wantMarked:         true,
   802  			hookResponse:       successResponse,
   803  			wantHookToBeCalled: false,
   804  			wantError:          false,
   805  		},
   806  		{
   807  			name: "hook should not be called if the control plane is stable at desired version but MPs are pending create - hook is marked",
   808  			s: &scope.Scope{
   809  				Blueprint: &scope.ClusterBlueprint{
   810  					Topology: &clusterv1.Topology{
   811  						ControlPlane: clusterv1.ControlPlaneTopology{
   812  							Replicas: ptr.To[int32](2),
   813  						},
   814  					},
   815  				},
   816  				Current: &scope.ClusterState{
   817  					Cluster: &clusterv1.Cluster{
   818  						ObjectMeta: metav1.ObjectMeta{
   819  							Name:      "test-cluster",
   820  							Namespace: "test-ns",
   821  							Annotations: map[string]string{
   822  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   823  							},
   824  						},
   825  						Spec: clusterv1.ClusterSpec{},
   826  					},
   827  					ControlPlane: &scope.ControlPlaneState{
   828  						Object: controlPlaneObj,
   829  					}},
   830  				HookResponseTracker: scope.NewHookResponseTracker(),
   831  				UpgradeTracker: func() *scope.UpgradeTracker {
   832  					ut := scope.NewUpgradeTracker()
   833  					ut.ControlPlane.IsPendingUpgrade = false
   834  					ut.MachinePools.MarkPendingCreate("mp-topology-1")
   835  					return ut
   836  				}(),
   837  			},
   838  			wantMarked:         true,
   839  			hookResponse:       successResponse,
   840  			wantHookToBeCalled: false,
   841  			wantError:          false,
   842  		},
   843  		{
   844  			name: "hook should not be called if the control plane is stable at desired version but MDs are pending upgrade - hook is marked",
   845  			s: &scope.Scope{
   846  				Blueprint: &scope.ClusterBlueprint{
   847  					Topology: &clusterv1.Topology{
   848  						ControlPlane: clusterv1.ControlPlaneTopology{
   849  							Replicas: ptr.To[int32](2),
   850  						},
   851  					},
   852  				},
   853  				Current: &scope.ClusterState{
   854  					Cluster: &clusterv1.Cluster{
   855  						ObjectMeta: metav1.ObjectMeta{
   856  							Name:      "test-cluster",
   857  							Namespace: "test-ns",
   858  							Annotations: map[string]string{
   859  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   860  							},
   861  						},
   862  						Spec: clusterv1.ClusterSpec{},
   863  					},
   864  					ControlPlane: &scope.ControlPlaneState{
   865  						Object: controlPlaneObj,
   866  					}},
   867  				HookResponseTracker: scope.NewHookResponseTracker(),
   868  				UpgradeTracker: func() *scope.UpgradeTracker {
   869  					ut := scope.NewUpgradeTracker()
   870  					ut.ControlPlane.IsPendingUpgrade = false
   871  					ut.MachineDeployments.MarkPendingUpgrade("md1")
   872  					return ut
   873  				}(),
   874  			},
   875  			wantMarked:         true,
   876  			hookResponse:       successResponse,
   877  			wantHookToBeCalled: false,
   878  			wantError:          false,
   879  		},
   880  		{
   881  			name: "hook should not be called if the control plane is stable at desired version but MPs are pending upgrade - hook is marked",
   882  			s: &scope.Scope{
   883  				Blueprint: &scope.ClusterBlueprint{
   884  					Topology: &clusterv1.Topology{
   885  						ControlPlane: clusterv1.ControlPlaneTopology{
   886  							Replicas: ptr.To[int32](2),
   887  						},
   888  					},
   889  				},
   890  				Current: &scope.ClusterState{
   891  					Cluster: &clusterv1.Cluster{
   892  						ObjectMeta: metav1.ObjectMeta{
   893  							Name:      "test-cluster",
   894  							Namespace: "test-ns",
   895  							Annotations: map[string]string{
   896  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   897  							},
   898  						},
   899  						Spec: clusterv1.ClusterSpec{},
   900  					},
   901  					ControlPlane: &scope.ControlPlaneState{
   902  						Object: controlPlaneObj,
   903  					}},
   904  				HookResponseTracker: scope.NewHookResponseTracker(),
   905  				UpgradeTracker: func() *scope.UpgradeTracker {
   906  					ut := scope.NewUpgradeTracker()
   907  					ut.ControlPlane.IsPendingUpgrade = false
   908  					ut.MachinePools.MarkPendingUpgrade("mp1")
   909  					return ut
   910  				}(),
   911  			},
   912  			wantMarked:         true,
   913  			hookResponse:       successResponse,
   914  			wantHookToBeCalled: false,
   915  			wantError:          false,
   916  		},
   917  		{
   918  			name: "hook should not be called if the control plane is stable at desired version but MDs upgrade is deferred - hook is marked",
   919  			s: &scope.Scope{
   920  				Blueprint: &scope.ClusterBlueprint{
   921  					Topology: &clusterv1.Topology{
   922  						ControlPlane: clusterv1.ControlPlaneTopology{
   923  							Replicas: ptr.To[int32](2),
   924  						},
   925  					},
   926  				},
   927  				Current: &scope.ClusterState{
   928  					Cluster: &clusterv1.Cluster{
   929  						ObjectMeta: metav1.ObjectMeta{
   930  							Name:      "test-cluster",
   931  							Namespace: "test-ns",
   932  							Annotations: map[string]string{
   933  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   934  							},
   935  						},
   936  						Spec: clusterv1.ClusterSpec{},
   937  					},
   938  					ControlPlane: &scope.ControlPlaneState{
   939  						Object: controlPlaneObj,
   940  					},
   941  				},
   942  				HookResponseTracker: scope.NewHookResponseTracker(),
   943  				UpgradeTracker: func() *scope.UpgradeTracker {
   944  					ut := scope.NewUpgradeTracker()
   945  					ut.ControlPlane.IsPendingUpgrade = false
   946  					ut.MachineDeployments.MarkDeferredUpgrade("md1")
   947  					return ut
   948  				}(),
   949  			},
   950  			wantMarked:         true,
   951  			hookResponse:       successResponse,
   952  			wantHookToBeCalled: false,
   953  			wantError:          false,
   954  		},
   955  		{
   956  			name: "hook should not be called if the control plane is stable at desired version but MPs upgrade is deferred - hook is marked",
   957  			s: &scope.Scope{
   958  				Blueprint: &scope.ClusterBlueprint{
   959  					Topology: &clusterv1.Topology{
   960  						ControlPlane: clusterv1.ControlPlaneTopology{
   961  							Replicas: ptr.To[int32](2),
   962  						},
   963  					},
   964  				},
   965  				Current: &scope.ClusterState{
   966  					Cluster: &clusterv1.Cluster{
   967  						ObjectMeta: metav1.ObjectMeta{
   968  							Name:      "test-cluster",
   969  							Namespace: "test-ns",
   970  							Annotations: map[string]string{
   971  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   972  							},
   973  						},
   974  						Spec: clusterv1.ClusterSpec{},
   975  					},
   976  					ControlPlane: &scope.ControlPlaneState{
   977  						Object: controlPlaneObj,
   978  					},
   979  				},
   980  				HookResponseTracker: scope.NewHookResponseTracker(),
   981  				UpgradeTracker: func() *scope.UpgradeTracker {
   982  					ut := scope.NewUpgradeTracker()
   983  					ut.ControlPlane.IsPendingUpgrade = false
   984  					ut.MachinePools.MarkDeferredUpgrade("mp1")
   985  					return ut
   986  				}(),
   987  			},
   988  			wantMarked:         true,
   989  			hookResponse:       successResponse,
   990  			wantHookToBeCalled: false,
   991  			wantError:          false,
   992  		},
   993  		{
   994  			name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - success response should unmark the hook",
   995  			s: &scope.Scope{
   996  				Blueprint: &scope.ClusterBlueprint{
   997  					Topology: &clusterv1.Topology{
   998  						ControlPlane: clusterv1.ControlPlaneTopology{
   999  							Replicas: ptr.To[int32](2),
  1000  						},
  1001  					},
  1002  				},
  1003  				Current: &scope.ClusterState{
  1004  					Cluster: &clusterv1.Cluster{
  1005  						ObjectMeta: metav1.ObjectMeta{
  1006  							Name:      "test-cluster",
  1007  							Namespace: "test-ns",
  1008  							Annotations: map[string]string{
  1009  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
  1010  							},
  1011  						},
  1012  						Spec: clusterv1.ClusterSpec{
  1013  							Topology: &clusterv1.Topology{
  1014  								Version: topologyVersion,
  1015  							},
  1016  						},
  1017  					},
  1018  					ControlPlane: &scope.ControlPlaneState{
  1019  						Object: controlPlaneObj,
  1020  					},
  1021  				},
  1022  				HookResponseTracker: scope.NewHookResponseTracker(),
  1023  				UpgradeTracker:      scope.NewUpgradeTracker(),
  1024  			},
  1025  			wantMarked:         false,
  1026  			hookResponse:       successResponse,
  1027  			wantHookToBeCalled: true,
  1028  			wantError:          false,
  1029  		},
  1030  		{
  1031  			name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - failure response should leave the hook marked",
  1032  			s: &scope.Scope{
  1033  				Blueprint: &scope.ClusterBlueprint{
  1034  					Topology: &clusterv1.Topology{
  1035  						ControlPlane: clusterv1.ControlPlaneTopology{
  1036  							Replicas: ptr.To[int32](2),
  1037  						},
  1038  					},
  1039  				},
  1040  				Current: &scope.ClusterState{
  1041  					Cluster: &clusterv1.Cluster{
  1042  						ObjectMeta: metav1.ObjectMeta{
  1043  							Name:      "test-cluster",
  1044  							Namespace: "test-ns",
  1045  							Annotations: map[string]string{
  1046  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
  1047  							},
  1048  						},
  1049  						Spec: clusterv1.ClusterSpec{
  1050  							Topology: &clusterv1.Topology{
  1051  								Version: topologyVersion,
  1052  							},
  1053  						},
  1054  					},
  1055  					ControlPlane: &scope.ControlPlaneState{
  1056  						Object: controlPlaneObj,
  1057  					},
  1058  				},
  1059  				HookResponseTracker: scope.NewHookResponseTracker(),
  1060  				UpgradeTracker:      scope.NewUpgradeTracker(),
  1061  			},
  1062  			wantMarked:         true,
  1063  			hookResponse:       failureResponse,
  1064  			wantHookToBeCalled: true,
  1065  			wantError:          true,
  1066  		},
  1067  	}
  1068  
  1069  	for _, tt := range tests {
  1070  		t.Run(tt.name, func(t *testing.T) {
  1071  			g := NewWithT(t)
  1072  
  1073  			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1074  				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1075  					afterClusterUpgradeGVH: tt.hookResponse,
  1076  				}).
  1077  				WithCatalog(catalog).
  1078  				Build()
  1079  
  1080  			fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build()
  1081  
  1082  			r := &Reconciler{
  1083  				Client:                fakeClient,
  1084  				APIReader:             fakeClient,
  1085  				RuntimeClient:         fakeRuntimeClient,
  1086  				desiredStateGenerator: desiredstate.NewGenerator(fakeClient, nil, fakeRuntimeClient),
  1087  			}
  1088  
  1089  			err := r.callAfterClusterUpgrade(ctx, tt.s)
  1090  			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterClusterUpgrade) == 1).To(Equal(tt.wantHookToBeCalled))
  1091  			g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantMarked))
  1092  			g.Expect(err != nil).To(Equal(tt.wantError))
  1093  		})
  1094  	}
  1095  }
  1096  
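        // TestReconcileCluster verifies that reconcileCluster sets the InfrastructureCluster and ControlPlane
        // references on the Cluster, and that it is a no-op when those references are already set.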
  1097  func TestReconcileCluster(t *testing.T) {
  1098  	cluster1 := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  1099  		Build()
  1100  	cluster1WithReferences := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  1101  		WithInfrastructureCluster(builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").
  1102  			Build()).
  1103  		WithControlPlane(builder.TestControlPlane(metav1.NamespaceDefault, "control-plane1").Build()).
  1104  		Build()
  1105  	cluster2WithReferences := cluster1WithReferences.DeepCopy()
  1106  	cluster2WithReferences.SetGroupVersionKind(cluster1WithReferences.GroupVersionKind())
  1107  	cluster2WithReferences.Name = "cluster2"
  1108  
  1109  	tests := []struct {
  1110  		name    string
  1111  		current *clusterv1.Cluster
  1112  		desired *clusterv1.Cluster
  1113  		want    *clusterv1.Cluster
  1114  		wantErr bool
  1115  	}{
  1116  		{
  1117  			name:    "Should update the cluster if infrastructure and control plane references are not set",
  1118  			current: cluster1,
  1119  			desired: cluster1WithReferences,
  1120  			want:    cluster1WithReferences,
  1121  			wantErr: false,
  1122  		},
  1123  		{
  1124  			name:    "Should be a no-op if infrastructure and control plane references are already set",
  1125  			current: cluster2WithReferences,
  1126  			desired: cluster2WithReferences,
  1127  			want:    cluster2WithReferences,
  1128  			wantErr: false,
  1129  		},
  1130  	}
  1131  	for _, tt := range tests {
  1132  		t.Run(tt.name, func(t *testing.T) {
  1133  			g := NewWithT(t)
  1134  
  1135  			// Create namespace and modify input to have correct namespace set
  1136  			namespace, err := env.CreateNamespace(ctx, "reconcile-cluster")
  1137  			g.Expect(err).ToNot(HaveOccurred())
  1138  			if tt.desired != nil {
  1139  				tt.desired = prepareCluster(tt.desired, namespace.GetName())
  1140  			}
  1141  			if tt.want != nil {
  1142  				tt.want = prepareCluster(tt.want, namespace.GetName())
  1143  			}
  1144  			if tt.current != nil {
  1145  				tt.current = prepareCluster(tt.current, namespace.GetName())
  1146  			}
  1147  
  1148  			if tt.current != nil {
  1149  				// NOTE: it is ok to use Create given that Clusters are created by the user.
  1150  				g.Expect(env.CreateAndWait(ctx, tt.current)).To(Succeed())
  1151  			}
  1152  
  1153  			s := scope.New(tt.current)
  1154  
  1155  			s.Desired = &scope.ClusterState{Cluster: tt.desired}
  1156  
  1157  			r := Reconciler{
  1158  				Client:             env,
  1159  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1160  				recorder:           env.GetEventRecorderFor("test"),
  1161  			}
  1162  			err = r.reconcileCluster(ctx, s)
  1163  			if tt.wantErr {
  1164  				g.Expect(err).To(HaveOccurred())
  1165  				return
  1166  			}
  1167  			g.Expect(err).ToNot(HaveOccurred())
  1168  
  1169  			got := tt.want.DeepCopy()
  1170  			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want), got)
  1171  			g.Expect(err).ToNot(HaveOccurred())
  1172  
  1173  			g.Expect(got.Spec.InfrastructureRef).To(EqualObject(tt.want.Spec.InfrastructureRef))
  1174  			g.Expect(got.Spec.ControlPlaneRef).To(EqualObject(tt.want.Spec.ControlPlaneRef))
  1175  
  1176  			if tt.current != nil {
  1177  				g.Expect(env.CleanupAndWait(ctx, tt.current)).To(Succeed())
  1178  			}
  1179  		})
  1180  	}
  1181  }
  1182  
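        // TestReconcileInfrastructureCluster verifies that reconcileInfrastructureCluster creates the desired object
        // when it does not exist yet, preserves instance-specific changes applied by other field managers, restores
        // fields owned by the topology controller when they are overridden, and fails for incompatible changes.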
  1183  func TestReconcileInfrastructureCluster(t *testing.T) {
  1184  	g := NewWithT(t)
  1185  
  1186  	// build an infrastructure cluster with a field managed by the topology controller (derived from the template).
  1187  	clusterInfrastructure1 := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").
  1188  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1189  		Build()
  1190  
  1191  	// build a patch used to simulate instance specific changes made by an external controller, and build the expected cluster infrastructure object.
  1192  	clusterInfrastructure1ExternalChanges := "{ \"spec\": { \"bar\": \"bar\" }}"
  1193  	clusterInfrastructure1WithExternalChanges := clusterInfrastructure1.DeepCopy()
  1194  	g.Expect(unstructured.SetNestedField(clusterInfrastructure1WithExternalChanges.UnstructuredContent(), "bar", "spec", "bar")).To(Succeed())
  1195  
  1196  	// build a patch used to simulate an external controller overriding a field managed by the topology controller.
  1197  	clusterInfrastructure1TemplateOverridingChanges := "{ \"spec\": { \"foo\": \"foo-override\" }}"
  1198  
  1199  	// build a desired infrastructure cluster with incompatible changes.
  1200  	clusterInfrastructure1WithIncompatibleChanges := clusterInfrastructure1.DeepCopy()
  1201  	clusterInfrastructure1WithIncompatibleChanges.SetName("infrastructure-cluster1-changed")
  1202  
  1203  	tests := []struct {
  1204  		name            string
  1205  		original        *unstructured.Unstructured
  1206  		externalChanges string
  1207  		desired         *unstructured.Unstructured
  1208  		want            *unstructured.Unstructured
  1209  		wantCreated     bool
  1210  		wantErr         bool
  1211  	}{
  1212  		{
  1213  			name:        "Should create desired InfrastructureCluster if the current does not exist yet",
  1214  			original:    nil,
  1215  			desired:     clusterInfrastructure1,
  1216  			want:        clusterInfrastructure1,
  1217  			wantCreated: true,
  1218  			wantErr:     false,
  1219  		},
  1220  		{
  1221  			name:     "No-op if current InfrastructureCluster is equal to desired",
  1222  			original: clusterInfrastructure1,
  1223  			desired:  clusterInfrastructure1,
  1224  			want:     clusterInfrastructure1,
  1225  			wantErr:  false,
  1226  		},
  1227  		{
  1228  			name:            "Should preserve changes from external controllers",
  1229  			original:        clusterInfrastructure1,
  1230  			externalChanges: clusterInfrastructure1ExternalChanges,
  1231  			desired:         clusterInfrastructure1,
  1232  			want:            clusterInfrastructure1WithExternalChanges,
  1233  			wantErr:         false,
  1234  		},
  1235  		{
  1236  			name:            "Should restore template values if overridden by external controllers",
  1237  			original:        clusterInfrastructure1,
  1238  			externalChanges: clusterInfrastructure1TemplateOverridingChanges,
  1239  			desired:         clusterInfrastructure1,
  1240  			want:            clusterInfrastructure1,
  1241  			wantErr:         false,
  1242  		},
  1243  		{
  1244  			name:     "Fails for incompatible changes",
  1245  			original: clusterInfrastructure1,
  1246  			desired:  clusterInfrastructure1WithIncompatibleChanges,
  1247  			wantErr:  true,
  1248  		},
  1249  	}
  1250  	for _, tt := range tests {
  1251  		t.Run(tt.name, func(t *testing.T) {
  1252  			g := NewWithT(t)
  1253  
  1254  			// Create namespace and modify input to have correct namespace set
  1255  			namespace, err := env.CreateNamespace(ctx, "reconcile-infrastructure-cluster")
  1256  			g.Expect(err).ToNot(HaveOccurred())
  1257  			if tt.original != nil {
  1258  				tt.original.SetNamespace(namespace.GetName())
  1259  			}
  1260  			if tt.desired != nil {
  1261  				tt.desired.SetNamespace(namespace.GetName())
  1262  			}
  1263  			if tt.want != nil {
  1264  				tt.want.SetNamespace(namespace.GetName())
  1265  			}
  1266  
  1267  			if tt.original != nil {
  1268  				// NOTE: it is required to use server-side apply to create the object in order to ensure consistency with the topology controller behaviour.
  1269  				g.Expect(env.PatchAndWait(ctx, tt.original.DeepCopy(), client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1270  				// NOTE: it is required to apply instance-specific changes with a "plain" Patch operation to simulate a different manager.
  1271  				if tt.externalChanges != "" {
  1272  					g.Expect(env.Patch(ctx, tt.original.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.externalChanges)))).To(Succeed())
  1273  				}
  1274  			}
  1275  
  1276  			s := scope.New(&clusterv1.Cluster{})
  1277  			if tt.original != nil {
  1278  				current := builder.TestInfrastructureCluster("", "").Build()
  1279  				g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original), current)).To(Succeed())
  1280  				s.Current.InfrastructureCluster = current
  1281  			}
  1282  			s.Desired = &scope.ClusterState{InfrastructureCluster: tt.desired.DeepCopy()}
  1283  
  1284  			r := Reconciler{
  1285  				Client:             env,
  1286  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1287  				recorder:           env.GetEventRecorderFor("test"),
  1288  			}
  1289  			created, err := r.reconcileInfrastructureCluster(ctx, s)
  1290  			if tt.wantErr {
  1291  				g.Expect(err).To(HaveOccurred())
  1292  				return
  1293  			}
  1294  			g.Expect(err).ToNot(HaveOccurred())
  1295  			g.Expect(created).To(Equal(tt.wantCreated))
  1296  
  1297  			got := tt.want.DeepCopy() // this is required otherwise Get will modify tt.want
  1298  			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want), got)
  1299  			g.Expect(err).ToNot(HaveOccurred())
  1300  
  1301  			// Spec
  1302  			wantSpec, ok, err := unstructured.NestedMap(tt.want.UnstructuredContent(), "spec")
  1303  			g.Expect(err).ToNot(HaveOccurred())
  1304  			g.Expect(ok).To(BeTrue())
  1305  
  1306  			gotSpec, ok, err := unstructured.NestedMap(got.UnstructuredContent(), "spec")
  1307  			g.Expect(err).ToNot(HaveOccurred())
  1308  			g.Expect(ok).To(BeTrue())
  1309  			for k, v := range wantSpec {
  1310  				g.Expect(gotSpec).To(HaveKeyWithValue(k, v))
  1311  			}
  1312  
  1313  			if tt.desired != nil {
  1314  				g.Expect(env.CleanupAndWait(ctx, tt.desired)).To(Succeed())
  1315  			}
  1316  		})
  1317  	}
  1318  }
  1319  
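        // TestReconcileControlPlane verifies reconciliation of the ControlPlane object, both for control planes
        // without machine infrastructure and for control planes that reference an InfrastructureMachineTemplate.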
  1320  func TestReconcileControlPlane(t *testing.T) {
  1321  	g := NewWithT(t)
  1322  
  1323  	// Objects for testing reconciliation of a control plane without machines.
  1324  
  1325  	// Create cluster class which does not require controlPlaneInfrastructure.
  1326  	ccWithoutControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{}
  1327  
  1328  	// Create ControlPlaneObject without machine templates.
  1329  	controlPlaneWithoutInfrastructure := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1330  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1331  		Build()
  1332  
  1333  	// Create desired ControlPlaneObject without machine templates but introducing some change.
  1334  	controlPlaneWithoutInfrastructureWithChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1335  	g.Expect(unstructured.SetNestedField(controlPlaneWithoutInfrastructureWithChanges.UnstructuredContent(), "foo-changed", "spec", "foo")).To(Succeed())
  1336  
  1337  	// Build a patch used to simulate instance specific changes made by an external controller, and build the expected control plane object.
  1338  	controlPlaneWithoutInfrastructureExternalChanges := "{ \"spec\": { \"bar\": \"bar\" }}"
  1339  	controlPlaneWithoutInfrastructureWithExternalChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1340  	g.Expect(unstructured.SetNestedField(controlPlaneWithoutInfrastructureWithExternalChanges.UnstructuredContent(), "bar", "spec", "bar")).To(Succeed())
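        	// The expected object above mirrors what the raw merge patch produces: the test body below applies the
        	// patch with client.RawPatch under a field manager other than the topology controller, and reconcile is
        	// then expected to preserve the externally added spec.bar field.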
  1341  
  1342  	// Build a patch used to simulate an external controller overriding a field managed by the topology controller.
  1343  	controlPlaneWithoutInfrastructureWithExternalOverridingChanges := "{ \"spec\": { \"foo\": \"foo-override\" }}"
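        	// Because spec.foo is owned by the topology controller (the original object is applied with
        	// client.FieldOwner(structuredmerge.TopologyManagerName)), an override of that field by another manager is
        	// expected to be reverted when reconcile re-applies the desired state; the "Should restore template values
        	// if overridden ..." test cases below verify this.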
  1344  
  1345  	// Create a desired ControlPlaneObject without machine templates but introducing incompatible changes.
  1346  	controlPlaneWithoutInfrastructureWithIncompatibleChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1347  	controlPlaneWithoutInfrastructureWithIncompatibleChanges.SetName("cp1-changed")
  1348  
  1349  	// Objects for testing reconciliation of a control plane with machines.
  1350  
  1351  	// Create a cluster class which requires controlPlaneInfrastructure.
  1352  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").
  1353  		WithSpecFields(map[string]interface{}{"spec.template.spec.foo": "foo"}).
  1354  		Build()
  1355  	ccWithControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{InfrastructureMachineTemplate: infrastructureMachineTemplate}
  1356  
  1357  	// Create ControlPlaneObject with machine templates.
  1358  	controlPlaneWithInfrastructure := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1359  		WithInfrastructureMachineTemplate(infrastructureMachineTemplate).
  1360  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1361  		Build()
  1362  
  1363  	// Create desired controlPlaneInfrastructure with some change.
  1364  	infrastructureMachineTemplateWithChanges := infrastructureMachineTemplate.DeepCopy()
  1365  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplateWithChanges.UnstructuredContent(), "foo-changed", "spec", "template", "spec", "foo")).To(Succeed())
  1366  
  1367  	// Build a patch used to simulate instance specific changes made by an external controller, and build the expected machine infrastructure object.
  1368  	infrastructureMachineTemplateExternalChanges := "{ \"spec\": { \"template\": { \"spec\": { \"bar\": \"bar\" } } }}"
  1369  	infrastructureMachineTemplateWithExternalChanges := infrastructureMachineTemplate.DeepCopy()
  1370  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplateWithExternalChanges.UnstructuredContent(), "bar", "spec", "template", "spec", "bar")).To(Succeed())
  1371  
  1372  	// Build a patch used to simulate an external controller overriding a field managed by the topology controller.
  1373  	infrastructureMachineTemplateExternalOverridingChanges := "{ \"spec\": { \"template\": { \"spec\": { \"foo\": \"foo-override\" } } }}"
  1374  
  1375  	// Create a desired infrastructure machine template with incompatible changes.
  1376  	infrastructureMachineTemplateWithIncompatibleChanges := infrastructureMachineTemplate.DeepCopy()
  1377  	gvk := infrastructureMachineTemplateWithIncompatibleChanges.GroupVersionKind()
  1378  	gvk.Kind = "KindChanged"
  1379  	infrastructureMachineTemplateWithIncompatibleChanges.SetGroupVersionKind(gvk)
  1380  
  1381  	upgradeTrackerWithControlPlanePendingUpgrade := scope.NewUpgradeTracker()
  1382  	upgradeTrackerWithControlPlanePendingUpgrade.ControlPlane.IsPendingUpgrade = true
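        	// NOTE: marking the ControlPlane as pending upgrade simulates a topology where the ControlPlane still has
        	// to pick up a new version; in that case reconcileControlPlane is expected to leave the current object
        	// unchanged (see the "pending upgrade" test case below).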
  1383  
  1384  	tests := []struct {
  1385  		name                                 string
  1386  		class                                *scope.ControlPlaneBlueprint
  1387  		original                             *scope.ControlPlaneState
  1388  		controlPlaneExternalChanges          string
  1389  		machineInfrastructureExternalChanges string
  1390  		upgradeTracker                       *scope.UpgradeTracker
  1391  		desired                              *scope.ControlPlaneState
  1392  		want                                 *scope.ControlPlaneState
  1393  		wantCreated                          bool
  1394  		wantRotation                         bool
  1395  		wantErr                              bool
  1396  	}{
  1397  		// Testing reconciliation of a control plane without machines.
  1398  		{
  1399  			name:        "Should create desired ControlPlane without machine infrastructure if the current does not exist",
  1400  			class:       ccWithoutControlPlaneInfrastructure,
  1401  			original:    nil,
  1402  			desired:     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1403  			want:        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1404  			wantCreated: true,
  1405  			wantErr:     false,
  1406  		},
  1407  		{
  1408  			name:     "Should update the ControlPlane without machine infrastructure",
  1409  			class:    ccWithoutControlPlaneInfrastructure,
  1410  			original: &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1411  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1412  			want:     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1413  			wantErr:  false,
  1414  		},
  1415  		{
  1416  			name:           "Should not update the ControlPlane if ControlPlane is pending upgrade",
  1417  			class:          ccWithoutControlPlaneInfrastructure,
  1418  			upgradeTracker: upgradeTrackerWithControlPlanePendingUpgrade,
  1419  			original:       &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1420  			desired:        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1421  			want:           &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1422  			wantErr:        false,
  1423  		},
  1424  		{
  1425  			name:                        "Should preserve external changes to ControlPlane without machine infrastructure",
  1426  			class:                       ccWithoutControlPlaneInfrastructure,
  1427  			original:                    &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1428  			controlPlaneExternalChanges: controlPlaneWithoutInfrastructureExternalChanges,
  1429  			desired:                     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1430  			want:                        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithExternalChanges.DeepCopy()},
  1431  			wantErr:                     false,
  1432  		},
  1433  		{
  1434  			name:                        "Should restore template values if overridden by external controllers into a ControlPlane without machine infrastructure",
  1435  			class:                       ccWithoutControlPlaneInfrastructure,
  1436  			original:                    &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1437  			controlPlaneExternalChanges: controlPlaneWithoutInfrastructureWithExternalOverridingChanges,
  1438  			desired:                     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1439  			want:                        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1440  			wantErr:                     false,
  1441  		},
  1442  		{
  1443  			name:     "Fail on updating ControlPlane without machine infrastructure in case of incompatible changes",
  1444  			class:    ccWithoutControlPlaneInfrastructure,
  1445  			original: &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1446  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithIncompatibleChanges.DeepCopy()},
  1447  			wantErr:  true,
  1448  		},
  1449  
  1450  		// Testing reconciliation of a control plane with machines.
  1451  		{
  1452  			name:        "Should create desired ControlPlane with machine infrastructure if the current does not exist",
  1453  			class:       ccWithControlPlaneInfrastructure,
  1454  			original:    nil,
  1455  			desired:     &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1456  			want:        &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1457  			wantCreated: true,
  1458  			wantErr:     false,
  1459  		},
  1460  		{
  1461  			name:         "Should rotate machine infrastructure in case of changes to the desired template",
  1462  			class:        ccWithControlPlaneInfrastructure,
  1463  			original:     &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1464  			desired:      &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithChanges.DeepCopy()},
  1465  			want:         &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithChanges.DeepCopy()},
  1466  			wantRotation: true,
  1467  			wantErr:      false,
  1468  		},
  1469  		{
  1470  			name:                                 "Should preserve external changes to ControlPlane's machine infrastructure", // NOTE: templates are not expected to mutate; this is for extra safety.
  1471  			class:                                ccWithControlPlaneInfrastructure,
  1472  			original:                             &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1473  			machineInfrastructureExternalChanges: infrastructureMachineTemplateExternalChanges,
  1474  			desired:                              &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1475  			want:                                 &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithExternalChanges.DeepCopy()},
  1476  			wantRotation:                         false,
  1477  			wantErr:                              false,
  1478  		},
  1479  		{
  1480  			name:                                 "Should restore template values if overridden by external controllers into the ControlPlane's machine infrastructure", // NOTE: templates are not expected to mutate; this is for extra safety.
  1481  			class:                                ccWithControlPlaneInfrastructure,
  1482  			original:                             &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1483  			machineInfrastructureExternalChanges: infrastructureMachineTemplateExternalOverridingChanges,
  1484  			desired:                              &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1485  			want:                                 &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1486  			wantRotation:                         true,
  1487  			wantErr:                              false,
  1488  		},
  1489  		{
  1490  			name:     "Fail on updating ControlPlane with a machine infrastructure in case of incompatible changes",
  1491  			class:    ccWithControlPlaneInfrastructure,
  1492  			original: &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1493  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithIncompatibleChanges.DeepCopy()},
  1494  			wantErr:  true,
  1495  		},
  1496  	}
  1497  	for _, tt := range tests {
  1498  		t.Run(tt.name, func(t *testing.T) {
  1499  			g := NewWithT(t)
  1500  
  1501  			// Create namespace and modify input to have correct namespace set
  1502  			namespace, err := env.CreateNamespace(ctx, "reconcile-control-plane")
  1503  			g.Expect(err).ToNot(HaveOccurred())
  1504  			if tt.class != nil { // *scope.ControlPlaneBlueprint
  1505  				tt.class = prepareControlPlaneBluePrint(tt.class, namespace.GetName())
  1506  			}
  1507  			if tt.original != nil { // *scope.ControlPlaneState
  1508  				tt.original = prepareControlPlaneState(g, tt.original, namespace.GetName())
  1509  			}
  1510  			if tt.desired != nil { // *scope.ControlPlaneState
  1511  				tt.desired = prepareControlPlaneState(g, tt.desired, namespace.GetName())
  1512  			}
  1513  			if tt.want != nil { // *scope.ControlPlaneState
  1514  				tt.want = prepareControlPlaneState(g, tt.want, namespace.GetName())
  1515  			}
  1516  
  1517  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster1").Build())
  1518  			s.Blueprint = &scope.ClusterBlueprint{
  1519  				ClusterClass: &clusterv1.ClusterClass{},
  1520  			}
  1521  			if tt.class.InfrastructureMachineTemplate != nil {
  1522  				s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure = &clusterv1.LocalObjectTemplate{
  1523  					Ref: contract.ObjToRef(tt.class.InfrastructureMachineTemplate),
  1524  				}
  1525  			}
  1526  			if tt.upgradeTracker != nil {
  1527  				s.UpgradeTracker = tt.upgradeTracker
  1528  			}
  1529  
  1530  			s.Current.ControlPlane = &scope.ControlPlaneState{}
  1531  			if tt.original != nil {
  1532  				if tt.original.InfrastructureMachineTemplate != nil {
  1533  					// NOTE: it is required to use server-side apply to create the object in order to ensure consistency with the topology controller behaviour.
  1534  					g.Expect(env.PatchAndWait(ctx, tt.original.InfrastructureMachineTemplate.DeepCopy(), client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1535  					// NOTE: it is required to apply instance specific changes with a "plain" Patch operation to simulate a different manager.
  1536  					if tt.machineInfrastructureExternalChanges != "" {
  1537  						g.Expect(env.Patch(ctx, tt.original.InfrastructureMachineTemplate.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.machineInfrastructureExternalChanges)))).To(Succeed())
  1538  					}
  1539  
  1540  					current := builder.TestInfrastructureMachineTemplate("", "").Build()
  1541  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original.InfrastructureMachineTemplate), current)).To(Succeed())
  1542  					s.Current.ControlPlane.InfrastructureMachineTemplate = current
  1543  				}
  1544  				if tt.original.Object != nil {
  1545  					// NOTE: it is required to use server-side apply to create the object in order to ensure consistency with the topology controller behaviour.
  1546  					g.Expect(env.PatchAndWait(ctx, tt.original.Object.DeepCopy(), client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1547  					// NOTE: it is required to apply instance specific changes with a "plain" Patch operation to simulate a different manager.
  1548  					if tt.controlPlaneExternalChanges != "" {
  1549  						g.Expect(env.Patch(ctx, tt.original.Object.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.controlPlaneExternalChanges)))).To(Succeed())
  1550  					}
  1551  
  1552  					current := builder.TestControlPlane("", "").Build()
  1553  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original.Object), current)).To(Succeed())
  1554  					s.Current.ControlPlane.Object = current
  1555  				}
  1556  			}
  1557  
  1558  			r := Reconciler{
  1559  				Client:             env,
  1560  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1561  				recorder:           env.GetEventRecorderFor("test"),
  1562  			}
  1563  
  1564  			s.Desired = &scope.ClusterState{
  1565  				ControlPlane: &scope.ControlPlaneState{
  1566  					Object:                        tt.desired.Object,
  1567  					InfrastructureMachineTemplate: tt.desired.InfrastructureMachineTemplate,
  1568  				},
  1569  			}
  1570  
  1571  			// Run reconcileControlPlane with the states created in the initial section of the test.
  1572  			created, err := r.reconcileControlPlane(ctx, s)
  1573  			if tt.wantErr {
  1574  				g.Expect(err).To(HaveOccurred())
  1575  				return
  1576  			}
  1577  			g.Expect(err).ToNot(HaveOccurred())
  1578  			g.Expect(created).To(Equal(tt.wantCreated))
  1579  
  1580  			// Create ControlPlane object for fetching data into
  1581  			gotControlPlaneObject := builder.TestControlPlane("", "").Build()
  1582  			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want.Object), gotControlPlaneObject)
  1583  			g.Expect(err).ToNot(HaveOccurred())
  1584  
  1585  			// Check for template rotation: rotation is detected when the ControlPlane's infrastructureRef points to a template with a different (newly generated) name than the original one.
  1586  			gotRotation := false
  1587  			var gotInfrastructureMachineRef *corev1.ObjectReference
  1588  			if tt.class.InfrastructureMachineTemplate != nil {
  1589  				gotInfrastructureMachineRef, err = contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(gotControlPlaneObject)
  1590  				g.Expect(err).ToNot(HaveOccurred())
  1591  				if tt.original != nil {
  1592  					if tt.original.InfrastructureMachineTemplate != nil && tt.original.InfrastructureMachineTemplate.GetName() != gotInfrastructureMachineRef.Name {
  1593  						gotRotation = true
  1594  						// If the template has been rotated, fix up the infrastructureRef in the expected object before comparison.
  1595  						g.Expect(contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(tt.want.Object, refToUnstructured(gotInfrastructureMachineRef))).To(Succeed())
  1596  					}
  1597  				}
  1598  			}
  1599  			g.Expect(gotRotation).To(Equal(tt.wantRotation))
  1600  
  1601  			// Get the spec from the ControlPlaneObject we are expecting
  1602  			wantControlPlaneObjectSpec, ok, err := unstructured.NestedMap(tt.want.Object.UnstructuredContent(), "spec")
  1603  			g.Expect(err).ToNot(HaveOccurred())
  1604  			g.Expect(ok).To(BeTrue())
  1605  
  1606  			// Get the spec from the ControlPlaneObject we got from the client.Get
  1607  			gotControlPlaneObjectSpec, ok, err := unstructured.NestedMap(gotControlPlaneObject.UnstructuredContent(), "spec")
  1608  			g.Expect(err).ToNot(HaveOccurred())
  1609  			g.Expect(ok).To(BeTrue())
  1610  
  1611  			for k, v := range wantControlPlaneObjectSpec {
  1612  				g.Expect(gotControlPlaneObjectSpec).To(HaveKeyWithValue(k, v))
  1613  			}
  1614  			for k, v := range tt.want.Object.GetLabels() {
  1615  				g.Expect(gotControlPlaneObject.GetLabels()).To(HaveKeyWithValue(k, v))
  1616  			}
  1617  
  1618  			// Check the infrastructure template
  1619  			if tt.want.InfrastructureMachineTemplate != nil {
  1620  				// Check to see if the controlPlaneObject has been updated with a new template.
  1621  				// This check only verifies the naming format used by generated templates - here it's templateName-*.
  1622  				// It is only performed when there was an initial template that has been changed.
  1623  				if gotRotation {
  1624  					pattern := fmt.Sprintf("%s.*", names.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name))
  1625  					ok, err := regexp.Match(pattern, []byte(gotInfrastructureMachineRef.Name))
  1626  					g.Expect(err).ToNot(HaveOccurred())
  1627  					g.Expect(ok).To(BeTrue())
  1628  				}
  1629  
  1630  				// Create object to hold the queried InfrastructureMachineTemplate
  1631  				gotInfrastructureMachineTemplateKey := client.ObjectKey{Namespace: gotInfrastructureMachineRef.Namespace, Name: gotInfrastructureMachineRef.Name}
  1632  				gotInfrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate("", "").Build()
  1633  				err = env.GetAPIReader().Get(ctx, gotInfrastructureMachineTemplateKey, gotInfrastructureMachineTemplate)
  1634  				g.Expect(err).ToNot(HaveOccurred())
  1635  
  1636  				// Get the spec from the InfrastructureMachineTemplate we are expecting
  1637  				wantInfrastructureMachineTemplateSpec, ok, err := unstructured.NestedMap(tt.want.InfrastructureMachineTemplate.UnstructuredContent(), "spec")
  1638  				g.Expect(err).ToNot(HaveOccurred())
  1639  				g.Expect(ok).To(BeTrue())
  1640  
  1641  				// Get the spec from the InfrastructureMachineTemplate we got from the client.Get
  1642  				gotInfrastructureMachineTemplateSpec, ok, err := unstructured.NestedMap(gotInfrastructureMachineTemplate.UnstructuredContent(), "spec")
  1643  				g.Expect(err).ToNot(HaveOccurred())
  1644  				g.Expect(ok).To(BeTrue())
  1645  
  1646  				// Compare all keys and values in the InfrastructureMachineTemplate Spec
  1647  				for k, v := range wantInfrastructureMachineTemplateSpec {
  1648  					g.Expect(gotInfrastructureMachineTemplateSpec).To(HaveKeyWithValue(k, v))
  1649  				}
  1650  
  1651  				// Check to see that labels are as expected on the object
  1652  				for k, v := range tt.want.InfrastructureMachineTemplate.GetLabels() {
  1653  					g.Expect(gotInfrastructureMachineTemplate.GetLabels()).To(HaveKeyWithValue(k, v))
  1654  				}
  1655  
  1656  				// If the template was rotated during the reconcile we want to make sure the old template was deleted.
  1657  				if gotRotation {
  1658  					obj := &unstructured.Unstructured{}
  1659  					obj.SetAPIVersion(builder.InfrastructureGroupVersion.String())
  1660  					obj.SetKind(builder.GenericInfrastructureMachineTemplateKind)
  1661  					err := r.Client.Get(ctx, client.ObjectKey{
  1662  						Namespace: tt.original.InfrastructureMachineTemplate.GetNamespace(),
  1663  						Name:      tt.original.InfrastructureMachineTemplate.GetName(),
  1664  					}, obj)
  1665  					g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  1666  				}
  1667  			}
  1668  		})
  1669  	}
  1670  }
  1671  
  1672  func TestReconcileControlPlaneCleanup(t *testing.T) {
  1673  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1-cluster-class").
  1674  		WithSpecFields(map[string]interface{}{"spec.template.spec.foo": "foo"}).
  1675  		Build()
  1676  	ccWithControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{InfrastructureMachineTemplate: infrastructureMachineTemplate}
  1677  
  1678  	infrastructureMachineTemplateCopy := infrastructureMachineTemplate.DeepCopy()
  1679  	infrastructureMachineTemplateCopy.SetName("infrav1-cluster")
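        	// NOTE: the copy stands in for the cluster-specific template the topology controller would generate from
        	// the ClusterClass template above; it intentionally has a different name than "infra1-cluster-class".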
  1680  	controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1681  		WithInfrastructureMachineTemplate(infrastructureMachineTemplateCopy).
  1682  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1683  		Build()
  1684  
  1685  	t.Run("cleanup InfrastructureMachineTemplate in case of errors", func(t *testing.T) {
  1686  		g := NewWithT(t)
  1687  
  1688  		// Create namespace and modify input to have correct namespace set
  1689  		namespace, err := env.CreateNamespace(ctx, "reconcile-control-plane")
  1690  		g.Expect(err).ToNot(HaveOccurred())
  1691  		ccWithControlPlaneInfrastructure = prepareControlPlaneBluePrint(ccWithControlPlaneInfrastructure, namespace.GetName())
  1692  
  1693  		s := scope.New(builder.Cluster(namespace.GetName(), "cluster1").Build())
  1694  		s.Blueprint = &scope.ClusterBlueprint{
  1695  			ClusterClass: &clusterv1.ClusterClass{
  1696  				Spec: clusterv1.ClusterClassSpec{
  1697  					ControlPlane: clusterv1.ControlPlaneClass{
  1698  						MachineInfrastructure: &clusterv1.LocalObjectTemplate{
  1699  							Ref: contract.ObjToRef(infrastructureMachineTemplate),
  1700  						},
  1701  					},
  1702  				},
  1703  			},
  1704  		}
  1705  		s.Current.ControlPlane = &scope.ControlPlaneState{}
  1706  		s.Desired = &scope.ClusterState{
  1707  			ControlPlane: &scope.ControlPlaneState{Object: controlPlane, InfrastructureMachineTemplate: infrastructureMachineTemplateCopy},
  1708  		}
  1709  		s.Desired.ControlPlane = prepareControlPlaneState(g, s.Desired.ControlPlane, namespace.GetName())
  1710  
  1711  		// Force control plane creation to fail
  1712  		s.Desired.ControlPlane.Object.SetNamespace("do-not-exist")
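        		// Setting a namespace that does not exist makes the server-side apply of the ControlPlane object fail
        		// after the InfrastructureMachineTemplate has already been created, so the assertions below can verify
        		// that the partially created template is cleaned up again.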
  1713  
  1714  		r := Reconciler{
  1715  			Client:             env,
  1716  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1717  			recorder:           env.GetEventRecorderFor("test"),
  1718  		}
  1719  		created, err := r.reconcileControlPlane(ctx, s)
  1720  		g.Expect(err).To(HaveOccurred())
  1721  		g.Expect(created).To(BeFalse())
  1722  
  1723  		gotInfrastructureMachineTemplate := infrastructureMachineTemplateCopy.DeepCopy()
  1724  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(infrastructureMachineTemplateCopy), gotInfrastructureMachineTemplate)
  1725  
  1726  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  1727  	})
  1728  }
  1729  
  1730  func TestReconcileControlPlaneMachineHealthCheck(t *testing.T) {
  1731  	// Create InfrastructureMachineTemplates for test cases
  1732  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()
  1733  
  1734  	mhcClass := &clusterv1.MachineHealthCheckClass{
  1735  		UnhealthyConditions: []clusterv1.UnhealthyCondition{
  1736  			{
  1737  				Type:    corev1.NodeReady,
  1738  				Status:  corev1.ConditionUnknown,
  1739  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  1740  			},
  1741  		},
  1742  	}
  1743  	maxUnhealthy := intstr.Parse("45%")
  1744  	// Create one cluster class requiring controlPlaneInfrastructure and one that does not.
  1745  	ccWithControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{
  1746  		InfrastructureMachineTemplate: infrastructureMachineTemplate,
  1747  		MachineHealthCheck:            mhcClass,
  1748  	}
  1749  	ccWithoutControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{
  1750  		MachineHealthCheck: mhcClass,
  1751  	}
  1752  
  1753  	// Create ControlPlane Object.
  1754  	controlPlane1 := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1755  		WithInfrastructureMachineTemplate(infrastructureMachineTemplate).
  1756  		Build()
  1757  
  1758  	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1").
  1759  		WithSelector(*selectors.ForControlPlaneMHC()).
  1760  		WithUnhealthyConditions(mhcClass.UnhealthyConditions).
  1761  		WithClusterName("cluster1")
  1762  
  1763  	tests := []struct {
  1764  		name    string
  1765  		class   *scope.ControlPlaneBlueprint
  1766  		current *scope.ControlPlaneState
  1767  		desired *scope.ControlPlaneState
  1768  		want    *clusterv1.MachineHealthCheck
  1769  	}{
  1770  		{
  1771  			name:    "Should create desired ControlPlane MachineHealthCheck for a new ControlPlane",
  1772  			class:   ccWithControlPlaneInfrastructure,
  1773  			current: nil,
  1774  			desired: &scope.ControlPlaneState{
  1775  				Object:                        controlPlane1.DeepCopy(),
  1776  				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1777  				MachineHealthCheck:            mhcBuilder.Build()},
  1778  			want: mhcBuilder.DeepCopy().
  1779  				Build(),
  1780  		},
  1781  		{
  1782  			name:  "Should not create ControlPlane MachineHealthCheck when no MachineInfrastructure is defined",
  1783  			class: ccWithoutControlPlaneInfrastructure,
  1784  			current: &scope.ControlPlaneState{
  1785  				Object: controlPlane1.DeepCopy(),
  1786  				// Note: this creation would be blocked by the validation webhook. An MHC with no MachineInfrastructure is not allowed.
  1787  				MachineHealthCheck: mhcBuilder.Build()},
  1788  			desired: &scope.ControlPlaneState{
  1789  				Object: controlPlane1.DeepCopy(),
  1790  				// ControlPlane does not have defined MachineInfrastructure.
  1791  				// InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1792  			},
  1793  			want: nil,
  1794  		},
  1795  		{
  1796  			name:  "Should update ControlPlane MachineHealthCheck when changed in desired state",
  1797  			class: ccWithControlPlaneInfrastructure,
  1798  			current: &scope.ControlPlaneState{
  1799  				Object:                        controlPlane1.DeepCopy(),
  1800  				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1801  				MachineHealthCheck:            mhcBuilder.Build()},
  1802  			desired: &scope.ControlPlaneState{
  1803  				Object:                        controlPlane1.DeepCopy(),
  1804  				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1805  				MachineHealthCheck:            mhcBuilder.WithMaxUnhealthy(&maxUnhealthy).Build(),
  1806  			},
  1807  			// Want to get the updated version of the MachineHealthCheck after reconciliation.
  1808  			want: mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).
  1809  				Build(),
  1810  		},
  1811  		{
  1812  			name:  "Should delete ControlPlane MachineHealthCheck when removed from desired state",
  1813  			class: ccWithControlPlaneInfrastructure,
  1814  			current: &scope.ControlPlaneState{
  1815  				Object:                        controlPlane1.DeepCopy(),
  1816  				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1817  				MachineHealthCheck:            mhcBuilder.Build()},
  1818  			desired: &scope.ControlPlaneState{
  1819  				Object:                        controlPlane1.DeepCopy(),
  1820  				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  1821  				// MachineHealthCheck removed from the desired state of the ControlPlane
  1822  			},
  1823  			want: nil,
  1824  		},
  1825  	}
  1826  	for _, tt := range tests {
  1827  		t.Run(tt.name, func(t *testing.T) {
  1828  			g := NewWithT(t)
  1829  
  1830  			// Create namespace and modify input to have correct namespace set
  1831  			namespace, err := env.CreateNamespace(ctx, "reconcile-control-plane")
  1832  			g.Expect(err).ToNot(HaveOccurred())
  1833  			if tt.class != nil {
  1834  				tt.class = prepareControlPlaneBluePrint(tt.class, namespace.GetName())
  1835  			}
  1836  			if tt.current != nil {
  1837  				tt.current = prepareControlPlaneState(g, tt.current, namespace.GetName())
  1838  			}
  1839  			if tt.desired != nil {
  1840  				tt.desired = prepareControlPlaneState(g, tt.desired, namespace.GetName())
  1841  			}
  1842  			if tt.want != nil {
  1843  				tt.want.SetNamespace(namespace.GetName())
  1844  			}
  1845  
  1846  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster1").Build())
  1847  			s.Blueprint = &scope.ClusterBlueprint{
  1848  				ClusterClass: &clusterv1.ClusterClass{},
  1849  			}
  1850  			if tt.class.InfrastructureMachineTemplate != nil {
  1851  				s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure = &clusterv1.LocalObjectTemplate{
  1852  					Ref: contract.ObjToRef(tt.class.InfrastructureMachineTemplate),
  1853  				}
  1854  			}
  1855  
  1856  			s.Current.ControlPlane = &scope.ControlPlaneState{}
  1857  			if tt.current != nil {
  1858  				s.Current.ControlPlane = tt.current
  1859  				if tt.current.Object != nil {
  1860  					g.Expect(env.PatchAndWait(ctx, tt.current.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1861  				}
  1862  				if tt.current.InfrastructureMachineTemplate != nil {
  1863  					g.Expect(env.PatchAndWait(ctx, tt.current.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1864  				}
  1865  				if tt.current.MachineHealthCheck != nil {
  1866  					g.Expect(env.PatchAndWait(ctx, tt.current.MachineHealthCheck, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1867  				}
  1868  			}
  1869  
  1870  			// Copy over the UID of the created ControlPlane to the desired ControlPlane.
  1871  			if tt.current != nil && tt.current.Object != nil && tt.desired != nil && tt.desired.Object != nil {
  1872  				tt.desired.Object.SetUID(tt.current.Object.GetUID())
  1873  			}
  1874  
  1875  			r := Reconciler{
  1876  				Client:             env,
  1877  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1878  				recorder:           env.GetEventRecorderFor("test"),
  1879  			}
  1880  
  1881  			s.Desired = &scope.ClusterState{
  1882  				ControlPlane: tt.desired,
  1883  			}
  1884  
  1885  			// Run reconcileControlPlane with the states created in the initial section of the test.
  1886  			_, err = r.reconcileControlPlane(ctx, s)
  1887  			g.Expect(err).ToNot(HaveOccurred())
  1888  
  1889  			gotCP := s.Desired.ControlPlane.Object.DeepCopy()
  1890  			g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: controlPlane1.GetName()}, gotCP)).To(Succeed())
  1891  
  1892  			// Create MachineHealthCheck object for fetching data into
  1893  			gotMHC := &clusterv1.MachineHealthCheck{}
  1894  			err = env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: controlPlane1.GetName()}, gotMHC)
  1895  
  1896  			// Nil case: if we expect to find nothing (i.e. the MHC was deleted or never created) and the Get call returns a NotFound error from the API, the test succeeds.
  1897  			if tt.want == nil && apierrors.IsNotFound(err) {
  1898  				return
  1899  			}
  1900  
  1901  			want := tt.want.DeepCopy()
  1902  			g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, want)).To(Succeed())
  1903  
  1904  			g.Expect(err).ToNot(HaveOccurred())
  1905  			g.Expect(gotMHC).To(EqualObject(want, IgnoreAutogeneratedMetadata, IgnorePaths{".kind", ".apiVersion"}))
  1906  		})
  1907  	}
  1908  }
  1909  
  1910  func TestReconcileMachineDeployments(t *testing.T) {
  1911  	g := NewWithT(t)
  1912  
  1913  	// Write the config file to access the test env for debugging.
  1914  	// g.Expect(os.WriteFile("test.conf", kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
  1915  	// 	ObjectMeta: metav1.ObjectMeta{Name: "test"},
  1916  	// }), 0777)).To(Succeed())
  1917  
  1918  	infrastructureMachineTemplate1 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-1").Build()
  1919  	bootstrapTemplate1 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  1920  	md1 := newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate1, bootstrapTemplate1, nil)
  1921  
  1922  	upgradeTrackerWithMD1PendingCreate := scope.NewUpgradeTracker()
  1923  	upgradeTrackerWithMD1PendingCreate.MachineDeployments.MarkPendingCreate("md-1-topology")
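        	// NOTE: marking the md-1 topology as pending create means reconcile is expected to skip creating the
        	// corresponding MachineDeployment; the "pending create" test case below relies on this.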
  1924  
  1925  	infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-2").Build()
  1926  	bootstrapTemplate2 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-2").Build()
  1927  	md2 := newFakeMachineDeploymentTopologyState("md-2", infrastructureMachineTemplate2, bootstrapTemplate2, nil)
  1928  	infrastructureMachineTemplate2WithChanges := infrastructureMachineTemplate2.DeepCopy()
  1929  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate2WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1930  	md2WithRotatedInfrastructureMachineTemplate := newFakeMachineDeploymentTopologyState("md-2", infrastructureMachineTemplate2WithChanges, bootstrapTemplate2, nil)
  1931  	upgradeTrackerWithMD2PendingUpgrade := scope.NewUpgradeTracker()
  1932  	upgradeTrackerWithMD2PendingUpgrade.MachineDeployments.MarkPendingUpgrade("md-2")
  1933  
  1934  	infrastructureMachineTemplate3 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-3").Build()
  1935  	bootstrapTemplate3 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-3").Build()
  1936  	md3 := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3, nil)
  1937  	bootstrapTemplate3WithChanges := bootstrapTemplate3.DeepCopy()
  1938  	g.Expect(unstructured.SetNestedField(bootstrapTemplate3WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1939  	md3WithRotatedBootstrapTemplate := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3WithChanges, nil)
  1940  	bootstrapTemplate3WithChangeKind := bootstrapTemplate3.DeepCopy()
  1941  	bootstrapTemplate3WithChangeKind.SetKind("AnotherGenericBootstrapTemplate")
  1942  	md3WithRotatedBootstrapTemplateChangedKind := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3WithChangeKind, nil)
  1943  
  1944  	infrastructureMachineTemplate4 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-4").Build()
  1945  	bootstrapTemplate4 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-4").Build()
  1946  	md4 := newFakeMachineDeploymentTopologyState("md-4", infrastructureMachineTemplate4, bootstrapTemplate4, nil)
  1947  	infrastructureMachineTemplate4WithChanges := infrastructureMachineTemplate4.DeepCopy()
  1948  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate4WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1949  	bootstrapTemplate4WithChanges := bootstrapTemplate4.DeepCopy()
  1950  	g.Expect(unstructured.SetNestedField(bootstrapTemplate4WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1951  	md4WithRotatedTemplates := newFakeMachineDeploymentTopologyState("md-4", infrastructureMachineTemplate4WithChanges, bootstrapTemplate4WithChanges, nil)
  1952  
  1953  	infrastructureMachineTemplate4m := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-4m").Build()
  1954  	bootstrapTemplate4m := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-4m").Build()
  1955  	md4m := newFakeMachineDeploymentTopologyState("md-4m", infrastructureMachineTemplate4m, bootstrapTemplate4m, nil)
  1956  	infrastructureMachineTemplate4mWithChanges := infrastructureMachineTemplate4m.DeepCopy()
  1957  	infrastructureMachineTemplate4mWithChanges.SetLabels(map[string]string{"foo": "bar"})
  1958  	bootstrapTemplate4mWithChanges := bootstrapTemplate4m.DeepCopy()
  1959  	bootstrapTemplate4mWithChanges.SetLabels(map[string]string{"foo": "bar"})
  1960  	md4mWithInPlaceUpdatedTemplates := newFakeMachineDeploymentTopologyState("md-4m", infrastructureMachineTemplate4mWithChanges, bootstrapTemplate4mWithChanges, nil)
  1961  
  1962  	infrastructureMachineTemplate5 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-5").Build()
  1963  	bootstrapTemplate5 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-5").Build()
  1964  	md5 := newFakeMachineDeploymentTopologyState("md-5", infrastructureMachineTemplate5, bootstrapTemplate5, nil)
  1965  	infrastructureMachineTemplate5WithChangedKind := infrastructureMachineTemplate5.DeepCopy()
  1966  	infrastructureMachineTemplate5WithChangedKind.SetKind("ChangedKind")
  1967  	md5WithChangedInfrastructureMachineTemplateKind := newFakeMachineDeploymentTopologyState("md-5", infrastructureMachineTemplate5WithChangedKind, bootstrapTemplate5, nil)
  1968  
  1969  	infrastructureMachineTemplate6 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-6").Build()
  1970  	bootstrapTemplate6 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-6").Build()
  1971  	md6 := newFakeMachineDeploymentTopologyState("md-6", infrastructureMachineTemplate6, bootstrapTemplate6, nil)
  1972  	bootstrapTemplate6WithChangedNamespace := bootstrapTemplate6.DeepCopy()
  1973  	bootstrapTemplate6WithChangedNamespace.SetNamespace("ChangedNamespace")
  1974  	md6WithChangedBootstrapTemplateNamespace := newFakeMachineDeploymentTopologyState("md-6", infrastructureMachineTemplate6, bootstrapTemplate6WithChangedNamespace, nil)
  1975  
  1976  	infrastructureMachineTemplate7 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-7").Build()
  1977  	bootstrapTemplate7 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-7").Build()
  1978  	md7 := newFakeMachineDeploymentTopologyState("md-7", infrastructureMachineTemplate7, bootstrapTemplate7, nil)
  1979  
  1980  	infrastructureMachineTemplate8Create := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-create").Build()
  1981  	bootstrapTemplate8Create := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-create").Build()
  1982  	md8Create := newFakeMachineDeploymentTopologyState("md-8-create", infrastructureMachineTemplate8Create, bootstrapTemplate8Create, nil)
  1983  	infrastructureMachineTemplate8Delete := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-delete").Build()
  1984  	bootstrapTemplate8Delete := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-delete").Build()
  1985  	md8Delete := newFakeMachineDeploymentTopologyState("md-8-delete", infrastructureMachineTemplate8Delete, bootstrapTemplate8Delete, nil)
  1986  	infrastructureMachineTemplate8Update := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-update").Build()
  1987  	bootstrapTemplate8Update := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-update").Build()
  1988  	md8Update := newFakeMachineDeploymentTopologyState("md-8-update", infrastructureMachineTemplate8Update, bootstrapTemplate8Update, nil)
  1989  	infrastructureMachineTemplate8UpdateWithChanges := infrastructureMachineTemplate8Update.DeepCopy()
  1990  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate8UpdateWithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1991  	bootstrapTemplate8UpdateWithChanges := bootstrapTemplate8Update.DeepCopy()
  1992  	g.Expect(unstructured.SetNestedField(bootstrapTemplate8UpdateWithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1993  	md8UpdateWithRotatedTemplates := newFakeMachineDeploymentTopologyState("md-8-update", infrastructureMachineTemplate8UpdateWithChanges, bootstrapTemplate8UpdateWithChanges, nil)
  1994  
  1995  	infrastructureMachineTemplate9m := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-9m").Build()
  1996  	bootstrapTemplate9m := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-9m").Build()
  1997  	md9 := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil)
  1998  	md9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  1999  	md9.Object.Spec.Selector.MatchLabels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  2000  	md9WithInstanceSpecificTemplateMetadataAndSelector := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil)
  2001  	md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"}
  2002  	md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Selector.MatchLabels = map[string]string{"foo": "bar"}
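        	// The "Enforce template metadata" test case below uses md9WithInstanceSpecificTemplateMetadataAndSelector
        	// as the current state: its template labels and selector lack the cluster name label, and reconcile is
        	// expected to bring them back in line with the desired state (md9).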
  2003  
  2004  	tests := []struct {
  2005  		name                                      string
  2006  		current                                   []*scope.MachineDeploymentState
  2007  		currentOnlyAPIServer                      []*scope.MachineDeploymentState
  2008  		desired                                   []*scope.MachineDeploymentState
  2009  		upgradeTracker                            *scope.UpgradeTracker
  2010  		want                                      []*scope.MachineDeploymentState
  2011  		wantInfrastructureMachineTemplateRotation map[string]bool
  2012  		wantBootstrapTemplateRotation             map[string]bool
  2013  		wantErr                                   bool
  2014  	}{
  2015  		{
  2016  			name:    "Should create desired MachineDeployment if the current does not exist yet",
  2017  			current: nil,
  2018  			desired: []*scope.MachineDeploymentState{md1},
  2019  			want:    []*scope.MachineDeploymentState{md1},
  2020  			wantErr: false,
  2021  		},
  2022  		{
  2023  			name:                 "Should skip creating desired MachineDeployment if it already exists in the apiserver (even if it is not in current state)",
  2024  			current:              nil,
  2025  			currentOnlyAPIServer: []*scope.MachineDeploymentState{md1},
  2026  			desired:              []*scope.MachineDeploymentState{md1},
  2027  			want:                 []*scope.MachineDeploymentState{md1},
  2028  			wantErr:              false,
  2029  		},
  2030  		{
  2031  			name:           "Should not create desired MachineDeployment if the current does not exist yet and it is marked as pending create",
  2032  			current:        nil,
  2033  			upgradeTracker: upgradeTrackerWithMD1PendingCreate,
  2034  			desired:        []*scope.MachineDeploymentState{md1},
  2035  			want:           nil,
  2036  			wantErr:        false,
  2037  		},
  2038  		{
  2039  			name:    "No-op if current MachineDeployment is equal to desired",
  2040  			current: []*scope.MachineDeploymentState{md1},
  2041  			desired: []*scope.MachineDeploymentState{md1},
  2042  			want:    []*scope.MachineDeploymentState{md1},
  2043  			wantErr: false,
  2044  		},
  2045  		{
  2046  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate rotation",
  2047  			current: []*scope.MachineDeploymentState{md2},
  2048  			desired: []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  2049  			want:    []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  2050  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-2": true},
  2051  			wantErr: false,
  2052  		},
  2053  		{
  2054  			name:           "Should not update MachineDeployment if MachineDeployment is pending upgrade",
  2055  			current:        []*scope.MachineDeploymentState{md2},
  2056  			desired:        []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  2057  			upgradeTracker: upgradeTrackerWithMD2PendingUpgrade,
  2058  			want:           []*scope.MachineDeploymentState{md2},
  2059  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-2": false},
  2060  			wantErr: false,
  2061  		},
  2062  		{
  2063  			name:                          "Should update MachineDeployment with BootstrapTemplate rotation",
  2064  			current:                       []*scope.MachineDeploymentState{md3},
  2065  			desired:                       []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplate},
  2066  			want:                          []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplate},
  2067  			wantBootstrapTemplateRotation: map[string]bool{"md-3": true},
  2068  			wantErr:                       false,
  2069  		},
  2070  		{
  2071  			name:                          "Should update MachineDeployment with BootstrapTemplate rotation with changed kind",
  2072  			current:                       []*scope.MachineDeploymentState{md3},
  2073  			desired:                       []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplateChangedKind},
  2074  			want:                          []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplateChangedKind},
  2075  			wantBootstrapTemplateRotation: map[string]bool{"md-3": true},
  2076  			wantErr:                       false,
  2077  		},
  2078  		{
  2079  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate and BootstrapTemplate rotation",
  2080  			current: []*scope.MachineDeploymentState{md4},
  2081  			desired: []*scope.MachineDeploymentState{md4WithRotatedTemplates},
  2082  			want:    []*scope.MachineDeploymentState{md4WithRotatedTemplates},
  2083  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-4": true},
  2084  			wantBootstrapTemplateRotation:             map[string]bool{"md-4": true},
  2085  			wantErr:                                   false,
  2086  		},
  2087  		{
  2088  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate and BootstrapTemplate without rotation",
  2089  			current: []*scope.MachineDeploymentState{md4m},
  2090  			desired: []*scope.MachineDeploymentState{md4mWithInPlaceUpdatedTemplates},
  2091  			want:    []*scope.MachineDeploymentState{md4mWithInPlaceUpdatedTemplates},
  2092  			wantErr: false,
  2093  		},
  2094  		{
  2095  			name:    "Should fail to update MachineDeployment because of changed InfrastructureMachineTemplate kind",
  2096  			current: []*scope.MachineDeploymentState{md5},
  2097  			desired: []*scope.MachineDeploymentState{md5WithChangedInfrastructureMachineTemplateKind},
  2098  			wantErr: true,
  2099  		},
  2100  		{
  2101  			name:    "Should fail to update MachineDeployment because of changed BootstrapTemplate namespace",
  2102  			current: []*scope.MachineDeploymentState{md6},
  2103  			desired: []*scope.MachineDeploymentState{md6WithChangedBootstrapTemplateNamespace},
  2104  			wantErr: true,
  2105  		},
  2106  		{
  2107  			name:    "Should delete MachineDeployment",
  2108  			current: []*scope.MachineDeploymentState{md7},
  2109  			desired: []*scope.MachineDeploymentState{},
  2110  			want:    []*scope.MachineDeploymentState{},
  2111  			wantErr: false,
  2112  		},
  2113  		{
  2114  			name:    "Should create, update and delete MachineDeployments",
  2115  			current: []*scope.MachineDeploymentState{md8Update, md8Delete},
  2116  			desired: []*scope.MachineDeploymentState{md8Create, md8UpdateWithRotatedTemplates},
  2117  			want:    []*scope.MachineDeploymentState{md8Create, md8UpdateWithRotatedTemplates},
  2118  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-8-update": true},
  2119  			wantBootstrapTemplateRotation:             map[string]bool{"md-8-update": true},
  2120  			wantErr:                                   false,
  2121  		},
  2122  		{
  2123  			name:    "Enforce template metadata",
  2124  			current: []*scope.MachineDeploymentState{md9WithInstanceSpecificTemplateMetadataAndSelector},
  2125  			desired: []*scope.MachineDeploymentState{md9},
  2126  			want:    []*scope.MachineDeploymentState{md9},
  2127  			wantErr: false,
  2128  		},
  2129  	}
  2130  	for _, tt := range tests {
  2131  		t.Run(tt.name, func(t *testing.T) {
  2132  			g := NewWithT(t)
  2133  
  2134  			// Create namespace and modify input to have correct namespace set
  2135  			namespace, err := env.CreateNamespace(ctx, "reconcile-machine-deployments")
  2136  			g.Expect(err).ToNot(HaveOccurred())
  2137  			for i, s := range tt.current {
  2138  				tt.current[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2139  			}
  2140  			for i, s := range tt.desired {
  2141  				tt.desired[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2142  			}
  2143  			for i, s := range tt.want {
  2144  				tt.want[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2145  			}
  2146  
  2147  			for _, s := range tt.current {
  2148  				g.Expect(env.PatchAndWait(ctx, s.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2149  				g.Expect(env.PatchAndWait(ctx, s.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2150  				g.Expect(env.PatchAndWait(ctx, s.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2151  			}
  2152  
  2153  			currentMachineDeploymentStates := toMachineDeploymentTopologyStateMap(tt.current)
  2154  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2155  			s.Current.MachineDeployments = currentMachineDeploymentStates
  2156  
  2157  			// currentOnlyAPIServer MDs only exist in the API server but are not part of s.Current.
  2158  			// This simulates that getCurrentMachineDeploymentState in current_state.go read a stale MD list.
  2159  			for _, s := range tt.currentOnlyAPIServer {
  2160  				mdState := prepareMachineDeploymentState(s, namespace.GetName())
  2161  
  2162  				g.Expect(env.PatchAndWait(ctx, mdState.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2163  				g.Expect(env.PatchAndWait(ctx, mdState.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2164  				g.Expect(env.PatchAndWait(ctx, mdState.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2165  			}
  2166  
  2167  			s.Desired = &scope.ClusterState{MachineDeployments: toMachineDeploymentTopologyStateMap(tt.desired)}
  2168  
  2169  			if tt.upgradeTracker != nil {
  2170  				s.UpgradeTracker = tt.upgradeTracker
  2171  			}
  2172  
  2173  			r := Reconciler{
  2174  				Client:             env.GetClient(),
  2175  				APIReader:          env.GetAPIReader(),
  2176  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2177  				recorder:           env.GetEventRecorderFor("test"),
  2178  			}
  2179  			err = r.reconcileMachineDeployments(ctx, s)
  2180  			if tt.wantErr {
  2181  				g.Expect(err).To(HaveOccurred())
  2182  				return
  2183  			}
  2184  			g.Expect(err).ToNot(HaveOccurred())
  2185  
  2186  			var gotMachineDeploymentList clusterv1.MachineDeploymentList
  2187  			g.Expect(env.GetAPIReader().List(ctx, &gotMachineDeploymentList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  2188  			g.Expect(gotMachineDeploymentList.Items).To(HaveLen(len(tt.want)))
  2189  
  2190  			if tt.want == nil {
  2191  				// No machine deployments should exist.
  2192  				g.Expect(gotMachineDeploymentList.Items).To(BeEmpty())
  2193  			}
  2194  
  2195  			for _, wantMachineDeploymentState := range tt.want {
  2196  				for _, gotMachineDeployment := range gotMachineDeploymentList.Items {
  2197  					if wantMachineDeploymentState.Object.Name != gotMachineDeployment.Name {
  2198  						continue
  2199  					}
  2200  					currentMachineDeploymentTopologyName := wantMachineDeploymentState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]
  2201  					currentMachineDeploymentState := currentMachineDeploymentStates[currentMachineDeploymentTopologyName]
  2202  
  2203  					// Copy over the names of the newly created InfrastructureRef and Bootstrap.ConfigRef because they get a generated name.
  2204  					wantMachineDeploymentState.Object.Spec.Template.Spec.InfrastructureRef.Name = gotMachineDeployment.Spec.Template.Spec.InfrastructureRef.Name
  2205  					if gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
  2206  						wantMachineDeploymentState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name
  2207  					}
  2208  
  2209  					// Compare MachineDeployment.
  2210  					// Note: We're intentionally only comparing Spec as otherwise we would have to account for
  2211  					// empty vs. filled out TypeMeta.
  2212  					g.Expect(gotMachineDeployment.Spec).To(BeComparableTo(wantMachineDeploymentState.Object.Spec))
  2213  
  2214  					// Compare BootstrapTemplate.
  2215  					gotBootstrapTemplateRef := gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef
  2216  					gotBootstrapTemplate := unstructured.Unstructured{}
  2217  					gotBootstrapTemplate.SetKind(gotBootstrapTemplateRef.Kind)
  2218  					gotBootstrapTemplate.SetAPIVersion(gotBootstrapTemplateRef.APIVersion)
  2219  
  2220  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2221  						Namespace: gotBootstrapTemplateRef.Namespace,
  2222  						Name:      gotBootstrapTemplateRef.Name,
  2223  					}, &gotBootstrapTemplate)
  2224  
  2225  					g.Expect(err).ToNot(HaveOccurred())
  2226  
  2227  					g.Expect(&gotBootstrapTemplate).To(EqualObject(wantMachineDeploymentState.BootstrapTemplate, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2228  
  2229  					// Check BootstrapTemplate rotation if there was a previous MachineDeployment/Template.
  2230  					if currentMachineDeploymentState != nil && currentMachineDeploymentState.BootstrapTemplate != nil {
  2231  						if tt.wantBootstrapTemplateRotation[gotMachineDeployment.Name] {
  2232  							g.Expect(currentMachineDeploymentState.BootstrapTemplate.GetName()).ToNot(Equal(gotBootstrapTemplate.GetName()))
  2233  						} else {
  2234  							g.Expect(currentMachineDeploymentState.BootstrapTemplate.GetName()).To(Equal(gotBootstrapTemplate.GetName()))
  2235  						}
  2236  					}
  2237  
  2238  					// Compare InfrastructureMachineTemplate.
  2239  					gotInfrastructureMachineTemplateRef := gotMachineDeployment.Spec.Template.Spec.InfrastructureRef
  2240  					gotInfrastructureMachineTemplate := unstructured.Unstructured{}
  2241  					gotInfrastructureMachineTemplate.SetKind(gotInfrastructureMachineTemplateRef.Kind)
  2242  					gotInfrastructureMachineTemplate.SetAPIVersion(gotInfrastructureMachineTemplateRef.APIVersion)
  2243  
  2244  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2245  						Namespace: gotInfrastructureMachineTemplateRef.Namespace,
  2246  						Name:      gotInfrastructureMachineTemplateRef.Name,
  2247  					}, &gotInfrastructureMachineTemplate)
  2248  
  2249  					g.Expect(err).ToNot(HaveOccurred())
  2250  
  2251  					g.Expect(&gotInfrastructureMachineTemplate).To(EqualObject(wantMachineDeploymentState.InfrastructureMachineTemplate, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2252  
  2253  					// Check InfrastructureMachineTemplate rotation if there was a previous MachineDeployment/Template.
  2254  					if currentMachineDeploymentState != nil && currentMachineDeploymentState.InfrastructureMachineTemplate != nil {
  2255  						if tt.wantInfrastructureMachineTemplateRotation[gotMachineDeployment.Name] {
  2256  							g.Expect(currentMachineDeploymentState.InfrastructureMachineTemplate.GetName()).ToNot(Equal(gotInfrastructureMachineTemplate.GetName()))
  2257  						} else {
  2258  							g.Expect(currentMachineDeploymentState.InfrastructureMachineTemplate.GetName()).To(Equal(gotInfrastructureMachineTemplate.GetName()))
  2259  						}
  2260  					}
  2261  				}
  2262  			}
  2263  		})
  2264  	}
  2265  }
  2266  
  2267  func TestReconcileMachineDeploymentsCleanup(t *testing.T) {
  2268  	t.Run("cleanup InfrastructureMachineTemplate and BootstrapTemplate in case of errors on creation", func(t *testing.T) {
  2269  		g := NewWithT(t)
  2270  
  2271  		infrastructureMachineTemplate1 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-1").Build()
  2272  		bootstrapTemplate1 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  2273  		md1 := newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate1, bootstrapTemplate1, nil)
  2274  
  2275  		// Create namespace and modify input to have correct namespace set
  2276  		namespace, err := env.CreateNamespace(ctx, "reconcile-machine-deployments")
  2277  		g.Expect(err).ToNot(HaveOccurred())
  2278  		md1 = prepareMachineDeploymentState(md1, namespace.GetName())
  2279  
  2280  		s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2281  		s.Current.MachineDeployments = map[string]*scope.MachineDeploymentState{}
  2282  		s.Desired = &scope.ClusterState{
  2283  			MachineDeployments: map[string]*scope.MachineDeploymentState{
  2284  				md1.Object.Name: md1,
  2285  			},
  2286  		}
  2287  
  2288  		// Force md creation to fail
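        		// Note: The reconciler is expected to create the new InfrastructureMachineTemplate and BootstrapTemplate
        		// before the MachineDeployment itself; because the MachineDeployment create fails (non-existing namespace),
        		// the IsNotFound checks below assert that those freshly created templates are cleaned up again.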
  2289  		s.Desired.MachineDeployments[md1.Object.Name].Object.Namespace = "do-not-exist"
  2290  
  2291  		r := Reconciler{
  2292  			Client:             env.GetClient(),
  2293  			APIReader:          env.GetAPIReader(),
  2294  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2295  			recorder:           env.GetEventRecorderFor("test"),
  2296  		}
  2297  		err = r.reconcileMachineDeployments(ctx, s)
  2298  		g.Expect(err).To(HaveOccurred())
  2299  
  2300  		gotBootstrapTemplateRef := md1.Object.Spec.Template.Spec.Bootstrap.ConfigRef
  2301  		gotBootstrapTemplate := unstructured.Unstructured{}
  2302  		gotBootstrapTemplate.SetKind(gotBootstrapTemplateRef.Kind)
  2303  		gotBootstrapTemplate.SetAPIVersion(gotBootstrapTemplateRef.APIVersion)
  2304  
  2305  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2306  			Namespace: gotBootstrapTemplateRef.Namespace,
  2307  			Name:      gotBootstrapTemplateRef.Name,
  2308  		}, &gotBootstrapTemplate)
  2309  
  2310  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2311  
  2312  		gotInfrastructureMachineTemplateRef := md1.Object.Spec.Template.Spec.InfrastructureRef
  2313  		gotInfrastructureMachineTemplate := unstructured.Unstructured{}
  2314  		gotInfrastructureMachineTemplate.SetKind(gotInfrastructureMachineTemplateRef.Kind)
  2315  		gotInfrastructureMachineTemplate.SetAPIVersion(gotInfrastructureMachineTemplateRef.APIVersion)
  2316  
  2317  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2318  			Namespace: gotInfrastructureMachineTemplateRef.Namespace,
  2319  			Name:      gotInfrastructureMachineTemplateRef.Name,
  2320  		}, &gotInfrastructureMachineTemplate)
  2321  
  2322  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2323  	})
  2324  	t.Run("cleanup InfrastructureMachineTemplate and BootstrapTemplate in case of errors on upgrade", func(t *testing.T) {
  2325  		g := NewWithT(t)
  2326  
  2327  		infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-2").Build()
  2328  		bootstrapTemplate2 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-2").Build()
  2329  		md2 := newFakeMachineDeploymentTopologyState("md-2", infrastructureMachineTemplate2, bootstrapTemplate2, nil)
  2330  
  2331  		bootstrapTemplate2WithChanges := bootstrapTemplate2.DeepCopy()
  2332  		g.Expect(unstructured.SetNestedField(bootstrapTemplate2WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  2333  		infrastructureMachineTemplate2WithChanges := infrastructureMachineTemplate2.DeepCopy()
  2334  		g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate2WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  2335  		md2WithTemplateChanges := newFakeMachineDeploymentTopologyState(md2.Object.Name, infrastructureMachineTemplate2WithChanges, bootstrapTemplate2WithChanges, nil)
  2336  
  2337  		// Create namespace and modify input to have correct namespace set
  2338  		namespace, err := env.CreateNamespace(ctx, "reconcile-machine-deployments")
  2339  		g.Expect(err).ToNot(HaveOccurred())
  2340  		md2 = prepareMachineDeploymentState(md2, namespace.GetName())
  2341  		md2WithTemplateChanges = prepareMachineDeploymentState(md2WithTemplateChanges, namespace.GetName())
  2342  
  2343  		s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2344  		s.Current.MachineDeployments = map[string]*scope.MachineDeploymentState{
  2345  			md2.Object.Name: md2,
  2346  		}
  2347  		s.Desired = &scope.ClusterState{
  2348  			MachineDeployments: map[string]*scope.MachineDeploymentState{
  2349  				md2WithTemplateChanges.Object.Name: md2WithTemplateChanges,
  2350  			},
  2351  		}
  2352  
  2353  		// Force md upgrade to fail
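        		// Note: Because the desired templates differ from the current ones, the reconciler is expected to
        		// create rotated (new) templates before updating the MachineDeployment; since the update fails,
        		// the IsNotFound checks below assert that the newly created templates are cleaned up again.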
  2354  		s.Desired.MachineDeployments[md2WithTemplateChanges.Object.Name].Object.Namespace = "do-not-exist"
  2355  
  2356  		r := Reconciler{
  2357  			Client:             env.GetClient(),
  2358  			APIReader:          env.GetAPIReader(),
  2359  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2360  			recorder:           env.GetEventRecorderFor("test"),
  2361  		}
  2362  		err = r.reconcileMachineDeployments(ctx, s)
  2363  		g.Expect(err).To(HaveOccurred())
  2364  
  2365  		newBootstrapTemplateRef := md2WithTemplateChanges.Object.Spec.Template.Spec.Bootstrap.ConfigRef
  2366  		newBootstrapTemplate := unstructured.Unstructured{}
  2367  		newBootstrapTemplate.SetKind(newBootstrapTemplateRef.Kind)
  2368  		newBootstrapTemplate.SetAPIVersion(newBootstrapTemplateRef.APIVersion)
  2369  
  2370  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2371  			Namespace: newBootstrapTemplateRef.Namespace,
  2372  			Name:      newBootstrapTemplateRef.Name,
  2373  		}, &newBootstrapTemplate)
  2374  
  2375  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2376  
  2377  		newInfrastructureMachineTemplateRef := md2WithTemplateChanges.Object.Spec.Template.Spec.InfrastructureRef
  2378  		newInfrastructureMachineTemplate := unstructured.Unstructured{}
  2379  		newInfrastructureMachineTemplate.SetKind(newInfrastructureMachineTemplateRef.Kind)
  2380  		newInfrastructureMachineTemplate.SetAPIVersion(newInfrastructureMachineTemplateRef.APIVersion)
  2381  
  2382  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2383  			Namespace: newInfrastructureMachineTemplateRef.Namespace,
  2384  			Name:      newInfrastructureMachineTemplateRef.Name,
  2385  		}, &newInfrastructureMachineTemplate)
  2386  
  2387  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2388  	})
  2389  }
  2390  
  2391  func TestReconcileMachinePools(t *testing.T) {
  2392  	g := NewWithT(t)
  2393  
  2394  	infrastructureMachinePool1 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-1").Build()
  2395  	bootstrapConfig1 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  2396  	mp1 := newFakeMachinePoolTopologyState("mp-1", infrastructureMachinePool1, bootstrapConfig1)
  2397  
  2398  	upgradeTrackerWithmp1PendingCreate := scope.NewUpgradeTracker()
  2399  	upgradeTrackerWithmp1PendingCreate.MachinePools.MarkPendingCreate("mp-1-topology")
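        	// Note: The upgrade tracker is keyed by the MachinePool topology name; newFakeMachinePoolTopologyState
        	// presumably derives it as "<name>-topology", hence "mp-1-topology" here rather than "mp-1".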
  2400  
  2401  	infrastructureMachinePool2 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-2").Build()
  2402  	bootstrapConfig2 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-2").Build()
  2403  	mp2 := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2, bootstrapConfig2)
  2404  	infrastructureMachinePool2WithChanges := infrastructureMachinePool2.DeepCopy()
  2405  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool2WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2406  	mp2WithChangedInfrastructureMachinePool := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2WithChanges, bootstrapConfig2)
  2407  	upgradeTrackerWithmp2PendingUpgrade := scope.NewUpgradeTracker()
  2408  	upgradeTrackerWithmp2PendingUpgrade.MachinePools.MarkPendingUpgrade("mp-2")
  2409  
  2410  	infrastructureMachinePool3 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-3").Build()
  2411  	bootstrapConfig3 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-3").Build()
  2412  	mp3 := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3)
  2413  	bootstrapConfig3WithChanges := bootstrapConfig3.DeepCopy()
  2414  	g.Expect(unstructured.SetNestedField(bootstrapConfig3WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2415  	mp3WithChangedbootstrapConfig := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChanges)
  2416  	bootstrapConfig3WithChangeKind := bootstrapConfig3.DeepCopy()
  2417  	bootstrapConfig3WithChangeKind.SetKind("AnotherGenericbootstrapConfig")
  2418  	mp3WithChangedbootstrapConfigChangedKind := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChangeKind)
  2419  
  2420  	infrastructureMachinePool4 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-4").Build()
  2421  	bootstrapConfig4 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-4").Build()
  2422  	mp4 := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4, bootstrapConfig4)
  2423  	infrastructureMachinePool4WithChanges := infrastructureMachinePool4.DeepCopy()
  2424  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool4WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2425  	bootstrapConfig4WithChanges := bootstrapConfig4.DeepCopy()
  2426  	g.Expect(unstructured.SetNestedField(bootstrapConfig4WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2427  	mp4WithChangedObjects := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4WithChanges, bootstrapConfig4WithChanges)
  2428  
  2429  	infrastructureMachinePool5 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-5").Build()
  2430  	bootstrapConfig5 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-5").Build()
  2431  	mp5 := newFakeMachinePoolTopologyState("mp-5", infrastructureMachinePool5, bootstrapConfig5)
  2432  	infrastructureMachinePool5WithChangedKind := infrastructureMachinePool5.DeepCopy()
  2433  	infrastructureMachinePool5WithChangedKind.SetKind("ChangedKind")
  2434  	mp5WithChangedinfrastructureMachinePoolKind := newFakeMachinePoolTopologyState("mp-5", infrastructureMachinePool5WithChangedKind, bootstrapConfig5)
  2435  
  2436  	infrastructureMachinePool6 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-6").Build()
  2437  	bootstrapConfig6 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-6").Build()
  2438  	mp6 := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6)
  2439  	bootstrapConfig6WithChangedNamespace := bootstrapConfig6.DeepCopy()
  2440  	bootstrapConfig6WithChangedNamespace.SetNamespace("ChangedNamespace")
  2441  	mp6WithChangedbootstrapConfigNamespace := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6WithChangedNamespace)
  2442  
  2443  	infrastructureMachinePool7 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-7").Build()
  2444  	bootstrapConfig7 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-7").Build()
  2445  	mp7 := newFakeMachinePoolTopologyState("mp-7", infrastructureMachinePool7, bootstrapConfig7)
  2446  
  2447  	infrastructureMachinePool8Create := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-create").Build()
  2448  	bootstrapConfig8Create := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-create").Build()
  2449  	mp8Create := newFakeMachinePoolTopologyState("mp-8-create", infrastructureMachinePool8Create, bootstrapConfig8Create)
  2450  	infrastructureMachinePool8Delete := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-delete").Build()
  2451  	bootstrapConfig8Delete := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-delete").Build()
  2452  	mp8Delete := newFakeMachinePoolTopologyState("mp-8-delete", infrastructureMachinePool8Delete, bootstrapConfig8Delete)
  2453  	infrastructureMachinePool8Update := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-update").Build()
  2454  	bootstrapConfig8Update := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-update").Build()
  2455  	mp8Update := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8Update, bootstrapConfig8Update)
  2456  	infrastructureMachinePool8UpdateWithChanges := infrastructureMachinePool8Update.DeepCopy()
  2457  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2458  	bootstrapConfig8UpdateWithChanges := bootstrapConfig8Update.DeepCopy()
  2459  	g.Expect(unstructured.SetNestedField(bootstrapConfig8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2460  	mp8UpdateWithChangedObjects := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8UpdateWithChanges, bootstrapConfig8UpdateWithChanges)
  2461  
  2462  	infrastructureMachinePool9m := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-9m").Build()
  2463  	bootstrapConfig9m := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-9m").Build()
  2464  	mp9 := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m)
  2465  	mp9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  2466  	mp9WithInstanceSpecificTemplateMetadata := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m)
  2467  	mp9WithInstanceSpecificTemplateMetadata.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"}
  2468  
  2469  	tests := []struct {
  2470  		name                                      string
  2471  		current                                   []*scope.MachinePoolState
  2472  		currentOnlyAPIServer                      []*scope.MachinePoolState
  2473  		desired                                   []*scope.MachinePoolState
  2474  		upgradeTracker                            *scope.UpgradeTracker
  2475  		want                                      []*scope.MachinePoolState
  2476  		wantInfrastructureMachinePoolObjectUpdate map[string]bool
  2477  		wantBootstrapObjectUpdate                 map[string]bool
  2478  		wantErr                                   bool
  2479  	}{
  2480  		{
  2481  			name:    "Should create desired MachinePool if the current does not exist yet",
  2482  			current: nil,
  2483  			desired: []*scope.MachinePoolState{mp1},
  2484  			want:    []*scope.MachinePoolState{mp1},
  2485  			wantErr: false,
  2486  		},
  2487  		{
  2488  			name:                 "Should skip creating desired MachinePool if it already exists in the apiserver (even if it is not in current state)",
  2489  			current:              nil,
  2490  			currentOnlyAPIServer: []*scope.MachinePoolState{mp1},
  2491  			desired:              []*scope.MachinePoolState{mp1},
  2492  			want:                 []*scope.MachinePoolState{mp1},
  2493  			wantErr:              false,
  2494  		},
  2495  		{
  2496  			name:           "Should not create desired MachinePool if the current does not exist yet and it is marked as pending create",
  2497  			current:        nil,
  2498  			upgradeTracker: upgradeTrackerWithmp1PendingCreate,
  2499  			desired:        []*scope.MachinePoolState{mp1},
  2500  			want:           nil,
  2501  			wantErr:        false,
  2502  		},
  2503  		{
  2504  			name:    "No-op if current MachinePool is equal to desired",
  2505  			current: []*scope.MachinePoolState{mp1},
  2506  			desired: []*scope.MachinePoolState{mp1},
  2507  			want:    []*scope.MachinePoolState{mp1},
  2508  			wantErr: false,
  2509  		},
  2510  		{
  2511  			name:    "Should update InfrastructureMachinePool",
  2512  			current: []*scope.MachinePoolState{mp2},
  2513  			desired: []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2514  			want:    []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2515  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": true},
  2516  			wantErr: false,
  2517  		},
  2518  		{
  2519  			name:           "Should not update InfrastructureMachinePool if MachinePool is pending upgrade",
  2520  			current:        []*scope.MachinePoolState{mp2},
  2521  			desired:        []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2522  			upgradeTracker: upgradeTrackerWithmp2PendingUpgrade,
  2523  			want:           []*scope.MachinePoolState{mp2},
  2524  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": false},
  2525  			wantErr: false,
  2526  		},
  2527  		{
  2528  			name:                      "Should update BootstrapConfig",
  2529  			current:                   []*scope.MachinePoolState{mp3},
  2530  			desired:                   []*scope.MachinePoolState{mp3WithChangedbootstrapConfig},
  2531  			want:                      []*scope.MachinePoolState{mp3WithChangedbootstrapConfig},
  2532  			wantBootstrapObjectUpdate: map[string]bool{"mp-3": true},
  2533  			wantErr:                   false,
  2534  		},
  2535  		{
  2536  			name:    "Should fail update MachinePool because of changed BootstrapConfig kind",
  2537  			current: []*scope.MachinePoolState{mp3},
  2538  			desired: []*scope.MachinePoolState{mp3WithChangedbootstrapConfigChangedKind},
  2539  			wantErr: true,
  2540  		},
  2541  		{
  2542  			name:    "Should update InfrastructureMachinePool and BootstrapConfig",
  2543  			current: []*scope.MachinePoolState{mp4},
  2544  			desired: []*scope.MachinePoolState{mp4WithChangedObjects},
  2545  			want:    []*scope.MachinePoolState{mp4WithChangedObjects},
  2546  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-4": true},
  2547  			wantBootstrapObjectUpdate:                 map[string]bool{"mp-4": true},
  2548  			wantErr:                                   false,
  2549  		},
  2550  		{
  2551  			name:    "Should fail update MachinePool because of changed InfrastructureMachinePool kind",
  2552  			current: []*scope.MachinePoolState{mp5},
  2553  			desired: []*scope.MachinePoolState{mp5WithChangedinfrastructureMachinePoolKind},
  2554  			wantErr: true,
  2555  		},
  2556  		{
  2557  			name:    "Should fail update MachinePool because of changed bootstrapConfig namespace",
  2558  			current: []*scope.MachinePoolState{mp6},
  2559  			desired: []*scope.MachinePoolState{mp6WithChangedbootstrapConfigNamespace},
  2560  			wantErr: true,
  2561  		},
  2562  		{
  2563  			name:    "Should delete MachinePool",
  2564  			current: []*scope.MachinePoolState{mp7},
  2565  			desired: []*scope.MachinePoolState{},
  2566  			want:    []*scope.MachinePoolState{},
  2567  			wantErr: false,
  2568  		},
  2569  		{
  2570  			name:    "Should create, update and delete MachinePools",
  2571  			current: []*scope.MachinePoolState{mp8Update, mp8Delete},
  2572  			desired: []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects},
  2573  			want:    []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects},
  2574  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-8-update": true},
  2575  			wantBootstrapObjectUpdate:                 map[string]bool{"mp-8-update": true},
  2576  			wantErr:                                   false,
  2577  		},
  2578  		{
  2579  			name:    "Enforce template metadata",
  2580  			current: []*scope.MachinePoolState{mp9WithInstanceSpecificTemplateMetadata},
  2581  			desired: []*scope.MachinePoolState{mp9},
  2582  			want:    []*scope.MachinePoolState{mp9},
  2583  			wantErr: false,
  2584  		},
  2585  	}
  2586  	for _, tt := range tests {
  2587  		t.Run(tt.name, func(t *testing.T) {
  2588  			g := NewWithT(t)
  2589  
  2590  			// Create namespace and modify input to have correct namespace set
  2591  			namespace, err := env.CreateNamespace(ctx, "reconcile-machine-pools")
  2592  			g.Expect(err).ToNot(HaveOccurred())
  2593  			for i, s := range tt.current {
  2594  				tt.current[i] = prepareMachinePoolState(s, namespace.GetName())
  2595  			}
  2596  			for i, s := range tt.desired {
  2597  				tt.desired[i] = prepareMachinePoolState(s, namespace.GetName())
  2598  			}
  2599  			for i, s := range tt.want {
  2600  				tt.want[i] = prepareMachinePoolState(s, namespace.GetName())
  2601  			}
  2602  
  2603  			for _, s := range tt.current {
  2604  				g.Expect(env.PatchAndWait(ctx, s.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2605  				g.Expect(env.PatchAndWait(ctx, s.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2606  				g.Expect(env.PatchAndWait(ctx, s.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2607  			}
  2608  
  2609  			currentMachinePoolStates := toMachinePoolTopologyStateMap(tt.current)
  2610  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2611  			s.Current.MachinePools = currentMachinePoolStates
  2612  
  2613  			// currentOnlyAPIServer MachinePools only exist in the API server but are not part of s.Current.
  2614  			// This simulates getCurrentMachinePoolState in current_state.go reading a stale MachinePool list.
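        			// Applying them with the topology manager as field owner makes them look like MachinePools
        			// previously created by the topology controller, so reconcile is expected to skip re-creating
        			// them (see the "Should skip creating desired MachinePool ..." test case above).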
  2615  			for _, s := range tt.currentOnlyAPIServer {
  2616  				mpState := prepareMachinePoolState(s, namespace.GetName())
  2617  
  2618  				g.Expect(env.PatchAndWait(ctx, mpState.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2619  				g.Expect(env.PatchAndWait(ctx, mpState.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2620  				g.Expect(env.PatchAndWait(ctx, mpState.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2621  			}
  2622  
  2623  			s.Desired = &scope.ClusterState{MachinePools: toMachinePoolTopologyStateMap(tt.desired)}
  2624  
  2625  			if tt.upgradeTracker != nil {
  2626  				s.UpgradeTracker = tt.upgradeTracker
  2627  			}
  2628  
  2629  			r := Reconciler{
  2630  				Client:             env.GetClient(),
  2631  				APIReader:          env.GetAPIReader(),
  2632  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2633  				recorder:           env.GetEventRecorderFor("test"),
  2634  			}
  2635  			err = r.reconcileMachinePools(ctx, s)
  2636  			if tt.wantErr {
  2637  				g.Expect(err).To(HaveOccurred())
  2638  				return
  2639  			}
  2640  			g.Expect(err).ToNot(HaveOccurred())
  2641  
  2642  			var gotMachinePoolList expv1.MachinePoolList
  2643  			g.Expect(env.GetAPIReader().List(ctx, &gotMachinePoolList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  2644  			g.Expect(gotMachinePoolList.Items).To(HaveLen(len(tt.want)))
  2645  
  2646  			if tt.want == nil {
  2647  				// No MachinePools should exist.
  2648  				g.Expect(gotMachinePoolList.Items).To(BeEmpty())
  2649  			}
  2650  
  2651  			for _, wantMachinePoolState := range tt.want {
  2652  				for _, gotMachinePool := range gotMachinePoolList.Items {
  2653  					if wantMachinePoolState.Object.Name != gotMachinePool.Name {
  2654  						continue
  2655  					}
  2656  					currentMachinePoolTopologyName := wantMachinePoolState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
  2657  					currentMachinePoolState := currentMachinePoolStates[currentMachinePoolTopologyName]
  2658  
  2659  					// Copy over the names of the newly created InfrastructureRef and Bootstrap.ConfigRef because they get generated names
  2660  					wantMachinePoolState.Object.Spec.Template.Spec.InfrastructureRef.Name = gotMachinePool.Spec.Template.Spec.InfrastructureRef.Name
  2661  					if gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
  2662  						wantMachinePoolState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name
  2663  					}
  2664  
  2665  					// Compare MachinePool.
  2666  					// Note: We're intentionally only comparing Spec as otherwise we would have to account for
  2667  					// empty vs. filled out TypeMeta.
  2668  					g.Expect(gotMachinePool.Spec).To(BeComparableTo(wantMachinePoolState.Object.Spec))
  2669  
  2670  					// Compare BootstrapObject.
  2671  					gotBootstrapObjectRef := gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef
  2672  					gotBootstrapObject := unstructured.Unstructured{}
  2673  					gotBootstrapObject.SetKind(gotBootstrapObjectRef.Kind)
  2674  					gotBootstrapObject.SetAPIVersion(gotBootstrapObjectRef.APIVersion)
  2675  
  2676  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2677  						Namespace: gotBootstrapObjectRef.Namespace,
  2678  						Name:      gotBootstrapObjectRef.Name,
  2679  					}, &gotBootstrapObject)
  2680  
  2681  					g.Expect(err).ToNot(HaveOccurred())
  2682  
  2683  					g.Expect(&gotBootstrapObject).To(EqualObject(wantMachinePoolState.BootstrapObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2684  
  2685  					// Check BootstrapObject update.
  2686  					if currentMachinePoolState != nil && currentMachinePoolState.BootstrapObject != nil {
  2687  						if tt.wantBootstrapObjectUpdate[gotMachinePool.Name] {
  2688  							g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).ToNot(Equal(gotBootstrapObject.GetResourceVersion()))
  2689  						} else {
  2690  							g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).To(Equal(gotBootstrapObject.GetResourceVersion()))
  2691  						}
  2692  					}
  2693  
  2694  					// Compare InfrastructureMachinePoolObject.
  2695  					gotInfrastructureMachinePoolObjectRef := gotMachinePool.Spec.Template.Spec.InfrastructureRef
  2696  					gotInfrastructureMachinePoolObject := unstructured.Unstructured{}
  2697  					gotInfrastructureMachinePoolObject.SetKind(gotInfrastructureMachinePoolObjectRef.Kind)
  2698  					gotInfrastructureMachinePoolObject.SetAPIVersion(gotInfrastructureMachinePoolObjectRef.APIVersion)
  2699  
  2700  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2701  						Namespace: gotInfrastructureMachinePoolObjectRef.Namespace,
  2702  						Name:      gotInfrastructureMachinePoolObjectRef.Name,
  2703  					}, &gotInfrastructureMachinePoolObject)
  2704  
  2705  					g.Expect(err).ToNot(HaveOccurred())
  2706  
  2707  					g.Expect(&gotInfrastructureMachinePoolObject).To(EqualObject(wantMachinePoolState.InfrastructureMachinePoolObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2708  
  2709  					// Check InfrastructureMachinePoolObject update.
  2710  					if currentMachinePoolState != nil && currentMachinePoolState.InfrastructureMachinePoolObject != nil {
  2711  						if tt.wantInfrastructureMachinePoolObjectUpdate[gotMachinePool.Name] {
  2712  							g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).ToNot(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion()))
  2713  						} else {
  2714  							g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).To(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion()))
  2715  						}
  2716  					}
  2717  				}
  2718  			}
  2719  		})
  2720  	}
  2721  }
  2722  
  2723  func TestReconcileMachinePoolsCleanup(t *testing.T) {
  2724  	infrastructureMachinePool1 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-1").Build()
  2725  	bootstrapConfig1 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  2726  	mp1 := newFakeMachinePoolTopologyState("mp-1", infrastructureMachinePool1, bootstrapConfig1)
  2727  
  2728  	t.Run("cleanup InfrastructureMachinePool and BootstrapConfig in case of errors", func(t *testing.T) {
  2729  		g := NewWithT(t)
  2730  
  2731  		// Create namespace and modify input to have correct namespace set
  2732  		namespace, err := env.CreateNamespace(ctx, "reconcile-machine-pools")
  2733  		g.Expect(err).ToNot(HaveOccurred())
  2734  		mp1 = prepareMachinePoolState(mp1, namespace.GetName())
  2735  
  2736  		s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2737  		s.Current.MachinePools = map[string]*scope.MachinePoolState{}
  2738  		s.Desired = &scope.ClusterState{
  2739  			MachinePools: map[string]*scope.MachinePoolState{
  2740  				mp1.Object.Name: mp1,
  2741  			},
  2742  		}
  2743  
  2744  		// Force mp creation to fail
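        		// Note: The reconciler is expected to create the new InfrastructureMachinePool and BootstrapConfig
        		// objects before the MachinePool itself; because the MachinePool create fails (non-existing namespace),
        		// the IsNotFound checks below assert that those freshly created objects are cleaned up again.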
  2745  		s.Desired.MachinePools[mp1.Object.Name].Object.Namespace = "do-not-exist"
  2746  
  2747  		r := Reconciler{
  2748  			Client:             env.GetClient(),
  2749  			APIReader:          env.GetAPIReader(),
  2750  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2751  			recorder:           env.GetEventRecorderFor("test"),
  2752  		}
  2753  		err = r.reconcileMachinePools(ctx, s)
  2754  		g.Expect(err).To(HaveOccurred())
  2755  
  2756  		gotBootstrapObjectRef := mp1.Object.Spec.Template.Spec.Bootstrap.ConfigRef
  2757  		gotBootstrapObject := unstructured.Unstructured{}
  2758  		gotBootstrapObject.SetKind(gotBootstrapObjectRef.Kind)
  2759  		gotBootstrapObject.SetAPIVersion(gotBootstrapObjectRef.APIVersion)
  2760  
  2761  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2762  			Namespace: gotBootstrapObjectRef.Namespace,
  2763  			Name:      gotBootstrapObjectRef.Name,
  2764  		}, &gotBootstrapObject)
  2765  
  2766  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2767  
  2768  		gotInfrastructureMachinePoolObjectRef := mp1.Object.Spec.Template.Spec.InfrastructureRef
  2769  		gotInfrastructureMachinePoolObject := unstructured.Unstructured{}
  2770  		gotInfrastructureMachinePoolObject.SetKind(gotInfrastructureMachinePoolObjectRef.Kind)
  2771  		gotInfrastructureMachinePoolObject.SetAPIVersion(gotInfrastructureMachinePoolObjectRef.APIVersion)
  2772  
  2773  		err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2774  			Namespace: gotInfrastructureMachinePoolObjectRef.Namespace,
  2775  			Name:      gotInfrastructureMachinePoolObjectRef.Name,
  2776  		}, &gotInfrastructureMachinePoolObject)
  2777  
  2778  		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  2779  	})
  2780  }
  2781  
  2782  // TestReconcileReferencedObjectSequences tests multiple subsequent calls to reconcileReferencedObject
  2783  // for a control-plane object to verify that the objects are reconciled as expected by tracking managed fields correctly.
  2784  // NOTE: By extension this test validates managed field handling in mergePatches, and thus its usage in other parts of the
  2785  // codebase.
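        // In essence, every reconcile step server-side-applies the desired object with the topology manager as
        // field owner (via serverSideApplyPatchHelperFactory), while externalStep simulates a different field
        // owner ("other-controller"). Fields dropped from the desired object are therefore removed, while fields
        // owned only by other managers are preserved. Roughly, as an illustrative sketch of server-side apply
        // (not the exact call used by the reconciler):
        //
        //	obj := desired.DeepCopy()
        //	err := env.Patch(ctx, obj, client.Apply, client.FieldOwner(structuredmerge.TopologyManagerName), client.ForceOwnership)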
  2786  func TestReconcileReferencedObjectSequences(t *testing.T) {
  2787  	// g := NewWithT(t)
  2788  	// Write the config file to access the test env for debugging.
  2789  	// g.Expect(os.WriteFile("test.conf", kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
  2790  	// 	ObjectMeta: metav1.ObjectMeta{Name: "test"},
  2791  	// }), 0777)).To(Succeed())
  2792  
  2793  	type object struct {
  2794  		spec map[string]interface{}
  2795  	}
  2796  
  2797  	type externalStep struct {
  2798  		name string
  2799  		// object is the state of the control-plane object after an external modification.
  2800  		object object
  2801  	}
  2802  	type reconcileStep struct {
  2803  		name string
  2804  		// desired is the desired control-plane object handed over to reconcileReferencedObject.
  2805  		desired object
  2806  		// want is the expected control-plane object after calling reconcileReferencedObject.
  2807  		want        object
  2808  		wantCreated bool
  2809  	}
  2810  
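        	// Tests interleave reconcileSteps and, optionally, externalSteps: an externalStep writes to the object
        	// as a separate field manager ("other-controller"), which lets the following reconcileStep verify how
        	// managed fields from different owners are merged and dropped.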
  2811  	tests := []struct {
  2812  		name           string
  2813  		reconcileSteps []interface{}
  2814  	}{
  2815  		{
  2816  			name: "Should drop nested field",
  2817  			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
  2818  			// and, more specifically, it verifies that when a field in a template is deleted, it gets deleted
  2819  			// from the generated object (and is not treated as an instance-specific value).
  2820  			reconcileSteps: []interface{}{
  2821  				reconcileStep{
  2822  					name: "Initially reconcile KCP",
  2823  					desired: object{
  2824  						spec: map[string]interface{}{
  2825  							"kubeadmConfigSpec": map[string]interface{}{
  2826  								"clusterConfiguration": map[string]interface{}{
  2827  									"controllerManager": map[string]interface{}{
  2828  										"extraArgs": map[string]interface{}{
  2829  											"v": "4",
  2830  										},
  2831  									},
  2832  								},
  2833  							},
  2834  						},
  2835  					},
  2836  					want: object{
  2837  						spec: map[string]interface{}{
  2838  							"kubeadmConfigSpec": map[string]interface{}{
  2839  								"clusterConfiguration": map[string]interface{}{
  2840  									"controllerManager": map[string]interface{}{
  2841  										"extraArgs": map[string]interface{}{
  2842  											"v": "4",
  2843  										},
  2844  									},
  2845  								},
  2846  							},
  2847  						},
  2848  					},
  2849  					wantCreated: true,
  2850  				},
  2851  				reconcileStep{
  2852  					name: "Drop v",
  2853  					desired: object{
  2854  						spec: nil,
  2855  					},
  2856  					want: object{
  2857  						spec: nil,
  2858  					},
  2859  				},
  2860  			},
  2861  		},
  2862  		{
  2863  			name: "Should drop label",
  2864  			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
  2865  			// and, more specifically, it verifies that when a template label is deleted, it gets deleted
  2866  			// from the generated object (and is not treated as an instance-specific value).
  2867  			reconcileSteps: []interface{}{
  2868  				reconcileStep{
  2869  					name: "Initially reconcile KCP",
  2870  					desired: object{
  2871  						spec: map[string]interface{}{
  2872  							"machineTemplate": map[string]interface{}{
  2873  								"metadata": map[string]interface{}{
  2874  									"labels": map[string]interface{}{
  2875  										"label.with.dots/owned": "true",
  2876  										"anotherLabel":          "true",
  2877  									},
  2878  								},
  2879  							},
  2880  						},
  2881  					},
  2882  					want: object{
  2883  						spec: map[string]interface{}{
  2884  							"machineTemplate": map[string]interface{}{
  2885  								"metadata": map[string]interface{}{
  2886  									"labels": map[string]interface{}{
  2887  										"label.with.dots/owned": "true",
  2888  										"anotherLabel":          "true",
  2889  									},
  2890  								},
  2891  							},
  2892  						},
  2893  					},
  2894  					wantCreated: true,
  2895  				},
  2896  				reconcileStep{
  2897  					name: "Drop the label with dots",
  2898  					desired: object{
  2899  						spec: map[string]interface{}{
  2900  							"machineTemplate": map[string]interface{}{
  2901  								"metadata": map[string]interface{}{
  2902  									"labels": map[string]interface{}{
  2903  										// label.with.dots/owned has been removed by e.g. a change in Cluster.Topology.ControlPlane.Labels.
  2904  										"anotherLabel": "true",
  2905  									},
  2906  								},
  2907  							},
  2908  						},
  2909  					},
  2910  					want: object{
  2911  						spec: map[string]interface{}{
  2912  							"machineTemplate": map[string]interface{}{
  2913  								"metadata": map[string]interface{}{
  2914  									"labels": map[string]interface{}{
  2915  										// Reconcile to drop label.with.dots/owned label.
  2916  										"anotherLabel": "true",
  2917  									},
  2918  								},
  2919  							},
  2920  						},
  2921  					},
  2922  				},
  2923  			},
  2924  		},
  2925  		{
  2926  			name: "Should enforce field",
  2927  			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
  2928  			// by reverting user changes to a managed field.
  2929  			reconcileSteps: []interface{}{
  2930  				reconcileStep{
  2931  					name: "Initially reconcile",
  2932  					desired: object{
  2933  						spec: map[string]interface{}{
  2934  							"foo": "ccValue",
  2935  						},
  2936  					},
  2937  					want: object{
  2938  						spec: map[string]interface{}{
  2939  							"foo": "ccValue",
  2940  						},
  2941  					},
  2942  					wantCreated: true,
  2943  				},
  2944  				externalStep{
  2945  					name: "User changes value",
  2946  					object: object{
  2947  						spec: map[string]interface{}{
  2948  							"foo": "userValue",
  2949  						},
  2950  					},
  2951  				},
  2952  				reconcileStep{
  2953  					name: "Reconcile overwrites value",
  2954  					desired: object{
  2955  						spec: map[string]interface{}{
  2956  							// ClusterClass still proposing the old value.
  2957  							"foo": "ccValue",
  2958  						},
  2959  					},
  2960  					want: object{
  2961  						spec: map[string]interface{}{
  2962  							// Reconcile to restore the old value.
  2963  							"foo": "ccValue",
  2964  						},
  2965  					},
  2966  				},
  2967  			},
  2968  		},
  2969  		{
  2970  			name: "Should preserve user-defined field while dropping managed field",
  2971  			// Note: This test verifies that Topology treats changes to fields existing in templates as authoritative
  2972  			// but allows setting additional/instance-specific values.
  2973  			reconcileSteps: []interface{}{
  2974  				reconcileStep{
  2975  					name: "Initially reconcile KCP",
  2976  					desired: object{
  2977  						spec: map[string]interface{}{
  2978  							"kubeadmConfigSpec": map[string]interface{}{
  2979  								"clusterConfiguration": map[string]interface{}{
  2980  									"controllerManager": map[string]interface{}{
  2981  										"extraArgs": map[string]interface{}{
  2982  											"v": "4",
  2983  										},
  2984  									},
  2985  								},
  2986  							},
  2987  						},
  2988  					},
  2989  					want: object{
  2990  						spec: map[string]interface{}{
  2991  							"kubeadmConfigSpec": map[string]interface{}{
  2992  								"clusterConfiguration": map[string]interface{}{
  2993  									"controllerManager": map[string]interface{}{
  2994  										"extraArgs": map[string]interface{}{
  2995  											"v": "4",
  2996  										},
  2997  									},
  2998  								},
  2999  							},
  3000  						},
  3001  					},
  3002  					wantCreated: true,
  3003  				},
  3004  				externalStep{
  3005  					name: "User adds an additional extraArg",
  3006  					object: object{
  3007  						spec: map[string]interface{}{
  3008  							"kubeadmConfigSpec": map[string]interface{}{
  3009  								"clusterConfiguration": map[string]interface{}{
  3010  									"controllerManager": map[string]interface{}{
  3011  										"extraArgs": map[string]interface{}{
  3012  											// User adds enable-garbage-collector.
  3013  											"enable-garbage-collector": "true",
  3014  										},
  3015  									},
  3016  								},
  3017  							},
  3018  						},
  3019  					},
  3020  				},
  3021  				reconcileStep{
  3022  					name: "Previously set extraArgs is dropped from KCP, user-specified field is preserved.",
  3023  					desired: object{
  3024  						spec: map[string]interface{}{
  3025  							"kubeadmConfigSpec": map[string]interface{}{
  3026  								"clusterConfiguration": map[string]interface{}{},
  3027  							},
  3028  						},
  3029  					},
  3030  					want: object{
  3031  						spec: map[string]interface{}{
  3032  							"kubeadmConfigSpec": map[string]interface{}{
  3033  								"clusterConfiguration": map[string]interface{}{
  3034  									"controllerManager": map[string]interface{}{
  3035  										"extraArgs": map[string]interface{}{
  3036  											// Reconcile to drop v field,
  3037  											// while preserving user-defined enable-garbage-collector field.
  3038  											"enable-garbage-collector": "true",
  3039  										},
  3040  									},
  3041  								},
  3042  							},
  3043  						},
  3044  					},
  3045  				},
  3046  			},
  3047  		},
  3048  		{
  3049  			name: "Should preserve user-defined object field while dropping managed fields",
  3050  			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
  3051  			// but allows setting additional/instance-specific values.
  3052  			reconcileSteps: []interface{}{
  3053  				reconcileStep{
  3054  					name: "Initially reconcile",
  3055  					desired: object{
  3056  						spec: map[string]interface{}{
  3057  							"machineTemplate": map[string]interface{}{},
  3058  						},
  3059  					},
  3060  					want: object{
  3061  						spec: map[string]interface{}{
  3062  							"machineTemplate": map[string]interface{}{},
  3063  						},
  3064  					},
  3065  					wantCreated: true,
  3066  				},
  3067  				externalStep{
  3068  					name: "User adds an additional object",
  3069  					object: object{
  3070  						spec: map[string]interface{}{
  3071  							"machineTemplate": map[string]interface{}{
  3072  								"infrastructureRef": map[string]interface{}{
  3073  									"apiVersion": "foo/v1alpha1",
  3074  									"kind":       "Foo",
  3075  								},
  3076  							},
  3077  						},
  3078  					},
  3079  				},
  3080  				reconcileStep{
  3081  					name: "ClusterClass starts having an opinion about some fields",
  3082  					desired: object{
  3083  						spec: map[string]interface{}{
  3084  							"machineTemplate": map[string]interface{}{
  3085  								"metadata": map[string]interface{}{
  3086  									"labels": map[string]interface{}{
  3087  										"foo": "foo",
  3088  									},
  3089  								},
  3090  								"nodeDeletionTimeout": "10m",
  3091  							},
  3092  						},
  3093  					},
  3094  					want: object{
  3095  						spec: map[string]interface{}{
  3096  							"machineTemplate": map[string]interface{}{
  3097  								// User fields are preserved.
  3098  								"infrastructureRef": map[string]interface{}{
  3099  									"apiVersion": "foo/v1alpha1",
  3100  									"kind":       "Foo",
  3101  								},
  3102  								// ClusterClass authoritative fields are added.
  3103  								"metadata": map[string]interface{}{
  3104  									"labels": map[string]interface{}{
  3105  										"foo": "foo",
  3106  									},
  3107  								},
  3108  								"nodeDeletionTimeout": "10m",
  3109  							},
  3110  						},
  3111  					},
  3112  				},
  3113  				reconcileStep{
  3114  					name: "ClusterClass stops having an opinion on the field",
  3115  					desired: object{
  3116  						spec: map[string]interface{}{
  3117  							"machineTemplate": map[string]interface{}{
  3118  								"metadata": map[string]interface{}{
  3119  									"labels": map[string]interface{}{
  3120  										"foo": "foo",
  3121  									},
  3122  								},
  3123  								// nodeDeletionTimeout has been removed by e.g. a change in the ClusterClass.
  3124  							},
  3125  						},
  3126  					},
  3127  					want: object{
  3128  						spec: map[string]interface{}{
  3129  							"machineTemplate": map[string]interface{}{
  3130  								// Reconcile to drop nodeDeletionTimeout,
  3131  								// while preserving the user-defined infrastructureRef and the remaining ClusterClass-managed labels.
  3132  								"infrastructureRef": map[string]interface{}{
  3133  									"apiVersion": "foo/v1alpha1",
  3134  									"kind":       "Foo",
  3135  								},
  3136  								"metadata": map[string]interface{}{
  3137  									"labels": map[string]interface{}{
  3138  										"foo": "foo",
  3139  									},
  3140  								},
  3141  							},
  3142  						},
  3143  					},
  3144  				},
  3145  				reconcileStep{
  3146  					name: "ClusterClass stops having an opinion on the object",
  3147  					desired: object{
  3148  						spec: map[string]interface{}{
  3149  							"machineTemplate": map[string]interface{}{
  3150  								// The ClusterClass-managed metadata has been removed by e.g. a change in the ClusterClass.
  3151  							},
  3152  						},
  3153  					},
  3154  					want: object{
  3155  						spec: map[string]interface{}{
  3156  							"machineTemplate": map[string]interface{}{
  3157  								// Reconcile to drop the ClusterClass-managed metadata,
  3158  								// while preserving the user-defined infrastructureRef.
  3159  								"infrastructureRef": map[string]interface{}{
  3160  									"apiVersion": "foo/v1alpha1",
  3161  									"kind":       "Foo",
  3162  								},
  3163  							},
  3164  						},
  3165  					},
  3166  				},
  3167  			},
  3168  		},
  3169  	}
  3170  
  3171  	for _, tt := range tests {
  3172  		t.Run(tt.name, func(t *testing.T) {
  3173  			g := NewWithT(t)
  3174  
  3175  			// Create namespace and modify input to have correct namespace set
  3176  			namespace, err := env.CreateNamespace(ctx, "reconcile-ref-obj-seq")
  3177  			g.Expect(err).ToNot(HaveOccurred())
  3178  
  3179  			r := Reconciler{
  3180  				Client:             env,
  3181  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3182  				recorder:           env.GetEventRecorderFor("test"),
  3183  			}
  3184  
  3185  			s := scope.New(&clusterv1.Cluster{})
  3186  			s.Blueprint = &scope.ClusterBlueprint{
  3187  				ClusterClass: &clusterv1.ClusterClass{},
  3188  			}
  3189  
  3190  			for i, step := range tt.reconcileSteps {
  3191  				var currentControlPlane *unstructured.Unstructured
  3192  
  3193  				// Get current ControlPlane (on later steps).
  3194  				if i > 0 {
  3195  					currentControlPlane = &unstructured.Unstructured{
  3196  						Object: map[string]interface{}{
  3197  							"kind":       builder.TestControlPlaneKind,
  3198  							"apiVersion": builder.ControlPlaneGroupVersion.String(),
  3199  						},
  3200  					}
  3201  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: "my-cluster"}, currentControlPlane)).To(Succeed())
  3202  				}
  3203  
  3204  				if step, ok := step.(externalStep); ok {
  3205  					// This is a user step, so let's just update the object using SSA.
  3206  					obj := &unstructured.Unstructured{
  3207  						Object: map[string]interface{}{
  3208  							"kind":       builder.TestControlPlaneKind,
  3209  							"apiVersion": builder.ControlPlaneGroupVersion.String(),
  3210  							"metadata": map[string]interface{}{
  3211  								"name":      "my-cluster",
  3212  								"namespace": namespace.GetName(),
  3213  							},
  3214  							"spec": step.object.spec,
  3215  						},
  3216  					}
  3217  					err := env.PatchAndWait(ctx, obj, client.FieldOwner("other-controller"), client.ForceOwnership)
  3218  					g.Expect(err).ToNot(HaveOccurred())
  3219  					continue
  3220  				}
  3221  
  3222  				if step, ok := step.(reconcileStep); ok {
  3223  					// This is a reconcile step, so let's execute a reconcile and then validate the result.
  3224  
  3225  					// Set the current control plane.
  3226  					s.Current.ControlPlane = &scope.ControlPlaneState{
  3227  						Object: currentControlPlane,
  3228  					}
  3229  					// Set the desired control plane.
  3230  					s.Desired = &scope.ClusterState{
  3231  						ControlPlane: &scope.ControlPlaneState{
  3232  							Object: &unstructured.Unstructured{
  3233  								Object: map[string]interface{}{
  3234  									"kind":       builder.TestControlPlaneKind,
  3235  									"apiVersion": builder.ControlPlaneGroupVersion.String(),
  3236  									"metadata": map[string]interface{}{
  3237  										"name":      "my-cluster",
  3238  										"namespace": namespace.GetName(),
  3239  									},
  3240  								},
  3241  							},
  3242  						},
  3243  					}
  3244  					if step.desired.spec != nil {
  3245  						s.Desired.ControlPlane.Object.Object["spec"] = step.desired.spec
  3246  					}
  3247  
  3248  					// Execute a reconcile
  3249  					created, err := r.reconcileReferencedObject(ctx, reconcileReferencedObjectInput{
  3250  						cluster: s.Current.Cluster,
  3251  						current: s.Current.ControlPlane.Object,
  3252  						desired: s.Desired.ControlPlane.Object,
  3253  					})
  3254  					g.Expect(err).ToNot(HaveOccurred())
  3255  					g.Expect(created).To(Equal(step.wantCreated))
  3256  
  3257  					// Build the object for comparison.
  3258  					want := &unstructured.Unstructured{
  3259  						Object: map[string]interface{}{
  3260  							"kind":       builder.TestControlPlaneKind,
  3261  							"apiVersion": builder.ControlPlaneGroupVersion.String(),
  3262  							"metadata": map[string]interface{}{
  3263  								"name":      "my-cluster",
  3264  								"namespace": namespace.GetName(),
  3265  							},
  3266  						},
  3267  					}
  3268  					if step.want.spec != nil {
  3269  						want.Object["spec"] = step.want.spec
  3270  					}
  3271  
  3272  					// Get the reconciled object.
  3273  					got := want.DeepCopy() // this is required, otherwise Get would modify want
  3274  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: "my-cluster"}, got)).To(Succeed())
  3275  
  3276  					// Compare want with got.
  3277  					// Ignore .metadata.resourceVersion and .metadata.annotations as we don't care about them in this test.
  3278  					unstructured.RemoveNestedField(got.Object, "metadata", "resourceVersion")
  3279  					unstructured.RemoveNestedField(got.Object, "metadata", "annotations")
  3280  					unstructured.RemoveNestedField(got.Object, "metadata", "creationTimestamp")
  3281  					unstructured.RemoveNestedField(got.Object, "metadata", "generation")
  3282  					unstructured.RemoveNestedField(got.Object, "metadata", "managedFields")
  3283  					unstructured.RemoveNestedField(got.Object, "metadata", "uid")
  3284  					unstructured.RemoveNestedField(got.Object, "metadata", "selfLink")
  3285  					g.Expect(got).To(EqualObject(want), fmt.Sprintf("Step %q failed: %v", step.name, cmp.Diff(want, got)))
  3286  					continue
  3287  				}
  3288  
  3289  				panic(fmt.Errorf("unknown step type %T", step))
  3290  			}
  3291  		})
  3292  	}
  3293  }
  3294  
  3295  func TestReconcileMachineDeploymentMachineHealthCheck(t *testing.T) {
  3296  	md := builder.MachineDeployment(metav1.NamespaceDefault, "md-1").WithLabels(
  3297  		map[string]string{
  3298  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "machine-deployment-one",
  3299  		}).
  3300  		Build()
  3301  
  3302  	maxUnhealthy := intstr.Parse("45%")
  3303  	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "md-1").
  3304  		WithSelector(*selectors.ForMachineDeploymentMHC(md)).
  3305  		WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
  3306  			{
  3307  				Type:    corev1.NodeReady,
  3308  				Status:  corev1.ConditionUnknown,
  3309  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  3310  			},
  3311  		}).
  3312  		WithClusterName("cluster1")
  3313  
  3314  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-1").Build()
  3315  	bootstrapTemplate := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  3316  
  3317  	tests := []struct {
  3318  		name    string
  3319  		current []*scope.MachineDeploymentState
  3320  		desired []*scope.MachineDeploymentState
  3321  		want    []*clusterv1.MachineHealthCheck
  3322  	}{
  3323  		{
  3324  			name:    "Create a MachineHealthCheck if the MachineDeployment is being created",
  3325  			current: nil,
  3326  			desired: []*scope.MachineDeploymentState{
  3327  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3328  					mhcBuilder.DeepCopy().Build()),
  3329  			},
  3330  			want: []*clusterv1.MachineHealthCheck{
  3331  				mhcBuilder.DeepCopy().Build()},
  3332  		},
  3333  		{
  3334  			name: "Create a new MachineHealthCheck if the MachineDeployment is modified to include one",
  3335  			current: []*scope.MachineDeploymentState{
  3336  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3337  					nil)},
  3338  			// MHC is added in the desired state of the MachineDeployment
  3339  			desired: []*scope.MachineDeploymentState{
  3340  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3341  					mhcBuilder.DeepCopy().Build()),
  3342  			},
  3343  			want: []*clusterv1.MachineHealthCheck{
  3344  				mhcBuilder.DeepCopy().Build()}},
  3345  		{
  3346  			name: "Update the MachineHealthCheck if the desired spec adds a field",
  3347  			current: []*scope.MachineDeploymentState{
  3348  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3349  					mhcBuilder.DeepCopy().Build()),
  3350  			},
  3351  			desired: []*scope.MachineDeploymentState{
  3352  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3353  					mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).Build())},
  3354  			want: []*clusterv1.MachineHealthCheck{
  3355  				mhcBuilder.DeepCopy().
  3356  					WithMaxUnhealthy(&maxUnhealthy).
  3357  					Build()},
  3358  		},
  3359  		{
  3360  			name: "Update the MachineHealthCheck if the desired spec removes a field",
  3361  			current: []*scope.MachineDeploymentState{
  3362  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3363  					mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).Build()),
  3364  			},
  3365  			desired: []*scope.MachineDeploymentState{
  3366  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3367  					mhcBuilder.DeepCopy().Build()),
  3368  			},
  3369  			want: []*clusterv1.MachineHealthCheck{
  3370  				mhcBuilder.DeepCopy().Build(),
  3371  			},
  3372  		},
  3373  		{
  3374  			name: "Delete the MachineHealthCheck if the MachineDeployment is modified to remove an existing one",
  3375  			current: []*scope.MachineDeploymentState{
  3376  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3377  					mhcBuilder.DeepCopy().Build()),
  3378  			},
  3379  			desired: []*scope.MachineDeploymentState{newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate, nil)},
  3380  			want:    []*clusterv1.MachineHealthCheck{},
  3381  		},
  3382  		{
  3383  			name: "Delete the MachineHealthCheck if the MachineDeployment is deleted",
  3384  			current: []*scope.MachineDeploymentState{
  3385  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3386  					mhcBuilder.DeepCopy().Build()),
  3387  			},
  3388  			desired: []*scope.MachineDeploymentState{},
  3389  			want:    []*clusterv1.MachineHealthCheck{},
  3390  		},
  3391  	}
  3392  	for _, tt := range tests {
  3393  		t.Run(tt.name, func(t *testing.T) {
  3394  			g := NewWithT(t)
  3395  
  3396  			// Create a namespace and update the test input to use it.
  3397  			namespace, err := env.CreateNamespace(ctx, "reconcile-md-mhc")
  3398  			g.Expect(err).ToNot(HaveOccurred())
  3399  			for i, s := range tt.current {
  3400  				tt.current[i] = prepareMachineDeploymentState(s, namespace.GetName())
  3401  			}
  3402  			for i, s := range tt.desired {
  3403  				tt.desired[i] = prepareMachineDeploymentState(s, namespace.GetName())
  3404  			}
  3405  			for i, mhc := range tt.want {
  3406  				tt.want[i] = mhc.DeepCopy()
  3407  				tt.want[i].SetNamespace(namespace.GetName())
  3408  			}
  3409  
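        			// Track the UIDs the API server assigns to the created MachineDeployments so they
        			// can be copied into the owner references of the desired MachineHealthChecks.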
  3410  			uidsByName := map[string]types.UID{}
  3411  
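        			// Apply the current state to the test environment with server-side apply, using the
        			// topology manager as field owner, so the objects exist before reconciling.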
  3412  			for _, mdts := range tt.current {
  3413  				g.Expect(env.PatchAndWait(ctx, mdts.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3414  				g.Expect(env.PatchAndWait(ctx, mdts.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3415  				g.Expect(env.PatchAndWait(ctx, mdts.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3416  
  3417  				uidsByName[mdts.Object.Name] = mdts.Object.GetUID()
  3418  
  3419  				if mdts.MachineHealthCheck != nil {
  3420  					for i, ref := range mdts.MachineHealthCheck.OwnerReferences {
  3421  						ref.UID = mdts.Object.GetUID()
  3422  						mdts.MachineHealthCheck.OwnerReferences[i] = ref
  3423  					}
  3424  					g.Expect(env.PatchAndWait(ctx, mdts.MachineHealthCheck, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3425  				}
  3426  			}
  3427  
  3428  			// Copy the generated owner UIDs into the OwnerReferences of the desired MachineHealthChecks.
  3429  			for _, mdts := range tt.desired {
  3430  				if mdts.MachineHealthCheck != nil {
  3431  					for i, ref := range mdts.MachineHealthCheck.OwnerReferences {
  3432  						if uid, ok := uidsByName[ref.Name]; ok {
  3433  							ref.UID = uid
  3434  							mdts.MachineHealthCheck.OwnerReferences[i] = ref
  3435  						}
  3436  					}
  3437  				}
  3438  			}
  3439  
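        			// Build the reconcile scope from the current and desired MachineDeployment states,
        			// keyed by their topology name label.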
  3440  			currentMachineDeploymentStates := toMachineDeploymentTopologyStateMap(tt.current)
  3441  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  3442  			s.Current.MachineDeployments = currentMachineDeploymentStates
  3443  
  3444  			s.Desired = &scope.ClusterState{MachineDeployments: toMachineDeploymentTopologyStateMap(tt.desired)}
  3445  
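        			// Reconciler wired against the envtest client with a server-side apply patch helper.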
  3446  			r := Reconciler{
  3447  				Client:             env.GetClient(),
  3448  				APIReader:          env.GetAPIReader(),
  3449  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3450  				recorder:           env.GetEventRecorderFor("test"),
  3451  			}
  3452  
  3453  			err = r.reconcileMachineDeployments(ctx, s)
  3454  			g.Expect(err).ToNot(HaveOccurred())
  3455  
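        			// List the MachineHealthChecks that exist in the namespace after reconciling.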
  3456  			var gotMachineHealthCheckList clusterv1.MachineHealthCheckList
  3457  			g.Expect(env.GetAPIReader().List(ctx, &gotMachineHealthCheckList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  3458  			g.Expect(gotMachineHealthCheckList.Items).To(HaveLen(len(tt.want)))
  3461  
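        			// Default each expected MachineHealthCheck so the comparison accounts for defaulted fields.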
  3462  			for _, wantMHCOrig := range tt.want {
  3463  				wantMHC := wantMHCOrig.DeepCopy()
  3464  				g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, wantMHC)).To(Succeed())
  3465  
  3466  				for _, gotMHC := range gotMachineHealthCheckList.Items {
  3467  					if wantMHC.Name == gotMHC.Name {
  3468  						actual := gotMHC
  3469  						// Unset the owner reference UID because it is generated by the API server.
  3470  						for i, ref := range actual.OwnerReferences {
  3471  							ref.UID = ""
  3472  							actual.OwnerReferences[i] = ref
  3473  						}
  3474  						g.Expect(wantMHC).To(EqualObject(&actual, IgnoreAutogeneratedMetadata))
  3475  					}
  3476  				}
  3477  			}
  3478  		})
  3479  	}
  3480  }
  3481  
  3482  func TestReconcileState(t *testing.T) {
  3483  	t.Run("Cluster gets reconciled with the infrastructure ref only when reconcileInfrastructureCluster passes and reconcileControlPlane fails", func(t *testing.T) {
  3484  		g := NewWithT(t)
  3485  
  3486  		currentCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").Build()
  3487  
  3488  		infrastructureCluster := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").Build()
  3489  		controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "controlplane-cluster1").Build()
  3490  		desiredCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  3491  			WithInfrastructureCluster(infrastructureCluster).
  3492  			WithControlPlane(controlPlane).
  3493  			Build()
  3494  
  3495  		// The cluster requires a UID because reconcileClusterShim will create a cluster shim
  3496  		// which has the cluster set as owner in an OwnerReference.
  3497  		// A valid OwnerReference requires a UID.
  3498  		currentCluster.SetUID("foo")
  3499  
  3500  		// NOTE: it is ok to use Create here given that Clusters are created by the user.
  3501  		g.Expect(env.CreateAndWait(ctx, currentCluster)).To(Succeed())
  3502  
  3503  		s := scope.New(currentCluster)
  3504  		s.Blueprint = &scope.ClusterBlueprint{ClusterClass: &clusterv1.ClusterClass{}}
  3505  		s.Current.ControlPlane = &scope.ControlPlaneState{}
  3506  		s.Desired = &scope.ClusterState{Cluster: desiredCluster, InfrastructureCluster: infrastructureCluster, ControlPlane: &scope.ControlPlaneState{Object: controlPlane}}
  3507  
  3508  		// Create a namespace and update the test input to use it.
  3509  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster")
  3510  		g.Expect(err).ToNot(HaveOccurred())
  3511  		prepareControlPlaneState(g, s.Desired.ControlPlane, namespace.GetName())
  3512  
  3513  		// Force reconcileControlPlane to fail by pointing the control plane at a namespace that does not exist.
  3514  		controlPlane.SetNamespace("do-not-exist")
  3515  
  3516  		r := Reconciler{
  3517  			Client:             env,
  3518  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3519  			recorder:           env.GetEventRecorderFor("test"),
  3520  		}
  3521  		err = r.reconcileState(ctx, s)
  3522  		g.Expect(err).To(HaveOccurred())
  3523  
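        		// Read the Cluster back from the API server to check which references have been set.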
  3524  		got := currentCluster.DeepCopy()
  3525  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(currentCluster), got)
  3526  		g.Expect(err).ToNot(HaveOccurred())
  3527  
  3528  		g.Expect(got.Spec.InfrastructureRef).ToNot(BeNil())
  3529  		g.Expect(got.Spec.ControlPlaneRef).To(BeNil())
  3530  
  3531  		g.Expect(env.CleanupAndWait(ctx, infrastructureCluster, currentCluster)).To(Succeed())
  3532  	})
  3533  	t.Run("Cluster gets reconciled with both the infrastructure ref and the control plane ref when both reconcileInfrastructureCluster and reconcileControlPlane pass", func(t *testing.T) {
  3534  		g := NewWithT(t)
  3535  
  3536  		currentCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").Build()
  3537  
  3538  		infrastructureCluster := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").Build()
  3539  		controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "controlplane-cluster1").Build()
  3540  		desiredCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  3541  			WithInfrastructureCluster(infrastructureCluster).
  3542  			WithControlPlane(controlPlane).
  3543  			Build()
  3544  
  3545  		// The cluster requires a UID because reconcileClusterShim will create a cluster shim
  3546  		// which has the cluster set as owner in an OwnerReference.
  3547  		// A valid OwnerReference requires a UID.
  3548  		currentCluster.SetUID("foo")
  3549  
  3550  		// NOTE: it is ok to use Create here given that Clusters are created by the user.
  3551  		g.Expect(env.CreateAndWait(ctx, currentCluster)).To(Succeed())
  3552  
  3553  		s := scope.New(currentCluster)
  3554  		s.Blueprint = &scope.ClusterBlueprint{ClusterClass: &clusterv1.ClusterClass{}}
  3555  		s.Current.ControlPlane = &scope.ControlPlaneState{}
  3556  		s.Desired = &scope.ClusterState{Cluster: desiredCluster, InfrastructureCluster: infrastructureCluster, ControlPlane: &scope.ControlPlaneState{Object: controlPlane}}
  3557  
  3558  		// Create a namespace and update the test input to use it.
  3559  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster")
  3560  		g.Expect(err).ToNot(HaveOccurred())
  3561  		prepareControlPlaneState(g, s.Desired.ControlPlane, namespace.GetName())
  3562  
  3563  		r := Reconciler{
  3564  			Client:             env,
  3565  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3566  			recorder:           env.GetEventRecorderFor("test"),
  3567  		}
  3568  		err = r.reconcileState(ctx, s)
  3569  		g.Expect(err).ToNot(HaveOccurred())
  3570  
  3571  		got := currentCluster.DeepCopy()
  3572  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(currentCluster), got)
  3573  		g.Expect(err).ToNot(HaveOccurred())
  3574  
  3575  		g.Expect(got.Spec.InfrastructureRef).ToNot(BeNil())
  3576  		g.Expect(got.Spec.ControlPlaneRef).ToNot(BeNil())
  3577  
  3578  		g.Expect(env.CleanupAndWait(ctx, infrastructureCluster, controlPlane, currentCluster)).To(Succeed())
  3579  	})
  3580  	t.Run("Cluster does not get reconciled when reconcileControlPlane fails and the infrastructure ref is set", func(t *testing.T) {
  3581  		g := NewWithT(t)
  3582  
  3583  		infrastructureCluster := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").Build()
  3584  		controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "controlplane-cluster1").Build()
  3585  
  3586  		currentCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  3587  			WithInfrastructureCluster(infrastructureCluster).
  3588  			Build()
  3589  
  3590  		desiredCluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  3591  			WithInfrastructureCluster(infrastructureCluster).
  3592  			WithControlPlane(controlPlane).
  3593  			Build()
  3594  
  3595  		// The cluster requires a UID because reconcileClusterShim will create a cluster shim
  3596  		// which has the cluster set as owner in an OwnerReference.
  3597  		// A valid OwnerReference requires a UID.
  3598  		currentCluster.SetUID("foo")
  3599  
  3600  		// NOTE: it is ok to use Create here given that Clusters are created by the user.
  3601  		g.Expect(env.CreateAndWait(ctx, currentCluster)).To(Succeed())
  3602  
  3603  		s := scope.New(currentCluster)
  3604  		s.Blueprint = &scope.ClusterBlueprint{ClusterClass: &clusterv1.ClusterClass{}}
  3605  		s.Current.ControlPlane = &scope.ControlPlaneState{}
  3606  		s.Desired = &scope.ClusterState{Cluster: desiredCluster, InfrastructureCluster: infrastructureCluster, ControlPlane: &scope.ControlPlaneState{Object: controlPlane}}
  3607  
  3608  		// Create a namespace and update the test input to use it.
  3609  		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster")
  3610  		g.Expect(err).ToNot(HaveOccurred())
  3611  		prepareControlPlaneState(g, s.Desired.ControlPlane, namespace.GetName())
  3612  
  3613  		// Force reconcileControlPlane to fail by pointing the control plane at a namespace that does not exist.
  3614  		controlPlane.SetNamespace("do-not-exist")
  3615  
  3616  		r := Reconciler{
  3617  			Client:             env,
  3618  			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3619  			recorder:           env.GetEventRecorderFor("test"),
  3620  		}
  3621  		err = r.reconcileState(ctx, s)
  3622  		g.Expect(err).To(HaveOccurred())
  3623  
  3624  		got := currentCluster.DeepCopy()
  3625  		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(currentCluster), got)
  3626  		g.Expect(err).ToNot(HaveOccurred())
  3627  
  3628  		g.Expect(got.Spec.InfrastructureRef).ToNot(BeNil())
  3629  		g.Expect(got.Spec.ControlPlaneRef).To(BeNil())
  3630  
  3631  		g.Expect(env.CleanupAndWait(ctx, infrastructureCluster, controlPlane, currentCluster)).To(Succeed())
  3632  	})
  3633  }
  3634  
  3635  func newFakeMachineDeploymentTopologyState(name string, infrastructureMachineTemplate, bootstrapTemplate *unstructured.Unstructured, machineHealthCheck *clusterv1.MachineHealthCheck) *scope.MachineDeploymentState {
  3636  	mdState := &scope.MachineDeploymentState{
  3637  		Object: builder.MachineDeployment(metav1.NamespaceDefault, name).
  3638  			WithInfrastructureTemplate(infrastructureMachineTemplate).
  3639  			WithBootstrapTemplate(bootstrapTemplate).
  3640  			WithLabels(map[string]string{
  3641  				clusterv1.ClusterTopologyMachineDeploymentNameLabel: name + "-topology",
  3642  				clusterv1.ClusterTopologyOwnedLabel:                 "",
  3643  			}).
  3644  			WithClusterName("cluster-1").
  3645  			WithReplicas(1).
  3646  			WithMinReadySeconds(1).
  3647  			Build(),
  3648  		InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  3649  		BootstrapTemplate:             bootstrapTemplate.DeepCopy(),
  3650  		MachineHealthCheck:            machineHealthCheck.DeepCopy(),
  3651  	}
  3652  
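        	// Run the MachineDeployment defaulting webhook so the fake state carries the same
        	// defaulted values the persisted objects would have.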
  3653  	scheme := runtime.NewScheme()
  3654  	_ = clusterv1.AddToScheme(scheme)
  3655  	if err := (&webhooks.MachineDeployment{}).
  3656  		Default(admission.NewContextWithRequest(ctx, admission.Request{}), mdState.Object); err != nil {
  3657  		panic(err)
  3658  	}
  3659  	return mdState
  3660  }
  3661  
  3662  func newFakeMachinePoolTopologyState(name string, infrastructureMachinePool, bootstrapObject *unstructured.Unstructured) *scope.MachinePoolState {
  3663  	mpState := &scope.MachinePoolState{
  3664  		Object: builder.MachinePool(metav1.NamespaceDefault, name).
  3665  			WithInfrastructure(infrastructureMachinePool).
  3666  			WithBootstrap(bootstrapObject).
  3667  			WithLabels(map[string]string{
  3668  				clusterv1.ClusterTopologyMachinePoolNameLabel: name + "-topology",
  3669  				clusterv1.ClusterTopologyOwnedLabel:           "",
  3670  			}).
  3671  			WithClusterName("cluster-1").
  3672  			WithReplicas(1).
  3673  			WithMinReadySeconds(1).
  3674  			Build(),
  3675  		InfrastructureMachinePoolObject: infrastructureMachinePool.DeepCopy(),
  3676  		BootstrapObject:                 bootstrapObject.DeepCopy(),
  3677  	}
  3678  
  3679  	return mpState
  3680  }
  3681  
  3682  func toMachineDeploymentTopologyStateMap(states []*scope.MachineDeploymentState) map[string]*scope.MachineDeploymentState {
  3683  	ret := map[string]*scope.MachineDeploymentState{}
  3684  	for _, state := range states {
  3685  		ret[state.Object.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]] = state
  3686  	}
  3687  	return ret
  3688  }
  3689  
  3690  func toMachinePoolTopologyStateMap(states []*scope.MachinePoolState) map[string]*scope.MachinePoolState {
  3691  	ret := map[string]*scope.MachinePoolState{}
  3692  	for _, state := range states {
  3693  		ret[state.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]] = state
  3694  	}
  3695  	return ret
  3696  }
  3697  
  3698  func TestReconciler_reconcileMachineHealthCheck(t *testing.T) {
  3699  	// Create a control plane object with enough information to be used as the owner in an OwnerReference for the MachineHealthCheck.
  3700  	cp := builder.ControlPlane(metav1.NamespaceDefault, "cp1").Build()
  3701  	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1").
  3702  		WithSelector(*selectors.ForControlPlaneMHC()).
  3703  		WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
  3704  			{
  3705  				Type:    corev1.NodeReady,
  3706  				Status:  corev1.ConditionUnknown,
  3707  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  3708  			},
  3709  		}).
  3710  		WithClusterName("cluster1")
  3711  	tests := []struct {
  3712  		name    string
  3713  		current *clusterv1.MachineHealthCheck
  3714  		desired *clusterv1.MachineHealthCheck
  3715  		want    *clusterv1.MachineHealthCheck
  3716  		wantErr bool
  3717  	}{
  3718  		{
  3719  			name:    "Create a MachineHealthCheck",
  3720  			current: nil,
  3721  			desired: mhcBuilder.DeepCopy().Build(),
  3722  			want:    mhcBuilder.DeepCopy().Build(),
  3723  		},
  3724  		{
  3725  			name:    "Update a MachineHealthCheck with changes",
  3726  			current: mhcBuilder.DeepCopy().Build(),
  3727  			// update the unhealthy conditions in the MachineHealthCheck
  3728  			desired: mhcBuilder.DeepCopy().WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
  3729  				{
  3730  					Type:    corev1.NodeReady,
  3731  					Status:  corev1.ConditionUnknown,
  3732  					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
  3733  				},
  3734  			}).Build(),
  3735  			want: mhcBuilder.DeepCopy().WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
  3736  				{
  3737  					Type:    corev1.NodeReady,
  3738  					Status:  corev1.ConditionUnknown,
  3739  					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
  3740  				},
  3741  			}).Build(),
  3742  		},
  3743  		{
  3744  			name:    "Don't change a MachineHealthCheck with no difference between desired and current",
  3745  			current: mhcBuilder.DeepCopy().Build(),
  3746  			// desired is identical to current, so no changes are expected
  3747  			desired: mhcBuilder.DeepCopy().Build(),
  3748  			want:    mhcBuilder.DeepCopy().Build(),
  3749  		},
  3750  		{
  3751  			name:    "Delete a MachineHealthCheck",
  3752  			current: mhcBuilder.DeepCopy().Build(),
  3753  			// desired is nil, so the existing MachineHealthCheck should be deleted
  3754  			desired: nil,
  3755  			want:    nil,
  3756  			wantErr: true,
  3757  		},
  3758  	}
  3759  	for _, tt := range tests {
  3760  		t.Run(tt.name, func(t *testing.T) {
  3761  			g := NewWithT(t)
  3762  			got := &clusterv1.MachineHealthCheck{}
  3763  
  3764  			// Create namespace
  3765  			namespace, err := env.CreateNamespace(ctx, "reconcile-mhc")
  3766  			g.Expect(err).ToNot(HaveOccurred())
  3767  			// Create control plane
  3768  			localCP := cp.DeepCopy()
  3769  			localCP.SetNamespace(namespace.GetName())
  3770  			g.Expect(env.CreateAndWait(ctx, localCP)).To(Succeed())
  3771  			// Modify the test input and reuse the control plane UID where necessary
  3772  			if tt.current != nil {
  3773  				tt.current = tt.current.DeepCopy()
  3774  				tt.current.SetNamespace(namespace.GetName())
  3775  			}
  3776  			if tt.desired != nil {
  3777  				tt.desired = tt.desired.DeepCopy()
  3778  				tt.desired.SetNamespace(namespace.GetName())
  3779  			}
  3780  			if tt.want != nil {
  3781  				tt.want = tt.want.DeepCopy()
  3782  				tt.want.SetNamespace(namespace.GetName())
  3783  				if len(tt.want.OwnerReferences) == 1 {
  3784  					tt.want.OwnerReferences[0].UID = localCP.GetUID()
  3785  				}
  3786  			}
  3787  
  3788  			r := Reconciler{
  3789  				Client:             env,
  3790  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3791  				recorder:           env.GetEventRecorderFor("test"),
  3792  			}
  3793  			if tt.current != nil {
  3794  				g.Expect(env.CreateAndWait(ctx, tt.current)).To(Succeed())
  3795  			}
  3796  			if err := r.reconcileMachineHealthCheck(ctx, tt.current, tt.desired); err != nil {
  3797  				if !tt.wantErr {
  3798  					t.Errorf("reconcileMachineHealthCheck() error = %v, wantErr %v", err, tt.wantErr)
  3799  				}
  3800  			}
  3801  
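        			// Read the MachineHealthCheck back from the API server; for the delete case a
        			// NotFound error is expected and got is reset to nil.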
  3802  			key := mhcBuilder.Build()
  3803  			key.SetNamespace(namespace.GetName())
  3804  			if err := env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(key), got); err != nil {
  3805  				if !tt.wantErr {
  3806  					t.Errorf("reconcileMachineHealthCheck() error = %v, wantErr %v", err, tt.wantErr)
  3807  				}
  3808  				if apierrors.IsNotFound(err) {
  3809  					got = nil
  3810  				}
  3811  			}
  3812  
  3813  			want := tt.want.DeepCopy()
  3814  			if want != nil {
  3815  				g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, want)).To(Succeed())
  3816  			}
  3817  
  3818  			g.Expect(got).To(EqualObject(want, IgnoreAutogeneratedMetadata, IgnorePaths{".kind", ".apiVersion"}))
  3819  		})
  3820  	}
  3821  }
  3822  
  3823  // prepareControlPlaneBluePrint deep-copies the input scope, sets the given namespace
  3824  // on all relevant objects, and returns the copy.
  3825  func prepareControlPlaneBluePrint(in *scope.ControlPlaneBlueprint, namespace string) *scope.ControlPlaneBlueprint {
  3826  	s := &scope.ControlPlaneBlueprint{}
  3827  	if in.InfrastructureMachineTemplate != nil {
  3828  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3829  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3830  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3831  		}
  3832  	}
  3833  	if in.MachineHealthCheck != nil {
  3834  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3835  	}
  3836  	if in.Template != nil {
  3837  		s.Template = in.Template.DeepCopy()
  3838  		if s.Template.GetNamespace() == metav1.NamespaceDefault {
  3839  			s.Template.SetNamespace(namespace)
  3840  		}
  3841  	}
  3842  	return s
  3843  }
  3844  
  3845  // prepareControlPlaneState deep-copies the input scope, sets the given namespace
  3846  // on all relevant objects, and returns the copy.
  3847  func prepareControlPlaneState(g *WithT, in *scope.ControlPlaneState, namespace string) *scope.ControlPlaneState {
  3848  	s := &scope.ControlPlaneState{}
  3849  	if in.InfrastructureMachineTemplate != nil {
  3850  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3851  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3852  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3853  		}
  3854  	}
  3855  	if in.MachineHealthCheck != nil {
  3856  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3857  		if s.MachineHealthCheck.GetNamespace() == metav1.NamespaceDefault {
  3858  			s.MachineHealthCheck.SetNamespace(namespace)
  3859  		}
  3860  	}
  3861  	if in.Object != nil {
  3862  		s.Object = in.Object.DeepCopy()
  3863  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3864  			s.Object.SetNamespace(namespace)
  3865  		}
  3866  		if current, ok, err := unstructured.NestedString(s.Object.Object, "spec", "machineTemplate", "infrastructureRef", "namespace"); ok && err == nil && current == metav1.NamespaceDefault {
  3867  			g.Expect(unstructured.SetNestedField(s.Object.Object, namespace, "spec", "machineTemplate", "infrastructureRef", "namespace")).To(Succeed())
  3868  		}
  3869  	}
  3870  	return s
  3871  }
  3872  
  3873  // prepareMachineDeploymentState deep-copies the input scope, sets the given namespace
  3874  // on all relevant objects, and returns the copy.
  3875  func prepareMachineDeploymentState(in *scope.MachineDeploymentState, namespace string) *scope.MachineDeploymentState {
  3876  	s := &scope.MachineDeploymentState{}
  3877  	if in.BootstrapTemplate != nil {
  3878  		s.BootstrapTemplate = in.BootstrapTemplate.DeepCopy()
  3879  		if s.BootstrapTemplate.GetNamespace() == metav1.NamespaceDefault {
  3880  			s.BootstrapTemplate.SetNamespace(namespace)
  3881  		}
  3882  	}
  3883  	if in.InfrastructureMachineTemplate != nil {
  3884  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3885  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3886  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3887  		}
  3888  	}
  3889  	if in.MachineHealthCheck != nil {
  3890  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3891  		if s.MachineHealthCheck.GetNamespace() == metav1.NamespaceDefault {
  3892  			s.MachineHealthCheck.SetNamespace(namespace)
  3893  		}
  3894  	}
  3895  	if in.Object != nil {
  3896  		s.Object = in.Object.DeepCopy()
  3897  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3898  			s.Object.SetNamespace(namespace)
  3899  		}
  3900  		if s.Object.Spec.Template.Spec.Bootstrap.ConfigRef != nil && s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == metav1.NamespaceDefault {
  3901  			s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace = namespace
  3902  		}
  3903  		if s.Object.Spec.Template.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3904  			s.Object.Spec.Template.Spec.InfrastructureRef.Namespace = namespace
  3905  		}
  3906  	}
  3907  	return s
  3908  }
  3909  
  3910  // prepareMachinePoolState deep-copies the input scope, sets the given namespace
  3911  // on all relevant objects, and returns the copy.
  3912  func prepareMachinePoolState(in *scope.MachinePoolState, namespace string) *scope.MachinePoolState {
  3913  	s := &scope.MachinePoolState{}
  3914  	if in.BootstrapObject != nil {
  3915  		s.BootstrapObject = in.BootstrapObject.DeepCopy()
  3916  		if s.BootstrapObject.GetNamespace() == metav1.NamespaceDefault {
  3917  			s.BootstrapObject.SetNamespace(namespace)
  3918  		}
  3919  	}
  3920  	if in.InfrastructureMachinePoolObject != nil {
  3921  		s.InfrastructureMachinePoolObject = in.InfrastructureMachinePoolObject.DeepCopy()
  3922  		if s.InfrastructureMachinePoolObject.GetNamespace() == metav1.NamespaceDefault {
  3923  			s.InfrastructureMachinePoolObject.SetNamespace(namespace)
  3924  		}
  3925  	}
  3926  	if in.Object != nil {
  3927  		s.Object = in.Object.DeepCopy()
  3928  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3929  			s.Object.SetNamespace(namespace)
  3930  		}
  3931  		if s.Object.Spec.Template.Spec.Bootstrap.ConfigRef != nil && s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == metav1.NamespaceDefault {
  3932  			s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace = namespace
  3933  		}
  3934  		if s.Object.Spec.Template.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3935  			s.Object.Spec.Template.Spec.InfrastructureRef.Namespace = namespace
  3936  		}
  3937  	}
  3938  	return s
  3939  }
  3940  
  3941  // prepareCluster deep-copies the input Cluster, sets the given namespace
  3942  // on all relevant objects, and returns the copy.
  3943  func prepareCluster(in *clusterv1.Cluster, namespace string) *clusterv1.Cluster {
  3944  	c := in.DeepCopy()
  3945  	if c.Namespace == metav1.NamespaceDefault {
  3946  		c.SetNamespace(namespace)
  3947  	}
  3948  	if c.Spec.InfrastructureRef != nil && c.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3949  		c.Spec.InfrastructureRef.Namespace = namespace
  3950  	}
  3951  	if c.Spec.ControlPlaneRef != nil && c.Spec.ControlPlaneRef.Namespace == metav1.NamespaceDefault {
  3952  		c.Spec.ControlPlaneRef.Namespace = namespace
  3953  	}
  3954  	return c
  3955  }
  3956  
  3957  func Test_createErrorWithoutObjectName(t *testing.T) {
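        	// detailsError simulates an apiserver validation error whose message embeds the object name,
        	// which createErrorWithoutObjectName is expected to strip from the message.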
  3958  	detailsError := &apierrors.StatusError{
  3959  		ErrStatus: metav1.Status{
  3960  			Status:  metav1.StatusFailure,
  3961  			Code:    http.StatusUnprocessableEntity,
  3962  			Reason:  metav1.StatusReasonInvalid,
  3963  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3964  			Details: &metav1.StatusDetails{
  3965  				Group: "infrastructure.cluster.x-k8s.io",
  3966  				Kind:  "DockerMachineTemplate",
  3967  				Name:  "docker-template-one",
  3968  				Causes: []metav1.StatusCause{
  3969  					{
  3970  						Type:    "FieldValueInvalid",
  3971  						Message: "Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3972  						Field:   "spec.template.spec.preLoadImages",
  3973  					},
  3974  				},
  3975  			},
  3976  		},
  3977  	}
  3978  	expectedDetailsError := &apierrors.StatusError{
  3979  		ErrStatus: metav1.Status{
  3980  			Status: metav1.StatusFailure,
  3981  			Code:   http.StatusUnprocessableEntity,
  3982  			Reason: metav1.StatusReasonInvalid,
  3983  			// The only difference between the two objects should be in the Message section.
  3984  			Message: "failed to create DockerMachineTemplate.infrastructure.cluster.x-k8s.io: FieldValueInvalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3985  			Details: &metav1.StatusDetails{
  3986  				Group: "infrastructure.cluster.x-k8s.io",
  3987  				Kind:  "DockerMachineTemplate",
  3988  				Name:  "docker-template-one",
  3989  				Causes: []metav1.StatusCause{
  3990  					{
  3991  						Type:    "FieldValueInvalid",
  3992  						Message: "Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3993  						Field:   "spec.template.spec.preLoadImages",
  3994  					},
  3995  				},
  3996  			},
  3997  		},
  3998  	}
  3999  	noCausesDetailsError := &apierrors.StatusError{
  4000  		ErrStatus: metav1.Status{
  4001  			Status:  metav1.StatusFailure,
  4002  			Code:    http.StatusUnprocessableEntity,
  4003  			Reason:  metav1.StatusReasonInvalid,
  4004  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  4005  			Details: &metav1.StatusDetails{
  4006  				Group: "infrastructure.cluster.x-k8s.io",
  4007  				Kind:  "DockerMachineTemplate",
  4008  				Name:  "docker-template-one",
  4009  			},
  4010  		},
  4011  	}
  4012  	expectedNoCausesDetailsError := &apierrors.StatusError{
  4013  		ErrStatus: metav1.Status{
  4014  			Status: metav1.StatusFailure,
  4015  			Code:   http.StatusUnprocessableEntity,
  4016  			Reason: metav1.StatusReasonInvalid,
  4017  			// The only difference between the two objects should be in the Message section.
  4018  			Message: "failed to create DockerMachineTemplate.infrastructure.cluster.x-k8s.io",
  4019  			Details: &metav1.StatusDetails{
  4020  				Group: "infrastructure.cluster.x-k8s.io",
  4021  				Kind:  "DockerMachineTemplate",
  4022  				Name:  "docker-template-one",
  4023  			},
  4024  		},
  4025  	}
  4026  	noDetailsError := &apierrors.StatusError{
  4027  		ErrStatus: metav1.Status{
  4028  			Status:  metav1.StatusFailure,
  4029  			Code:    http.StatusUnprocessableEntity,
  4030  			Reason:  metav1.StatusReasonInvalid,
  4031  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  4032  		},
  4033  	}
  4034  	expectedNoDetailsError := &apierrors.StatusError{
  4035  		ErrStatus: metav1.Status{
  4036  			Status: metav1.StatusFailure,
  4037  			Code:   http.StatusUnprocessableEntity,
  4038  			Reason: metav1.StatusReasonInvalid,
  4039  			// The only difference between the two objects should be in the Message section.
  4040  			Message: "failed to create TestControlPlane.controlplane.cluster.x-k8s.io",
  4041  		},
  4042  	}
  4043  	expectedObjectNilError := &apierrors.StatusError{
  4044  		ErrStatus: metav1.Status{
  4045  			Status: metav1.StatusFailure,
  4046  			Code:   http.StatusUnprocessableEntity,
  4047  			Reason: metav1.StatusReasonInvalid,
  4048  			// The only difference between the two objects should be in the Message section.
  4049  			Message: "failed to create object",
  4050  		},
  4051  	}
  4052  	nonStatusError := errors.New("an unexpected error with unknown information inside")
  4053  	expectedNonStatusError := errors.New("failed to create TestControlPlane.controlplane.cluster.x-k8s.io")
  4054  	expectedNilObjectNonStatusError := errors.New("failed to create object")
  4055  	tests := []struct {
  4056  		name     string
  4057  		input    error
  4058  		expected error
  4059  		obj      client.Object
  4060  	}{
  4061  		{
  4062  			name:     "Remove name from status error with details",
  4063  			input:    detailsError,
  4064  			expected: expectedDetailsError,
  4065  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  4066  		},
  4067  		{
  4068  			name:     "Remove name from status error with details but no causes",
  4069  			input:    noCausesDetailsError,
  4070  			expected: expectedNoCausesDetailsError,
  4071  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  4072  		},
  4073  		{
  4074  			name:     "Remove name from status error with no details",
  4075  			input:    noDetailsError,
  4076  			expected: expectedNoDetailsError,
  4077  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  4078  		},
  4079  		{
  4080  			name:     "Remove name from status error with nil object",
  4081  			input:    noDetailsError,
  4082  			expected: expectedObjectNilError,
  4083  			obj:      nil,
  4084  		},
  4091  		{
  4092  			name:     "Replace message of non status error",
  4093  			input:    nonStatusError,
  4094  			expected: expectedNonStatusError,
  4095  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  4096  		},
  4097  		{
  4098  			name:     "Replace message of non status error with nil object",
  4099  			input:    nonStatusError,
  4100  			expected: expectedNilObjectNonStatusError,
  4101  			obj:      nil,
  4102  		},
  4103  	}
  4104  	for _, tt := range tests {
  4105  		t.Run(tt.name, func(t *testing.T) {
  4106  			g := NewWithT(t)
  4107  			err := createErrorWithoutObjectName(ctx, tt.input, tt.obj)
  4108  			g.Expect(err.Error()).To(Equal(tt.expected.Error()))
  4109  		})
  4110  	}
  4111  }