sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/topology/cluster/reconcile_state_test.go (about)

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"fmt"
    21  	"net/http"
    22  	"regexp"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/google/go-cmp/cmp"
    27  	. "github.com/onsi/gomega"
    28  	"github.com/pkg/errors"
    29  	corev1 "k8s.io/api/core/v1"
    30  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    33  	"k8s.io/apimachinery/pkg/runtime"
    34  	"k8s.io/apimachinery/pkg/types"
    35  	"k8s.io/apimachinery/pkg/util/intstr"
    36  	utilfeature "k8s.io/component-base/featuregate/testing"
    37  	"k8s.io/utils/pointer"
    38  	"sigs.k8s.io/controller-runtime/pkg/client"
    39  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    40  	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
    41  	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
    42  
    43  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    44  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    45  	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
    46  	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
    47  	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
    48  	"sigs.k8s.io/cluster-api/feature"
    49  	"sigs.k8s.io/cluster-api/internal/contract"
    50  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
    51  	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
    52  	"sigs.k8s.io/cluster-api/internal/hooks"
    53  	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
    54  	"sigs.k8s.io/cluster-api/internal/test/builder"
    55  	"sigs.k8s.io/cluster-api/internal/util/ssa"
    56  	"sigs.k8s.io/cluster-api/internal/webhooks"
    57  )
    58  
    59  var (
    60  	IgnoreNameGenerated = IgnorePaths{
    61  		"metadata.name",
    62  	}
    63  )
    64  
// TestReconcileShim exercises reconcileClusterShim across the shim lifecycle:
// creation while InfrastructureCluster/ControlPlane still have to be created,
// re-entrancy when the shim already exists, retention while those objects are
// waiting to be reconciled, deletion once both are owned by the Cluster, and
// a no-op once the shim is already gone.
// NOTE(review): relies on the package-level envtest fixtures `env` and `ctx`
// defined elsewhere in this package.
func TestReconcileShim(t *testing.T) {
	infrastructureCluster := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").Build()
	controlPlane := builder.TestControlPlane(metav1.NamespaceDefault, "controlplane-cluster1").Build()
	cluster := builder.Cluster(metav1.NamespaceDefault, "cluster1").Build()
	// cluster requires a UID because reconcileClusterShim will create a cluster shim
	// which has the cluster set as Owner in an OwnerReference.
	// A valid OwnerReferences requires a uid.
	cluster.SetUID("foo")

	t.Run("Shim gets created when InfrastructureCluster and ControlPlane object have to be created", func(t *testing.T) {
		g := NewWithT(t)

		// Create namespace and modify input to have correct namespace set
		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
		g.Expect(err).ToNot(HaveOccurred())
		cluster1 := cluster.DeepCopy()
		cluster1.SetNamespace(namespace.GetName())
		cluster1Shim := clusterShim(cluster1)

		// Create a scope with a cluster and InfrastructureCluster yet to be created.
		// Current is left empty: nothing exists in the cluster yet.
		s := scope.New(cluster1)
		s.Desired = &scope.ClusterState{
			InfrastructureCluster: infrastructureCluster.DeepCopy(),
			ControlPlane: &scope.ControlPlaneState{
				Object: controlPlane.DeepCopy(),
			},
		}

		// Run reconcileClusterShim.
		r := Reconciler{
			Client:             env,
			APIReader:          env.GetAPIReader(),
			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
		}
		err = r.reconcileClusterShim(ctx, s)
		g.Expect(err).ToNot(HaveOccurred())

		// Check cluster shim exists.
		// Read through the API reader (not the cached client) to avoid stale-cache flakes.
		shim := cluster1Shim.DeepCopy()
		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
		g.Expect(err).ToNot(HaveOccurred())

		// Check shim is assigned as owner for InfrastructureCluster and ControlPlane objects.
		// reconcileClusterShim mutates the desired objects in place.
		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()).To(HaveLen(1))
		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()).To(HaveLen(1))
		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
	})
	t.Run("Shim creation is re-entrant", func(t *testing.T) {
		g := NewWithT(t)

		// Create namespace and modify input to have correct namespace set
		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
		g.Expect(err).ToNot(HaveOccurred())
		cluster1 := cluster.DeepCopy()
		cluster1.SetNamespace(namespace.GetName())
		cluster1Shim := clusterShim(cluster1)

		// Create a scope with a cluster and InfrastructureCluster yet to be created.
		s := scope.New(cluster1)
		s.Desired = &scope.ClusterState{
			InfrastructureCluster: infrastructureCluster.DeepCopy(),
			ControlPlane: &scope.ControlPlaneState{
				Object: controlPlane.DeepCopy(),
			},
		}

		// Pre-create a shim
		// Reconcile must tolerate the shim already existing (AlreadyExists must not surface).
		// NOTE(review): ToNot(HaveOccurred()) on a plain error; To(Succeed()) would be the
		// more idiomatic gomega matcher here.
		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())

		// Run reconcileClusterShim.
		r := Reconciler{
			Client:             env,
			APIReader:          env.GetAPIReader(),
			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
		}
		err = r.reconcileClusterShim(ctx, s)
		g.Expect(err).ToNot(HaveOccurred())

		// Check cluster shim exists.
		shim := cluster1Shim.DeepCopy()
		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
		g.Expect(err).ToNot(HaveOccurred())

		// Check shim is assigned as owner for InfrastructureCluster and ControlPlane objects.
		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()).To(HaveLen(1))
		g.Expect(s.Desired.InfrastructureCluster.GetOwnerReferences()[0].Name).To(Equal(shim.Name))
		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()).To(HaveLen(1))
		g.Expect(s.Desired.ControlPlane.Object.GetOwnerReferences()[0].Name).To(Equal(shim.Name))

		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
	})
	t.Run("Shim is not deleted if InfrastructureCluster and ControlPlane object are waiting to be reconciled", func(t *testing.T) {
		g := NewWithT(t)

		// Create namespace and modify input to have correct namespace set
		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
		g.Expect(err).ToNot(HaveOccurred())
		cluster1 := cluster.DeepCopy()
		cluster1.SetNamespace(namespace.GetName())
		cluster1Shim := clusterShim(cluster1)

		// Create a scope with a cluster and InfrastructureCluster created but not yet reconciled.
		s := scope.New(cluster1)
		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
		s.Current.ControlPlane = &scope.ControlPlaneState{
			Object: controlPlane.DeepCopy(),
		}

		// Add the shim as a temporary owner for the InfrastructureCluster and ControlPlane.
		// Only the shim owner ref is present (no cluster owner ref) => "not yet reconciled".
		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
		ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1Shim))
		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
		ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1Shim))
		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)

		// Pre-create a shim
		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())

		// Run reconcileClusterShim.
		r := Reconciler{
			Client:             env,
			APIReader:          env.GetAPIReader(),
			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
		}
		err = r.reconcileClusterShim(ctx, s)
		g.Expect(err).ToNot(HaveOccurred())

		// Check cluster shim exists.
		// The shim must be kept while objects are still waiting to be reconciled.
		shim := cluster1Shim.DeepCopy()
		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
		g.Expect(err).ToNot(HaveOccurred())

		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
	})
	t.Run("Shim gets deleted when InfrastructureCluster and ControlPlane object have been reconciled", func(t *testing.T) {
		g := NewWithT(t)

		// Create namespace and modify input to have correct namespace set
		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
		g.Expect(err).ToNot(HaveOccurred())
		cluster1 := cluster.DeepCopy()
		cluster1.SetNamespace(namespace.GetName())
		cluster1Shim := clusterShim(cluster1)

		// Create a scope with a cluster and InfrastructureCluster created and reconciled.
		s := scope.New(cluster1)
		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
		s.Current.ControlPlane = &scope.ControlPlaneState{
			Object: controlPlane.DeepCopy(),
		}

		// Add the shim as a temporary owner for the InfrastructureCluster and ControlPlane.
		// Add the cluster as a final owner for the InfrastructureCluster and ControlPlane (reconciled).
		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
		ownerRefs = append(
			ownerRefs,
			*ownerReferenceTo(cluster1Shim),
			*ownerReferenceTo(cluster1))
		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
		ownerRefs = append(
			ownerRefs,
			*ownerReferenceTo(cluster1Shim),
			*ownerReferenceTo(cluster1))
		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)

		// Pre-create a shim
		g.Expect(env.CreateAndWait(ctx, cluster1Shim.DeepCopy())).ToNot(HaveOccurred())

		// Run reconcileClusterShim.
		r := Reconciler{
			Client:             env,
			APIReader:          env.GetAPIReader(),
			patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
		}
		err = r.reconcileClusterShim(ctx, s)
		g.Expect(err).ToNot(HaveOccurred())

		// Check cluster shim exists.
		// Both objects carry the final cluster owner ref, so the shim must now be gone.
		shim := cluster1Shim.DeepCopy()
		err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(shim), shim)
		g.Expect(apierrors.IsNotFound(err)).To(BeTrue())

		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
	})
	t.Run("No op if InfrastructureCluster and ControlPlane object have been reconciled and shim is gone", func(t *testing.T) {
		g := NewWithT(t)

		// Create namespace and modify input to have correct namespace set
		namespace, err := env.CreateNamespace(ctx, "reconcile-cluster-shim")
		g.Expect(err).ToNot(HaveOccurred())
		cluster1 := cluster.DeepCopy()
		cluster1.SetNamespace(namespace.GetName())
		cluster1Shim := clusterShim(cluster1)

		// Create a scope with a cluster and InfrastructureCluster created and reconciled.
		s := scope.New(cluster1)
		s.Current.InfrastructureCluster = infrastructureCluster.DeepCopy()
		s.Current.ControlPlane = &scope.ControlPlaneState{
			Object: controlPlane.DeepCopy(),
		}

		// Add the cluster as a final owner for the InfrastructureCluster and ControlPlane (reconciled).
		ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences()
		ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1))
		s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs)
		ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences()
		ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1))
		s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs)

		// Run reconcileClusterShim using a nil client, so an error will be triggered if any operation is attempted
		r := Reconciler{
			Client:             nil,
			APIReader:          env.GetAPIReader(),
			patchHelperFactory: serverSideApplyPatchHelperFactory(nil, ssa.NewCache()),
		}
		err = r.reconcileClusterShim(ctx, s)
		g.Expect(err).ToNot(HaveOccurred())

		g.Expect(env.CleanupAndWait(ctx, cluster1Shim)).To(Succeed())
	})
}
   290  
   291  func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) {
   292  	catalog := runtimecatalog.New()
   293  	_ = runtimehooksv1.AddToCatalog(catalog)
   294  
   295  	afterControlPlaneInitializedGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneInitialized)
   296  	if err != nil {
   297  		panic(err)
   298  	}
   299  
   300  	successResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{
   301  
   302  		CommonResponse: runtimehooksv1.CommonResponse{
   303  			Status: runtimehooksv1.ResponseStatusSuccess,
   304  		},
   305  	}
   306  	failureResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{
   307  		CommonResponse: runtimehooksv1.CommonResponse{
   308  			Status: runtimehooksv1.ResponseStatusFailure,
   309  		},
   310  	}
   311  
   312  	tests := []struct {
   313  		name               string
   314  		cluster            *clusterv1.Cluster
   315  		hookResponse       *runtimehooksv1.AfterControlPlaneInitializedResponse
   316  		wantMarked         bool
   317  		wantHookToBeCalled bool
   318  		wantError          bool
   319  	}{
   320  		{
   321  			name: "hook should be marked if the cluster is about to be created",
   322  			cluster: &clusterv1.Cluster{
   323  				ObjectMeta: metav1.ObjectMeta{
   324  					Name:      "test-cluster",
   325  					Namespace: "test-ns",
   326  				},
   327  				Spec: clusterv1.ClusterSpec{},
   328  			},
   329  			hookResponse:       successResponse,
   330  			wantMarked:         true,
   331  			wantHookToBeCalled: false,
   332  			wantError:          false,
   333  		},
   334  		{
   335  			name: "hook should be called if it is marked and the control plane is ready - the hook should become unmarked for a success response",
   336  			cluster: &clusterv1.Cluster{
   337  				ObjectMeta: metav1.ObjectMeta{
   338  					Name:      "test-cluster",
   339  					Namespace: "test-ns",
   340  					Annotations: map[string]string{
   341  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   342  					},
   343  				},
   344  				Spec: clusterv1.ClusterSpec{
   345  					ControlPlaneRef:   &corev1.ObjectReference{},
   346  					InfrastructureRef: &corev1.ObjectReference{},
   347  				},
   348  				Status: clusterv1.ClusterStatus{
   349  					Conditions: clusterv1.Conditions{
   350  						clusterv1.Condition{
   351  							Type:   clusterv1.ControlPlaneInitializedCondition,
   352  							Status: corev1.ConditionTrue,
   353  						},
   354  					},
   355  				},
   356  			},
   357  			hookResponse:       successResponse,
   358  			wantMarked:         false,
   359  			wantHookToBeCalled: true,
   360  			wantError:          false,
   361  		},
   362  		{
   363  			name: "hook should be called if it is marked and the control plane is ready - the hook should remain marked for a failure response",
   364  			cluster: &clusterv1.Cluster{
   365  				ObjectMeta: metav1.ObjectMeta{
   366  					Name:      "test-cluster",
   367  					Namespace: "test-ns",
   368  					Annotations: map[string]string{
   369  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   370  					},
   371  				},
   372  				Spec: clusterv1.ClusterSpec{
   373  					ControlPlaneRef:   &corev1.ObjectReference{},
   374  					InfrastructureRef: &corev1.ObjectReference{},
   375  				},
   376  				Status: clusterv1.ClusterStatus{
   377  					Conditions: clusterv1.Conditions{
   378  						clusterv1.Condition{
   379  							Type:   clusterv1.ControlPlaneInitializedCondition,
   380  							Status: corev1.ConditionTrue,
   381  						},
   382  					},
   383  				},
   384  			},
   385  			hookResponse:       failureResponse,
   386  			wantMarked:         true,
   387  			wantHookToBeCalled: true,
   388  			wantError:          true,
   389  		},
   390  		{
   391  			name: "hook should not be called if it is marked and the control plane is not ready - the hook should remain marked",
   392  			cluster: &clusterv1.Cluster{
   393  				ObjectMeta: metav1.ObjectMeta{
   394  					Name:      "test-cluster",
   395  					Namespace: "test-ns",
   396  					Annotations: map[string]string{
   397  						runtimev1.PendingHooksAnnotation: "AfterControlPlaneInitialized",
   398  					},
   399  				},
   400  				Spec: clusterv1.ClusterSpec{
   401  					ControlPlaneRef:   &corev1.ObjectReference{},
   402  					InfrastructureRef: &corev1.ObjectReference{},
   403  				},
   404  				Status: clusterv1.ClusterStatus{
   405  					Conditions: clusterv1.Conditions{
   406  						clusterv1.Condition{
   407  							Type:   clusterv1.ControlPlaneInitializedCondition,
   408  							Status: corev1.ConditionFalse,
   409  						},
   410  					},
   411  				},
   412  			},
   413  			hookResponse:       failureResponse,
   414  			wantMarked:         true,
   415  			wantHookToBeCalled: false,
   416  			wantError:          false,
   417  		},
   418  		{
   419  			name: "hook should not be called if it is not marked",
   420  			cluster: &clusterv1.Cluster{
   421  				ObjectMeta: metav1.ObjectMeta{
   422  					Name:      "test-cluster",
   423  					Namespace: "test-ns",
   424  				},
   425  				Spec: clusterv1.ClusterSpec{
   426  					ControlPlaneRef:   &corev1.ObjectReference{},
   427  					InfrastructureRef: &corev1.ObjectReference{},
   428  				},
   429  				Status: clusterv1.ClusterStatus{
   430  					Conditions: clusterv1.Conditions{
   431  						clusterv1.Condition{
   432  							Type:   clusterv1.ControlPlaneInitializedCondition,
   433  							Status: corev1.ConditionTrue,
   434  						},
   435  					},
   436  				},
   437  			},
   438  			hookResponse:       failureResponse,
   439  			wantMarked:         false,
   440  			wantHookToBeCalled: false,
   441  			wantError:          false,
   442  		},
   443  	}
   444  
   445  	for _, tt := range tests {
   446  		t.Run(tt.name, func(t *testing.T) {
   447  			g := NewWithT(t)
   448  
   449  			s := &scope.Scope{
   450  				Current: &scope.ClusterState{
   451  					Cluster: tt.cluster,
   452  				},
   453  				HookResponseTracker: scope.NewHookResponseTracker(),
   454  			}
   455  
   456  			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
   457  				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
   458  					afterControlPlaneInitializedGVH: tt.hookResponse,
   459  				}).
   460  				WithCatalog(catalog).
   461  				Build()
   462  
   463  			fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build()
   464  
   465  			r := &Reconciler{
   466  				Client:        fakeClient,
   467  				APIReader:     fakeClient,
   468  				RuntimeClient: fakeRuntimeClient,
   469  			}
   470  
   471  			err := r.callAfterControlPlaneInitialized(ctx, s)
   472  			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneInitialized) == 1).To(Equal(tt.wantHookToBeCalled))
   473  			g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneInitialized, tt.cluster)).To(Equal(tt.wantMarked))
   474  			g.Expect(err != nil).To(Equal(tt.wantError))
   475  		})
   476  	}
   477  }
   478  
   479  func TestReconcile_callAfterClusterUpgrade(t *testing.T) {
   480  	catalog := runtimecatalog.New()
   481  	_ = runtimehooksv1.AddToCatalog(catalog)
   482  
   483  	afterClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterClusterUpgrade)
   484  	if err != nil {
   485  		panic(err)
   486  	}
   487  
   488  	successResponse := &runtimehooksv1.AfterClusterUpgradeResponse{
   489  
   490  		CommonResponse: runtimehooksv1.CommonResponse{
   491  			Status: runtimehooksv1.ResponseStatusSuccess,
   492  		},
   493  	}
   494  	failureResponse := &runtimehooksv1.AfterClusterUpgradeResponse{
   495  		CommonResponse: runtimehooksv1.CommonResponse{
   496  			Status: runtimehooksv1.ResponseStatusFailure,
   497  		},
   498  	}
   499  
   500  	topologyVersion := "v1.2.3"
   501  	controlPlaneObj := builder.ControlPlane("test1", "cp1").
   502  		Build()
   503  
   504  	tests := []struct {
   505  		name               string
   506  		s                  *scope.Scope
   507  		hookResponse       *runtimehooksv1.AfterClusterUpgradeResponse
   508  		wantMarked         bool
   509  		wantHookToBeCalled bool
   510  		wantError          bool
   511  	}{
   512  		{
   513  			name: "hook should not be called if it is not marked",
   514  			s: &scope.Scope{
   515  				Blueprint: &scope.ClusterBlueprint{
   516  					Topology: &clusterv1.Topology{
   517  						ControlPlane: clusterv1.ControlPlaneTopology{
   518  							Replicas: pointer.Int32(2),
   519  						},
   520  					},
   521  				},
   522  				Current: &scope.ClusterState{
   523  					Cluster: &clusterv1.Cluster{
   524  						ObjectMeta: metav1.ObjectMeta{
   525  							Name:      "test-cluster",
   526  							Namespace: "test-ns",
   527  						},
   528  						Spec: clusterv1.ClusterSpec{},
   529  					},
   530  					ControlPlane: &scope.ControlPlaneState{
   531  						Object: controlPlaneObj,
   532  					},
   533  				},
   534  				HookResponseTracker: scope.NewHookResponseTracker(),
   535  				UpgradeTracker:      scope.NewUpgradeTracker(),
   536  			},
   537  			wantMarked:         false,
   538  			hookResponse:       successResponse,
   539  			wantHookToBeCalled: false,
   540  			wantError:          false,
   541  		},
   542  		{
   543  			name: "hook should not be called if the control plane is starting a new upgrade - hook is marked",
   544  			s: &scope.Scope{
   545  				Blueprint: &scope.ClusterBlueprint{
   546  					Topology: &clusterv1.Topology{
   547  						ControlPlane: clusterv1.ControlPlaneTopology{
   548  							Replicas: pointer.Int32(2),
   549  						},
   550  					},
   551  				},
   552  				Current: &scope.ClusterState{
   553  					Cluster: &clusterv1.Cluster{
   554  						ObjectMeta: metav1.ObjectMeta{
   555  							Name:      "test-cluster",
   556  							Namespace: "test-ns",
   557  							Annotations: map[string]string{
   558  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   559  							},
   560  						},
   561  						Spec: clusterv1.ClusterSpec{},
   562  					},
   563  					ControlPlane: &scope.ControlPlaneState{
   564  						Object: controlPlaneObj,
   565  					},
   566  				},
   567  				HookResponseTracker: scope.NewHookResponseTracker(),
   568  				UpgradeTracker: func() *scope.UpgradeTracker {
   569  					ut := scope.NewUpgradeTracker()
   570  					ut.ControlPlane.IsStartingUpgrade = true
   571  					return ut
   572  				}(),
   573  			},
   574  			wantMarked:         true,
   575  			hookResponse:       successResponse,
   576  			wantHookToBeCalled: false,
   577  			wantError:          false,
   578  		},
   579  		{
   580  			name: "hook should not be called if the control plane is upgrading - hook is marked",
   581  			s: &scope.Scope{
   582  				Blueprint: &scope.ClusterBlueprint{
   583  					Topology: &clusterv1.Topology{
   584  						ControlPlane: clusterv1.ControlPlaneTopology{
   585  							Replicas: pointer.Int32(2),
   586  						},
   587  					},
   588  				},
   589  				Current: &scope.ClusterState{
   590  					Cluster: &clusterv1.Cluster{
   591  						ObjectMeta: metav1.ObjectMeta{
   592  							Name:      "test-cluster",
   593  							Namespace: "test-ns",
   594  							Annotations: map[string]string{
   595  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   596  							},
   597  						},
   598  						Spec: clusterv1.ClusterSpec{},
   599  					},
   600  					ControlPlane: &scope.ControlPlaneState{
   601  						Object: controlPlaneObj,
   602  					},
   603  				},
   604  				HookResponseTracker: scope.NewHookResponseTracker(),
   605  				UpgradeTracker: func() *scope.UpgradeTracker {
   606  					ut := scope.NewUpgradeTracker()
   607  					ut.ControlPlane.IsUpgrading = true
   608  					return ut
   609  				}(),
   610  			},
   611  			wantMarked:         true,
   612  			hookResponse:       successResponse,
   613  			wantHookToBeCalled: false,
   614  			wantError:          false,
   615  		},
   616  		{
   617  			name: "hook should not be called if the control plane is scaling - hook is marked",
   618  			s: &scope.Scope{
   619  				Blueprint: &scope.ClusterBlueprint{
   620  					Topology: &clusterv1.Topology{
   621  						ControlPlane: clusterv1.ControlPlaneTopology{
   622  							Replicas: pointer.Int32(2),
   623  						},
   624  					},
   625  				},
   626  				Current: &scope.ClusterState{
   627  					Cluster: &clusterv1.Cluster{
   628  						ObjectMeta: metav1.ObjectMeta{
   629  							Name:      "test-cluster",
   630  							Namespace: "test-ns",
   631  							Annotations: map[string]string{
   632  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   633  							},
   634  						},
   635  						Spec: clusterv1.ClusterSpec{},
   636  					},
   637  					ControlPlane: &scope.ControlPlaneState{
   638  						Object: controlPlaneObj,
   639  					},
   640  				},
   641  				HookResponseTracker: scope.NewHookResponseTracker(),
   642  				UpgradeTracker: func() *scope.UpgradeTracker {
   643  					ut := scope.NewUpgradeTracker()
   644  					ut.ControlPlane.IsScaling = true
   645  					return ut
   646  				}(),
   647  			},
   648  			wantMarked:         true,
   649  			hookResponse:       successResponse,
   650  			wantHookToBeCalled: false,
   651  			wantError:          false,
   652  		},
   653  		{
   654  			name: "hook should not be called if the control plane is pending an upgrade - hook is marked",
   655  			s: &scope.Scope{
   656  				Blueprint: &scope.ClusterBlueprint{
   657  					Topology: &clusterv1.Topology{
   658  						ControlPlane: clusterv1.ControlPlaneTopology{
   659  							Replicas: pointer.Int32(2),
   660  						},
   661  					},
   662  				},
   663  				Current: &scope.ClusterState{
   664  					Cluster: &clusterv1.Cluster{
   665  						ObjectMeta: metav1.ObjectMeta{
   666  							Name:      "test-cluster",
   667  							Namespace: "test-ns",
   668  							Annotations: map[string]string{
   669  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   670  							},
   671  						},
   672  						Spec: clusterv1.ClusterSpec{},
   673  					},
   674  					ControlPlane: &scope.ControlPlaneState{
   675  						Object: controlPlaneObj,
   676  					},
   677  				},
   678  				HookResponseTracker: scope.NewHookResponseTracker(),
   679  				UpgradeTracker: func() *scope.UpgradeTracker {
   680  					ut := scope.NewUpgradeTracker()
   681  					ut.ControlPlane.IsPendingUpgrade = true
   682  					return ut
   683  				}(),
   684  			},
   685  			wantMarked:         true,
   686  			hookResponse:       successResponse,
   687  			wantHookToBeCalled: false,
   688  			wantError:          false,
   689  		},
   690  		{
   691  			name: "hook should not be called if the control plane is stable at desired version but MDs are upgrading - hook is marked",
   692  			s: &scope.Scope{
   693  				Blueprint: &scope.ClusterBlueprint{
   694  					Topology: &clusterv1.Topology{
   695  						ControlPlane: clusterv1.ControlPlaneTopology{
   696  							Replicas: pointer.Int32(2),
   697  						},
   698  					},
   699  				},
   700  				Current: &scope.ClusterState{
   701  					Cluster: &clusterv1.Cluster{
   702  						ObjectMeta: metav1.ObjectMeta{
   703  							Name:      "test-cluster",
   704  							Namespace: "test-ns",
   705  							Annotations: map[string]string{
   706  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   707  							},
   708  						},
   709  						Spec: clusterv1.ClusterSpec{},
   710  					},
   711  					ControlPlane: &scope.ControlPlaneState{
   712  						Object: controlPlaneObj,
   713  					},
   714  				},
   715  				HookResponseTracker: scope.NewHookResponseTracker(),
   716  				UpgradeTracker: func() *scope.UpgradeTracker {
   717  					ut := scope.NewUpgradeTracker()
   718  					ut.ControlPlane.IsPendingUpgrade = false
   719  					ut.MachineDeployments.MarkUpgrading("md1")
   720  					return ut
   721  				}(),
   722  			},
   723  			wantMarked:         true,
   724  			hookResponse:       successResponse,
   725  			wantHookToBeCalled: false,
   726  			wantError:          false,
   727  		},
   728  		{
   729  			name: "hook should not be called if the control plane is stable at desired version but MPs are upgrading - hook is marked",
   730  			s: &scope.Scope{
   731  				Blueprint: &scope.ClusterBlueprint{
   732  					Topology: &clusterv1.Topology{
   733  						ControlPlane: clusterv1.ControlPlaneTopology{
   734  							Replicas: pointer.Int32(2),
   735  						},
   736  					},
   737  				},
   738  				Current: &scope.ClusterState{
   739  					Cluster: &clusterv1.Cluster{
   740  						ObjectMeta: metav1.ObjectMeta{
   741  							Name:      "test-cluster",
   742  							Namespace: "test-ns",
   743  							Annotations: map[string]string{
   744  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   745  							},
   746  						},
   747  						Spec: clusterv1.ClusterSpec{},
   748  					},
   749  					ControlPlane: &scope.ControlPlaneState{
   750  						Object: controlPlaneObj,
   751  					},
   752  				},
   753  				HookResponseTracker: scope.NewHookResponseTracker(),
   754  				UpgradeTracker: func() *scope.UpgradeTracker {
   755  					ut := scope.NewUpgradeTracker()
   756  					ut.ControlPlane.IsPendingUpgrade = false
   757  					ut.MachinePools.MarkUpgrading("mp1")
   758  					return ut
   759  				}(),
   760  			},
   761  			wantMarked:         true,
   762  			hookResponse:       successResponse,
   763  			wantHookToBeCalled: false,
   764  			wantError:          false,
   765  		},
   766  		{
   767  			name: "hook should not be called if the control plane is stable at desired version but MDs are pending create - hook is marked",
   768  			s: &scope.Scope{
   769  				Blueprint: &scope.ClusterBlueprint{
   770  					Topology: &clusterv1.Topology{
   771  						ControlPlane: clusterv1.ControlPlaneTopology{
   772  							Replicas: pointer.Int32(2),
   773  						},
   774  					},
   775  				},
   776  				Current: &scope.ClusterState{
   777  					Cluster: &clusterv1.Cluster{
   778  						ObjectMeta: metav1.ObjectMeta{
   779  							Name:      "test-cluster",
   780  							Namespace: "test-ns",
   781  							Annotations: map[string]string{
   782  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   783  							},
   784  						},
   785  						Spec: clusterv1.ClusterSpec{},
   786  					},
   787  					ControlPlane: &scope.ControlPlaneState{
   788  						Object: controlPlaneObj,
   789  					}},
   790  				HookResponseTracker: scope.NewHookResponseTracker(),
   791  				UpgradeTracker: func() *scope.UpgradeTracker {
   792  					ut := scope.NewUpgradeTracker()
   793  					ut.ControlPlane.IsPendingUpgrade = false
   794  					ut.MachineDeployments.MarkPendingCreate("md-topology-1")
   795  					return ut
   796  				}(),
   797  			},
   798  			wantMarked:         true,
   799  			hookResponse:       successResponse,
   800  			wantHookToBeCalled: false,
   801  			wantError:          false,
   802  		},
   803  		{
   804  			name: "hook should not be called if the control plane is stable at desired version but MPs are pending create - hook is marked",
   805  			s: &scope.Scope{
   806  				Blueprint: &scope.ClusterBlueprint{
   807  					Topology: &clusterv1.Topology{
   808  						ControlPlane: clusterv1.ControlPlaneTopology{
   809  							Replicas: pointer.Int32(2),
   810  						},
   811  					},
   812  				},
   813  				Current: &scope.ClusterState{
   814  					Cluster: &clusterv1.Cluster{
   815  						ObjectMeta: metav1.ObjectMeta{
   816  							Name:      "test-cluster",
   817  							Namespace: "test-ns",
   818  							Annotations: map[string]string{
   819  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   820  							},
   821  						},
   822  						Spec: clusterv1.ClusterSpec{},
   823  					},
   824  					ControlPlane: &scope.ControlPlaneState{
   825  						Object: controlPlaneObj,
   826  					}},
   827  				HookResponseTracker: scope.NewHookResponseTracker(),
   828  				UpgradeTracker: func() *scope.UpgradeTracker {
   829  					ut := scope.NewUpgradeTracker()
   830  					ut.ControlPlane.IsPendingUpgrade = false
   831  					ut.MachinePools.MarkPendingCreate("mp-topology-1")
   832  					return ut
   833  				}(),
   834  			},
   835  			wantMarked:         true,
   836  			hookResponse:       successResponse,
   837  			wantHookToBeCalled: false,
   838  			wantError:          false,
   839  		},
   840  		{
   841  			name: "hook should not be called if the control plane is stable at desired version but MDs are pending upgrade - hook is marked",
   842  			s: &scope.Scope{
   843  				Blueprint: &scope.ClusterBlueprint{
   844  					Topology: &clusterv1.Topology{
   845  						ControlPlane: clusterv1.ControlPlaneTopology{
   846  							Replicas: pointer.Int32(2),
   847  						},
   848  					},
   849  				},
   850  				Current: &scope.ClusterState{
   851  					Cluster: &clusterv1.Cluster{
   852  						ObjectMeta: metav1.ObjectMeta{
   853  							Name:      "test-cluster",
   854  							Namespace: "test-ns",
   855  							Annotations: map[string]string{
   856  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   857  							},
   858  						},
   859  						Spec: clusterv1.ClusterSpec{},
   860  					},
   861  					ControlPlane: &scope.ControlPlaneState{
   862  						Object: controlPlaneObj,
   863  					}},
   864  				HookResponseTracker: scope.NewHookResponseTracker(),
   865  				UpgradeTracker: func() *scope.UpgradeTracker {
   866  					ut := scope.NewUpgradeTracker()
   867  					ut.ControlPlane.IsPendingUpgrade = false
   868  					ut.MachineDeployments.MarkPendingUpgrade("md1")
   869  					return ut
   870  				}(),
   871  			},
   872  			wantMarked:         true,
   873  			hookResponse:       successResponse,
   874  			wantHookToBeCalled: false,
   875  			wantError:          false,
   876  		},
   877  		{
   878  			name: "hook should not be called if the control plane is stable at desired version but MPs are pending upgrade - hook is marked",
   879  			s: &scope.Scope{
   880  				Blueprint: &scope.ClusterBlueprint{
   881  					Topology: &clusterv1.Topology{
   882  						ControlPlane: clusterv1.ControlPlaneTopology{
   883  							Replicas: pointer.Int32(2),
   884  						},
   885  					},
   886  				},
   887  				Current: &scope.ClusterState{
   888  					Cluster: &clusterv1.Cluster{
   889  						ObjectMeta: metav1.ObjectMeta{
   890  							Name:      "test-cluster",
   891  							Namespace: "test-ns",
   892  							Annotations: map[string]string{
   893  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   894  							},
   895  						},
   896  						Spec: clusterv1.ClusterSpec{},
   897  					},
   898  					ControlPlane: &scope.ControlPlaneState{
   899  						Object: controlPlaneObj,
   900  					}},
   901  				HookResponseTracker: scope.NewHookResponseTracker(),
   902  				UpgradeTracker: func() *scope.UpgradeTracker {
   903  					ut := scope.NewUpgradeTracker()
   904  					ut.ControlPlane.IsPendingUpgrade = false
   905  					ut.MachinePools.MarkPendingUpgrade("mp1")
   906  					return ut
   907  				}(),
   908  			},
   909  			wantMarked:         true,
   910  			hookResponse:       successResponse,
   911  			wantHookToBeCalled: false,
   912  			wantError:          false,
   913  		},
   914  		{
   915  			name: "hook should not be called if the control plane is stable at desired version but MDs upgrade is deferred - hook is marked",
   916  			s: &scope.Scope{
   917  				Blueprint: &scope.ClusterBlueprint{
   918  					Topology: &clusterv1.Topology{
   919  						ControlPlane: clusterv1.ControlPlaneTopology{
   920  							Replicas: pointer.Int32(2),
   921  						},
   922  					},
   923  				},
   924  				Current: &scope.ClusterState{
   925  					Cluster: &clusterv1.Cluster{
   926  						ObjectMeta: metav1.ObjectMeta{
   927  							Name:      "test-cluster",
   928  							Namespace: "test-ns",
   929  							Annotations: map[string]string{
   930  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   931  							},
   932  						},
   933  						Spec: clusterv1.ClusterSpec{},
   934  					},
   935  					ControlPlane: &scope.ControlPlaneState{
   936  						Object: controlPlaneObj,
   937  					},
   938  				},
   939  				HookResponseTracker: scope.NewHookResponseTracker(),
   940  				UpgradeTracker: func() *scope.UpgradeTracker {
   941  					ut := scope.NewUpgradeTracker()
   942  					ut.ControlPlane.IsPendingUpgrade = false
   943  					ut.MachineDeployments.MarkDeferredUpgrade("md1")
   944  					return ut
   945  				}(),
   946  			},
   947  			wantMarked:         true,
   948  			hookResponse:       successResponse,
   949  			wantHookToBeCalled: false,
   950  			wantError:          false,
   951  		},
   952  		{
   953  			name: "hook should not be called if the control plane is stable at desired version but MPs upgrade is deferred - hook is marked",
   954  			s: &scope.Scope{
   955  				Blueprint: &scope.ClusterBlueprint{
   956  					Topology: &clusterv1.Topology{
   957  						ControlPlane: clusterv1.ControlPlaneTopology{
   958  							Replicas: pointer.Int32(2),
   959  						},
   960  					},
   961  				},
   962  				Current: &scope.ClusterState{
   963  					Cluster: &clusterv1.Cluster{
   964  						ObjectMeta: metav1.ObjectMeta{
   965  							Name:      "test-cluster",
   966  							Namespace: "test-ns",
   967  							Annotations: map[string]string{
   968  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
   969  							},
   970  						},
   971  						Spec: clusterv1.ClusterSpec{},
   972  					},
   973  					ControlPlane: &scope.ControlPlaneState{
   974  						Object: controlPlaneObj,
   975  					},
   976  				},
   977  				HookResponseTracker: scope.NewHookResponseTracker(),
   978  				UpgradeTracker: func() *scope.UpgradeTracker {
   979  					ut := scope.NewUpgradeTracker()
   980  					ut.ControlPlane.IsPendingUpgrade = false
   981  					ut.MachinePools.MarkDeferredUpgrade("mp1")
   982  					return ut
   983  				}(),
   984  			},
   985  			wantMarked:         true,
   986  			hookResponse:       successResponse,
   987  			wantHookToBeCalled: false,
   988  			wantError:          false,
   989  		},
   990  		{
   991  			name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - success response should unmark the hook",
   992  			s: &scope.Scope{
   993  				Blueprint: &scope.ClusterBlueprint{
   994  					Topology: &clusterv1.Topology{
   995  						ControlPlane: clusterv1.ControlPlaneTopology{
   996  							Replicas: pointer.Int32(2),
   997  						},
   998  					},
   999  				},
  1000  				Current: &scope.ClusterState{
  1001  					Cluster: &clusterv1.Cluster{
  1002  						ObjectMeta: metav1.ObjectMeta{
  1003  							Name:      "test-cluster",
  1004  							Namespace: "test-ns",
  1005  							Annotations: map[string]string{
  1006  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
  1007  							},
  1008  						},
  1009  						Spec: clusterv1.ClusterSpec{
  1010  							Topology: &clusterv1.Topology{
  1011  								Version: topologyVersion,
  1012  							},
  1013  						},
  1014  					},
  1015  					ControlPlane: &scope.ControlPlaneState{
  1016  						Object: controlPlaneObj,
  1017  					},
  1018  				},
  1019  				HookResponseTracker: scope.NewHookResponseTracker(),
  1020  				UpgradeTracker:      scope.NewUpgradeTracker(),
  1021  			},
  1022  			wantMarked:         false,
  1023  			hookResponse:       successResponse,
  1024  			wantHookToBeCalled: true,
  1025  			wantError:          false,
  1026  		},
  1027  		{
  1028  			name: "hook should be called if the control plane, MDs, and MPs are stable at the topology version - failure response should leave the hook marked",
  1029  			s: &scope.Scope{
  1030  				Blueprint: &scope.ClusterBlueprint{
  1031  					Topology: &clusterv1.Topology{
  1032  						ControlPlane: clusterv1.ControlPlaneTopology{
  1033  							Replicas: pointer.Int32(2),
  1034  						},
  1035  					},
  1036  				},
  1037  				Current: &scope.ClusterState{
  1038  					Cluster: &clusterv1.Cluster{
  1039  						ObjectMeta: metav1.ObjectMeta{
  1040  							Name:      "test-cluster",
  1041  							Namespace: "test-ns",
  1042  							Annotations: map[string]string{
  1043  								runtimev1.PendingHooksAnnotation: "AfterClusterUpgrade",
  1044  							},
  1045  						},
  1046  						Spec: clusterv1.ClusterSpec{
  1047  							Topology: &clusterv1.Topology{
  1048  								Version: topologyVersion,
  1049  							},
  1050  						},
  1051  					},
  1052  					ControlPlane: &scope.ControlPlaneState{
  1053  						Object: controlPlaneObj,
  1054  					},
  1055  				},
  1056  				HookResponseTracker: scope.NewHookResponseTracker(),
  1057  				UpgradeTracker:      scope.NewUpgradeTracker(),
  1058  			},
  1059  			wantMarked:         true,
  1060  			hookResponse:       failureResponse,
  1061  			wantHookToBeCalled: true,
  1062  			wantError:          true,
  1063  		},
  1064  	}
  1065  
  1066  	for _, tt := range tests {
  1067  		t.Run(tt.name, func(t *testing.T) {
  1068  			g := NewWithT(t)
  1069  
  1070  			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
  1071  				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
  1072  					afterClusterUpgradeGVH: tt.hookResponse,
  1073  				}).
  1074  				WithCatalog(catalog).
  1075  				Build()
  1076  
  1077  			fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build()
  1078  
  1079  			r := &Reconciler{
  1080  				Client:        fakeClient,
  1081  				APIReader:     fakeClient,
  1082  				RuntimeClient: fakeRuntimeClient,
  1083  			}
  1084  
  1085  			err := r.callAfterClusterUpgrade(ctx, tt.s)
  1086  			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterClusterUpgrade) == 1).To(Equal(tt.wantHookToBeCalled))
  1087  			g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantMarked))
  1088  			g.Expect(err != nil).To(Equal(tt.wantError))
  1089  		})
  1090  	}
  1091  }
  1092  
  1093  func TestReconcileCluster(t *testing.T) {
  1094  	cluster1 := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  1095  		Build()
  1096  	cluster1WithReferences := builder.Cluster(metav1.NamespaceDefault, "cluster1").
  1097  		WithInfrastructureCluster(builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").
  1098  			Build()).
  1099  		WithControlPlane(builder.TestControlPlane(metav1.NamespaceDefault, "control-plane1").Build()).
  1100  		Build()
  1101  	cluster2WithReferences := cluster1WithReferences.DeepCopy()
  1102  	cluster2WithReferences.SetGroupVersionKind(cluster1WithReferences.GroupVersionKind())
  1103  	cluster2WithReferences.Name = "cluster2"
  1104  
  1105  	tests := []struct {
  1106  		name    string
  1107  		current *clusterv1.Cluster
  1108  		desired *clusterv1.Cluster
  1109  		want    *clusterv1.Cluster
  1110  		wantErr bool
  1111  	}{
  1112  		{
  1113  			name:    "Should update the cluster if infrastructure and control plane references are not set",
  1114  			current: cluster1,
  1115  			desired: cluster1WithReferences,
  1116  			want:    cluster1WithReferences,
  1117  			wantErr: false,
  1118  		},
  1119  		{
  1120  			name:    "Should be a no op if infrastructure and control plane references are already set",
  1121  			current: cluster2WithReferences,
  1122  			desired: cluster2WithReferences,
  1123  			want:    cluster2WithReferences,
  1124  			wantErr: false,
  1125  		},
  1126  	}
  1127  	for _, tt := range tests {
  1128  		t.Run(tt.name, func(t *testing.T) {
  1129  			g := NewWithT(t)
  1130  
  1131  			// Create namespace and modify input to have correct namespace set
  1132  			namespace, err := env.CreateNamespace(ctx, "reconcile-cluster")
  1133  			g.Expect(err).ToNot(HaveOccurred())
  1134  			if tt.desired != nil {
  1135  				tt.desired = prepareCluster(tt.desired, namespace.GetName())
  1136  			}
  1137  			if tt.want != nil {
  1138  				tt.want = prepareCluster(tt.want, namespace.GetName())
  1139  			}
  1140  			if tt.current != nil {
  1141  				tt.current = prepareCluster(tt.current, namespace.GetName())
  1142  			}
  1143  
  1144  			if tt.current != nil {
  1145  				// NOTE: it is ok to use create given that the Cluster are created by user.
  1146  				g.Expect(env.CreateAndWait(ctx, tt.current)).To(Succeed())
  1147  			}
  1148  
  1149  			s := scope.New(tt.current)
  1150  
  1151  			s.Desired = &scope.ClusterState{Cluster: tt.desired}
  1152  
  1153  			r := Reconciler{
  1154  				Client:             env,
  1155  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1156  				recorder:           env.GetEventRecorderFor("test"),
  1157  			}
  1158  			err = r.reconcileCluster(ctx, s)
  1159  			if tt.wantErr {
  1160  				g.Expect(err).To(HaveOccurred())
  1161  				return
  1162  			}
  1163  			g.Expect(err).ToNot(HaveOccurred())
  1164  
  1165  			got := tt.want.DeepCopy()
  1166  			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want), got)
  1167  			g.Expect(err).ToNot(HaveOccurred())
  1168  
  1169  			g.Expect(got.Spec.InfrastructureRef).To(EqualObject(tt.want.Spec.InfrastructureRef))
  1170  			g.Expect(got.Spec.ControlPlaneRef).To(EqualObject(tt.want.Spec.ControlPlaneRef))
  1171  
  1172  			if tt.current != nil {
  1173  				g.Expect(env.CleanupAndWait(ctx, tt.current)).To(Succeed())
  1174  			}
  1175  		})
  1176  	}
  1177  }
  1178  
// TestReconcileInfrastructureCluster tests reconciliation of the InfrastructureCluster
// object: creation when it does not exist yet, no-op when current equals desired,
// preservation of instance specific changes made by other field managers, restore of
// fields managed by the topology controller when overridden externally, and failure
// for incompatible changes (here: a different object name).
func TestReconcileInfrastructureCluster(t *testing.T) {
	g := NewWithT(t)

	// build an infrastructure cluster with a field managed by the topology controller (derived from the template).
	clusterInfrastructure1 := builder.TestInfrastructureCluster(metav1.NamespaceDefault, "infrastructure-cluster1").
		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
		Build()

	// build a patch used to simulate instance specific changes made by an external controller, and build the expected cluster infrastructure object.
	clusterInfrastructure1ExternalChanges := "{ \"spec\": { \"bar\": \"bar\" }}"
	clusterInfrastructure1WithExternalChanges := clusterInfrastructure1.DeepCopy()
	g.Expect(unstructured.SetNestedField(clusterInfrastructure1WithExternalChanges.UnstructuredContent(), "bar", "spec", "bar")).To(Succeed())

	// build a patch used to simulate an external controller overriding a field managed by the topology controller.
	clusterInfrastructure1TemplateOverridingChanges := "{ \"spec\": { \"foo\": \"foo-override\" }}"

	// build a desired infrastructure cluster with incompatible changes.
	clusterInfrastructure1WithIncompatibleChanges := clusterInfrastructure1.DeepCopy()
	clusterInfrastructure1WithIncompatibleChanges.SetName("infrastructure-cluster1-changed")

	// original is the object as it exists in the apiserver before reconcile (nil = not created yet);
	// externalChanges is a raw merge patch applied by a simulated external field manager;
	// desired is the state computed by the topology controller; want is the expected end state.
	tests := []struct {
		name            string
		original        *unstructured.Unstructured
		externalChanges string
		desired         *unstructured.Unstructured
		want            *unstructured.Unstructured
		wantErr         bool
	}{
		{
			name:     "Should create desired InfrastructureCluster if the current does not exists yet",
			original: nil,
			desired:  clusterInfrastructure1,
			want:     clusterInfrastructure1,
			wantErr:  false,
		},
		{
			name:     "No-op if current InfrastructureCluster is equal to desired",
			original: clusterInfrastructure1,
			desired:  clusterInfrastructure1,
			want:     clusterInfrastructure1,
			wantErr:  false,
		},
		{
			name:            "Should preserve changes from external controllers",
			original:        clusterInfrastructure1,
			externalChanges: clusterInfrastructure1ExternalChanges,
			desired:         clusterInfrastructure1,
			want:            clusterInfrastructure1WithExternalChanges,
			wantErr:         false,
		},
		{
			name:            "Should restore template values if overridden by external controllers",
			original:        clusterInfrastructure1,
			externalChanges: clusterInfrastructure1TemplateOverridingChanges,
			desired:         clusterInfrastructure1,
			want:            clusterInfrastructure1,
			wantErr:         false,
		},
		{
			name:     "Fails for incompatible changes",
			original: clusterInfrastructure1,
			desired:  clusterInfrastructure1WithIncompatibleChanges,
			wantErr:  true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Create namespace and modify input to have correct namespace set
			namespace, err := env.CreateNamespace(ctx, "reconcile-infrastructure-cluster")
			g.Expect(err).ToNot(HaveOccurred())
			if tt.original != nil {
				tt.original.SetNamespace(namespace.GetName())
			}
			if tt.desired != nil {
				tt.desired.SetNamespace(namespace.GetName())
			}
			if tt.want != nil {
				tt.want.SetNamespace(namespace.GetName())
			}

			if tt.original != nil {
				// NOTE: it is required to use server side apply to create the object in order to ensure consistency with the topology controller behaviour.
				g.Expect(env.PatchAndWait(ctx, tt.original.DeepCopy(), client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
				// NOTE: it is required to apply instance specific changes with a "plain" Patch operation to simulate a different manager.
				if tt.externalChanges != "" {
					g.Expect(env.Patch(ctx, tt.original.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.externalChanges)))).To(Succeed())
				}
			}

			// Build the reconcile scope: read the current object back from the apiserver
			// so reconcile sees the server's view of it, then set the desired state.
			s := scope.New(&clusterv1.Cluster{})
			if tt.original != nil {
				current := builder.TestInfrastructureCluster("", "").Build()
				g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original), current)).To(Succeed())
				s.Current.InfrastructureCluster = current
			}
			s.Desired = &scope.ClusterState{InfrastructureCluster: tt.desired.DeepCopy()}

			r := Reconciler{
				Client:             env,
				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
				recorder:           env.GetEventRecorderFor("test"),
			}
			err = r.reconcileInfrastructureCluster(ctx, s)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			got := tt.want.DeepCopy() // this is required otherwise Get will modify tt.want
			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want), got)
			g.Expect(err).ToNot(HaveOccurred())

			// Spec: every field expected in want must be present in the object read back from the apiserver.
			wantSpec, ok, err := unstructured.NestedMap(tt.want.UnstructuredContent(), "spec")
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(ok).To(BeTrue())

			gotSpec, ok, err := unstructured.NestedMap(got.UnstructuredContent(), "spec")
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(ok).To(BeTrue())
			for k, v := range wantSpec {
				g.Expect(gotSpec).To(HaveKeyWithValue(k, v))
			}

			if tt.desired != nil {
				g.Expect(env.CleanupAndWait(ctx, tt.desired)).To(Succeed())
			}
		})
	}
}
  1312  
  1313  func TestReconcileControlPlane(t *testing.T) {
  1314  	g := NewWithT(t)
  1315  
  1316  	// Objects for testing reconciliation of a control plane without machines.
  1317  
  1318  	// Create cluster class which does not require controlPlaneInfrastructure.
  1319  	ccWithoutControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{}
  1320  
  1321  	// Create ControlPlaneObject without machine templates.
  1322  	controlPlaneWithoutInfrastructure := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1323  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1324  		Build()
  1325  
  1326  	// Create desired ControlPlaneObject without machine templates but introducing some change.
  1327  	controlPlaneWithoutInfrastructureWithChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1328  	g.Expect(unstructured.SetNestedField(controlPlaneWithoutInfrastructureWithChanges.UnstructuredContent(), "foo-changed", "spec", "foo")).To(Succeed())
  1329  
  1330  	// Build a patch used to simulate instance specific changes made by an external controller, and build the expected control plane object.
  1331  	controlPlaneWithoutInfrastructureExternalChanges := "{ \"spec\": { \"bar\": \"bar\" }}"
  1332  	controlPlaneWithoutInfrastructureWithExternalChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1333  	g.Expect(unstructured.SetNestedField(controlPlaneWithoutInfrastructureWithExternalChanges.UnstructuredContent(), "bar", "spec", "bar")).To(Succeed())
  1334  
  1335  	// Build a patch used to simulate an external controller overriding a field managed by the topology controller.
  1336  	controlPlaneWithoutInfrastructureWithExternalOverridingChanges := "{ \"spec\": { \"foo\": \"foo-override\" }}"
  1337  
  1338  	// Create a desired ControlPlaneObject without machine templates but introducing incompatible changes.
  1339  	controlPlaneWithoutInfrastructureWithIncompatibleChanges := controlPlaneWithoutInfrastructure.DeepCopy()
  1340  	controlPlaneWithoutInfrastructureWithIncompatibleChanges.SetName("cp1-changed")
  1341  
  1342  	// Objects for testing reconciliation of a control plane with machines.
  1343  
  1344  	// Create cluster class which does not require controlPlaneInfrastructure.
  1345  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").
  1346  		WithSpecFields(map[string]interface{}{"spec.template.spec.foo": "foo"}).
  1347  		Build()
  1348  	ccWithControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{InfrastructureMachineTemplate: infrastructureMachineTemplate}
  1349  
  1350  	// Create ControlPlaneObject with machine templates.
  1351  	controlPlaneWithInfrastructure := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
  1352  		WithInfrastructureMachineTemplate(infrastructureMachineTemplate).
  1353  		WithSpecFields(map[string]interface{}{"spec.foo": "foo"}).
  1354  		Build()
  1355  
  1356  	// Create desired controlPlaneInfrastructure with some change.
  1357  	infrastructureMachineTemplateWithChanges := infrastructureMachineTemplate.DeepCopy()
  1358  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplateWithChanges.UnstructuredContent(), "foo-changed", "spec", "template", "spec", "foo")).To(Succeed())
  1359  
  1360  	// Build a patch used to simulate instance specific changes made by an external controller, and build the expected machine infrastructure object.
  1361  	infrastructureMachineTemplateExternalChanges := "{ \"spec\": { \"template\": { \"spec\": { \"bar\": \"bar\" } } }}"
  1362  	infrastructureMachineTemplateWithExternalChanges := infrastructureMachineTemplate.DeepCopy()
  1363  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplateWithExternalChanges.UnstructuredContent(), "bar", "spec", "template", "spec", "bar")).To(Succeed())
  1364  
  1365  	// Build a patch used to simulate an external controller overriding a field managed by the topology controller.
  1366  	infrastructureMachineTemplateExternalOverridingChanges := "{ \"spec\": { \"template\": { \"spec\": { \"foo\": \"foo-override\" } } }}"
  1367  
  1368  	// Create a desired infrastructure machine template with incompatible changes.
  1369  	infrastructureMachineTemplateWithIncompatibleChanges := infrastructureMachineTemplate.DeepCopy()
  1370  	gvk := infrastructureMachineTemplateWithIncompatibleChanges.GroupVersionKind()
  1371  	gvk.Kind = "KindChanged"
  1372  	infrastructureMachineTemplateWithIncompatibleChanges.SetGroupVersionKind(gvk)
  1373  
  1374  	upgradeTrackerWithControlPlanePendingUpgrade := scope.NewUpgradeTracker()
  1375  	upgradeTrackerWithControlPlanePendingUpgrade.ControlPlane.IsPendingUpgrade = true
  1376  
  1377  	tests := []struct {
  1378  		name                                 string
  1379  		class                                *scope.ControlPlaneBlueprint
  1380  		original                             *scope.ControlPlaneState
  1381  		controlPlaneExternalChanges          string
  1382  		machineInfrastructureExternalChanges string
  1383  		upgradeTracker                       *scope.UpgradeTracker
  1384  		desired                              *scope.ControlPlaneState
  1385  		want                                 *scope.ControlPlaneState
  1386  		wantRotation                         bool
  1387  		wantErr                              bool
  1388  	}{
  1389  		// Testing reconciliation of a control plane without machines.
  1390  		{
  1391  			name:     "Should create desired ControlPlane without machine infrastructure if the current does not exist",
  1392  			class:    ccWithoutControlPlaneInfrastructure,
  1393  			original: nil,
  1394  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1395  			want:     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1396  			wantErr:  false,
  1397  		},
  1398  		{
  1399  			name:     "Should update the ControlPlane without machine infrastructure",
  1400  			class:    ccWithoutControlPlaneInfrastructure,
  1401  			original: &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1402  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1403  			want:     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1404  			wantErr:  false,
  1405  		},
  1406  		{
  1407  			name:           "Should not update the ControlPlane if ControlPlane is pending upgrade",
  1408  			class:          ccWithoutControlPlaneInfrastructure,
  1409  			upgradeTracker: upgradeTrackerWithControlPlanePendingUpgrade,
  1410  			original:       &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1411  			desired:        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithChanges.DeepCopy()},
  1412  			want:           &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1413  			wantErr:        false,
  1414  		},
  1415  		{
  1416  			name:                        "Should preserve external changes to ControlPlane without machine infrastructure",
  1417  			class:                       ccWithoutControlPlaneInfrastructure,
  1418  			original:                    &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1419  			controlPlaneExternalChanges: controlPlaneWithoutInfrastructureExternalChanges,
  1420  			desired:                     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1421  			want:                        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithExternalChanges.DeepCopy()},
  1422  			wantErr:                     false,
  1423  		},
  1424  		{
  1425  			name:                        "Should restore template values if overridden by external controllers into a ControlPlane without machine infrastructure",
  1426  			class:                       ccWithoutControlPlaneInfrastructure,
  1427  			original:                    &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1428  			controlPlaneExternalChanges: controlPlaneWithoutInfrastructureWithExternalOverridingChanges,
  1429  			desired:                     &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1430  			want:                        &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1431  			wantErr:                     false,
  1432  		},
  1433  		{
  1434  			name:     "Fail on updating ControlPlane without machine infrastructure in case of incompatible changes",
  1435  			class:    ccWithoutControlPlaneInfrastructure,
  1436  			original: &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructure.DeepCopy()},
  1437  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithoutInfrastructureWithIncompatibleChanges.DeepCopy()},
  1438  			wantErr:  true,
  1439  		},
  1440  
  1441  		// Testing reconciliation of a control plane with machines.
  1442  		{
  1443  			name:     "Should create desired ControlPlane with machine infrastructure if the current does not exist",
  1444  			class:    ccWithControlPlaneInfrastructure,
  1445  			original: nil,
  1446  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1447  			want:     &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1448  			wantErr:  false,
  1449  		},
  1450  		{
  1451  			name:         "Should rotate machine infrastructure in case of changes to the desired template",
  1452  			class:        ccWithControlPlaneInfrastructure,
  1453  			original:     &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1454  			desired:      &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithChanges.DeepCopy()},
  1455  			want:         &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithChanges.DeepCopy()},
  1456  			wantRotation: true,
  1457  			wantErr:      false,
  1458  		},
  1459  		{
  1460  			name:                                 "Should preserve external changes to ControlPlane's machine infrastructure", // NOTE: template are not expected to mutate, this is for extra safety.
  1461  			class:                                ccWithControlPlaneInfrastructure,
  1462  			original:                             &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1463  			machineInfrastructureExternalChanges: infrastructureMachineTemplateExternalChanges,
  1464  			desired:                              &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1465  			want:                                 &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithExternalChanges.DeepCopy()},
  1466  			wantRotation:                         false,
  1467  			wantErr:                              false,
  1468  		},
  1469  		{
  1470  			name:                                 "Should restore template values if overridden by external controllers into the ControlPlane's machine infrastructure", // NOTE: template are not expected to mutate, this is for extra safety.
  1471  			class:                                ccWithControlPlaneInfrastructure,
  1472  			original:                             &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1473  			machineInfrastructureExternalChanges: infrastructureMachineTemplateExternalOverridingChanges,
  1474  			desired:                              &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1475  			want:                                 &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1476  			wantRotation:                         true,
  1477  			wantErr:                              false,
  1478  		},
  1479  		{
  1480  			name:     "Fail on updating ControlPlane with a machine infrastructure in case of incompatible changes",
  1481  			class:    ccWithControlPlaneInfrastructure,
  1482  			original: &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy()},
  1483  			desired:  &scope.ControlPlaneState{Object: controlPlaneWithInfrastructure.DeepCopy(), InfrastructureMachineTemplate: infrastructureMachineTemplateWithIncompatibleChanges.DeepCopy()},
  1484  			wantErr:  true,
  1485  		},
  1486  	}
  1487  	for _, tt := range tests {
  1488  		t.Run(tt.name, func(t *testing.T) {
  1489  			g := NewWithT(t)
  1490  
  1491  			// Create namespace and modify input to have correct namespace set
  1492  			namespace, err := env.CreateNamespace(ctx, "reconcile-control-plane")
  1493  			g.Expect(err).ToNot(HaveOccurred())
  1494  			if tt.class != nil { // *scope.ControlPlaneBlueprint
  1495  				tt.class = prepareControlPlaneBluePrint(tt.class, namespace.GetName())
  1496  			}
  1497  			if tt.original != nil { // *scope.ControlPlaneState
  1498  				tt.original = prepareControlPlaneState(g, tt.original, namespace.GetName())
  1499  			}
  1500  			if tt.desired != nil { // *scope.ControlPlaneState
  1501  				tt.desired = prepareControlPlaneState(g, tt.desired, namespace.GetName())
  1502  			}
  1503  			if tt.want != nil { // *scope.ControlPlaneState
  1504  				tt.want = prepareControlPlaneState(g, tt.want, namespace.GetName())
  1505  			}
  1506  
  1507  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster1").Build())
  1508  			s.Blueprint = &scope.ClusterBlueprint{
  1509  				ClusterClass: &clusterv1.ClusterClass{},
  1510  			}
  1511  			if tt.class.InfrastructureMachineTemplate != nil {
  1512  				s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure = &clusterv1.LocalObjectTemplate{
  1513  					Ref: contract.ObjToRef(tt.class.InfrastructureMachineTemplate),
  1514  				}
  1515  			}
  1516  			if tt.upgradeTracker != nil {
  1517  				s.UpgradeTracker = tt.upgradeTracker
  1518  			}
  1519  
  1520  			s.Current.ControlPlane = &scope.ControlPlaneState{}
  1521  			if tt.original != nil {
  1522  				if tt.original.InfrastructureMachineTemplate != nil {
  1523  					// NOTE: it is required to use server side apply to creat the object in order to ensure consistency with the topology controller behaviour.
  1524  					g.Expect(env.PatchAndWait(ctx, tt.original.InfrastructureMachineTemplate.DeepCopy(), client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1525  					// NOTE: it is required to apply instance specific changes with a "plain" Patch operation to simulate a different manger.
  1526  					if tt.machineInfrastructureExternalChanges != "" {
  1527  						g.Expect(env.Patch(ctx, tt.original.InfrastructureMachineTemplate.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.machineInfrastructureExternalChanges)))).To(Succeed())
  1528  					}
  1529  
  1530  					current := builder.TestInfrastructureMachineTemplate("", "").Build()
  1531  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original.InfrastructureMachineTemplate), current)).To(Succeed())
  1532  					s.Current.ControlPlane.InfrastructureMachineTemplate = current
  1533  				}
  1534  				if tt.original.Object != nil {
  1535  					// NOTE: it is required to use server side apply to creat the object in order to ensure consistency with the topology controller behaviour.
  1536  					g.Expect(env.PatchAndWait(ctx, tt.original.Object.DeepCopy(), client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  1537  					// NOTE: it is required to apply instance specific changes with a "plain" Patch operation to simulate a different manger.
  1538  					if tt.controlPlaneExternalChanges != "" {
  1539  						g.Expect(env.Patch(ctx, tt.original.Object.DeepCopy(), client.RawPatch(types.MergePatchType, []byte(tt.controlPlaneExternalChanges)))).To(Succeed())
  1540  					}
  1541  
  1542  					current := builder.TestControlPlane("", "").Build()
  1543  					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.original.Object), current)).To(Succeed())
  1544  					s.Current.ControlPlane.Object = current
  1545  				}
  1546  			}
  1547  
  1548  			r := Reconciler{
  1549  				Client:             env,
  1550  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  1551  				recorder:           env.GetEventRecorderFor("test"),
  1552  			}
  1553  
  1554  			s.Desired = &scope.ClusterState{
  1555  				ControlPlane: &scope.ControlPlaneState{
  1556  					Object:                        tt.desired.Object,
  1557  					InfrastructureMachineTemplate: tt.desired.InfrastructureMachineTemplate,
  1558  				},
  1559  			}
  1560  
  1561  			// Run reconcileControlPlane with the states created in the initial section of the test.
  1562  			err = r.reconcileControlPlane(ctx, s)
  1563  			if tt.wantErr {
  1564  				g.Expect(err).To(HaveOccurred())
  1565  				return
  1566  			}
  1567  			g.Expect(err).ToNot(HaveOccurred())
  1568  
  1569  			// Create ControlPlane object for fetching data into
  1570  			gotControlPlaneObject := builder.TestControlPlane("", "").Build()
  1571  			err = env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(tt.want.Object), gotControlPlaneObject)
  1572  			g.Expect(err).ToNot(HaveOccurred())
  1573  
  1574  			// check for template rotation.
  1575  			gotRotation := false
  1576  			var gotInfrastructureMachineRef *corev1.ObjectReference
  1577  			if tt.class.InfrastructureMachineTemplate != nil {
  1578  				gotInfrastructureMachineRef, err = contract.ControlPlane().MachineTemplate().InfrastructureRef().Get(gotControlPlaneObject)
  1579  				g.Expect(err).ToNot(HaveOccurred())
  1580  				if tt.original != nil {
  1581  					if tt.original.InfrastructureMachineTemplate != nil && tt.original.InfrastructureMachineTemplate.GetName() != gotInfrastructureMachineRef.Name {
  1582  						gotRotation = true
  1583  						// if template has been rotated, fixup infrastructureRef in the wantControlPlaneObjectSpec before comparison.
  1584  						g.Expect(contract.ControlPlane().MachineTemplate().InfrastructureRef().Set(tt.want.Object, refToUnstructured(gotInfrastructureMachineRef))).To(Succeed())
  1585  					}
  1586  				}
  1587  			}
  1588  			g.Expect(gotRotation).To(Equal(tt.wantRotation))
  1589  
  1590  			// Get the spec from the ControlPlaneObject we are expecting
  1591  			wantControlPlaneObjectSpec, ok, err := unstructured.NestedMap(tt.want.Object.UnstructuredContent(), "spec")
  1592  			g.Expect(err).ToNot(HaveOccurred())
  1593  			g.Expect(ok).To(BeTrue())
  1594  
  1595  			// Get the spec from the ControlPlaneObject we got from the client.Get
  1596  			gotControlPlaneObjectSpec, ok, err := unstructured.NestedMap(gotControlPlaneObject.UnstructuredContent(), "spec")
  1597  			g.Expect(err).ToNot(HaveOccurred())
  1598  			g.Expect(ok).To(BeTrue())
  1599  
  1600  			for k, v := range wantControlPlaneObjectSpec {
  1601  				g.Expect(gotControlPlaneObjectSpec).To(HaveKeyWithValue(k, v))
  1602  			}
  1603  			for k, v := range tt.want.Object.GetLabels() {
  1604  				g.Expect(gotControlPlaneObject.GetLabels()).To(HaveKeyWithValue(k, v))
  1605  			}
  1606  
  1607  			// Check the infrastructure template
  1608  			if tt.want.InfrastructureMachineTemplate != nil {
  1609  				// Check to see if the controlPlaneObject has been updated with a new template.
  1610  				// This check is just for the naming format uses by generated templates - here it's templateName-*
  1611  				// This check is only performed when we had an initial template that has been changed
  1612  				if gotRotation {
  1613  					pattern := fmt.Sprintf("%s.*", controlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name))
  1614  					ok, err := regexp.Match(pattern, []byte(gotInfrastructureMachineRef.Name))
  1615  					g.Expect(err).ToNot(HaveOccurred())
  1616  					g.Expect(ok).To(BeTrue())
  1617  				}
  1618  
  1619  				// Create object to hold the queried InfrastructureMachineTemplate
  1620  				gotInfrastructureMachineTemplateKey := client.ObjectKey{Namespace: gotInfrastructureMachineRef.Namespace, Name: gotInfrastructureMachineRef.Name}
  1621  				gotInfrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate("", "").Build()
  1622  				err = env.GetAPIReader().Get(ctx, gotInfrastructureMachineTemplateKey, gotInfrastructureMachineTemplate)
  1623  				g.Expect(err).ToNot(HaveOccurred())
  1624  
  1625  				// Get the spec from the InfrastructureMachineTemplate we are expecting
  1626  				wantInfrastructureMachineTemplateSpec, ok, err := unstructured.NestedMap(tt.want.InfrastructureMachineTemplate.UnstructuredContent(), "spec")
  1627  				g.Expect(err).ToNot(HaveOccurred())
  1628  				g.Expect(ok).To(BeTrue())
  1629  
  1630  				// Get the spec from the InfrastructureMachineTemplate we got from the client.Get
  1631  				gotInfrastructureMachineTemplateSpec, ok, err := unstructured.NestedMap(gotInfrastructureMachineTemplate.UnstructuredContent(), "spec")
  1632  				g.Expect(err).ToNot(HaveOccurred())
  1633  				g.Expect(ok).To(BeTrue())
  1634  
  1635  				// Compare all keys and values in the InfrastructureMachineTemplate Spec
  1636  				for k, v := range wantInfrastructureMachineTemplateSpec {
  1637  					g.Expect(gotInfrastructureMachineTemplateSpec).To(HaveKeyWithValue(k, v))
  1638  				}
  1639  
  1640  				// Check to see that labels are as expected on the object
  1641  				for k, v := range tt.want.InfrastructureMachineTemplate.GetLabels() {
  1642  					g.Expect(gotInfrastructureMachineTemplate.GetLabels()).To(HaveKeyWithValue(k, v))
  1643  				}
  1644  
  1645  				// If the template was rotated during the reconcile we want to make sure the old template was deleted.
  1646  				if gotRotation {
  1647  					obj := &unstructured.Unstructured{}
  1648  					obj.SetAPIVersion(builder.InfrastructureGroupVersion.String())
  1649  					obj.SetKind(builder.GenericInfrastructureMachineTemplateKind)
  1650  					err := r.Client.Get(ctx, client.ObjectKey{
  1651  						Namespace: tt.original.InfrastructureMachineTemplate.GetNamespace(),
  1652  						Name:      tt.original.InfrastructureMachineTemplate.GetName(),
  1653  					}, obj)
  1654  					g.Expect(apierrors.IsNotFound(err)).To(BeTrue())
  1655  				}
  1656  			}
  1657  		})
  1658  	}
  1659  }
  1660  
// TestReconcileControlPlaneMachineHealthCheck verifies that reconcileControlPlane
// creates, updates, and deletes the MachineHealthCheck managed for the ControlPlane
// object. It covers four cases: MHC created for a new ControlPlane, MHC not created
// when the ControlPlane has no MachineInfrastructure, MHC updated when the desired
// state changes, and MHC deleted when it is removed from the desired state.
func TestReconcileControlPlaneMachineHealthCheck(t *testing.T) {
	// Create InfrastructureMachineTemplates for test cases
	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infra1").Build()

	// MachineHealthCheckClass shared by the blueprints below: a Machine is considered
	// unhealthy when its Node Ready condition has been Unknown for 5 minutes.
	mhcClass := &clusterv1.MachineHealthCheckClass{
		UnhealthyConditions: []clusterv1.UnhealthyCondition{
			{
				Type:    corev1.NodeReady,
				Status:  corev1.ConditionUnknown,
				Timeout: metav1.Duration{Duration: 5 * time.Minute},
			},
		},
	}
	maxUnhealthy := intstr.Parse("45%")
	// Create one ControlPlane blueprint requiring controlPlaneInfrastructure and one that does not;
	// both carry the same MachineHealthCheck class.
	ccWithControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{
		InfrastructureMachineTemplate: infrastructureMachineTemplate,
		MachineHealthCheck:            mhcClass,
	}
	ccWithoutControlPlaneInfrastructure := &scope.ControlPlaneBlueprint{
		MachineHealthCheck: mhcClass,
	}

	// Create ControlPlane Object.
	controlPlane1 := builder.TestControlPlane(metav1.NamespaceDefault, "cp1").
		WithInfrastructureMachineTemplate(infrastructureMachineTemplate).
		Build()

	// Base builder for the MachineHealthCheck used by the test cases below.
	// NOTE(review): the With* builder methods appear to mutate this shared builder
	// (which is presumably why some cases call DeepCopy() before Build()); the
	// "update" case at WithMaxUnhealthy below mutates mhcBuilder for subsequent
	// cases — confirm this is intended.
	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1").
		WithSelector(*selectorForControlPlaneMHC()).
		WithUnhealthyConditions(mhcClass.UnhealthyConditions).
		WithClusterName("cluster1")

	tests := []struct {
		name    string
		class   *scope.ControlPlaneBlueprint  // blueprint used to populate s.Blueprint
		current *scope.ControlPlaneState      // pre-existing state applied to the apiserver; nil means fresh create
		desired *scope.ControlPlaneState      // desired state passed to reconcileControlPlane
		want    *clusterv1.MachineHealthCheck // expected MHC after reconcile; nil means the MHC must not exist
	}{
		{
			name:    "Should create desired ControlPlane MachineHealthCheck for a new ControlPlane",
			class:   ccWithControlPlaneInfrastructure,
			current: nil,
			desired: &scope.ControlPlaneState{
				Object:                        controlPlane1.DeepCopy(),
				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
				MachineHealthCheck:            mhcBuilder.Build()},
			want: mhcBuilder.DeepCopy().
				Build(),
		},
		{
			name:  "Should not create ControlPlane MachineHealthCheck when no MachineInfrastructure is defined",
			class: ccWithoutControlPlaneInfrastructure,
			current: &scope.ControlPlaneState{
				Object: controlPlane1.DeepCopy(),
				// Note this creation would be blocked by the validation Webhook. MHC with no MachineInfrastructure is not allowed.
				MachineHealthCheck: mhcBuilder.Build()},
			desired: &scope.ControlPlaneState{
				Object: controlPlane1.DeepCopy(),
				// ControlPlane does not have defined MachineInfrastructure.
				// InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
			},
			want: nil,
		},
		{
			name:  "Should update ControlPlane MachineHealthCheck when changed in desired state",
			class: ccWithControlPlaneInfrastructure,
			current: &scope.ControlPlaneState{
				Object:                        controlPlane1.DeepCopy(),
				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
				MachineHealthCheck:            mhcBuilder.Build()},
			desired: &scope.ControlPlaneState{
				Object:                        controlPlane1.DeepCopy(),
				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
				MachineHealthCheck:            mhcBuilder.WithMaxUnhealthy(&maxUnhealthy).Build(),
			},
			// Want to get the updated version of the MachineHealthCheck after reconciliation.
			want: mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).
				Build(),
		},
		{
			name:  "Should delete ControlPlane MachineHealthCheck when removed from desired state",
			class: ccWithControlPlaneInfrastructure,
			current: &scope.ControlPlaneState{
				Object:                        controlPlane1.DeepCopy(),
				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
				MachineHealthCheck:            mhcBuilder.Build()},
			desired: &scope.ControlPlaneState{
				Object:                        controlPlane1.DeepCopy(),
				InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
				// MachineHealthCheck removed from the desired state of the ControlPlane
			},
			want: nil,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Create namespace and modify input to have correct namespace set
			namespace, err := env.CreateNamespace(ctx, "reconcile-control-plane")
			g.Expect(err).ToNot(HaveOccurred())
			if tt.class != nil {
				tt.class = prepareControlPlaneBluePrint(tt.class, namespace.GetName())
			}
			if tt.current != nil {
				tt.current = prepareControlPlaneState(g, tt.current, namespace.GetName())
			}
			if tt.desired != nil {
				tt.desired = prepareControlPlaneState(g, tt.desired, namespace.GetName())
			}
			if tt.want != nil {
				tt.want.SetNamespace(namespace.GetName())
			}

			// Build the scope: a Cluster in the test namespace plus a minimal ClusterClass;
			// MachineInfrastructure is only set when the blueprint defines a template.
			s := scope.New(builder.Cluster(namespace.GetName(), "cluster1").Build())
			s.Blueprint = &scope.ClusterBlueprint{
				ClusterClass: &clusterv1.ClusterClass{},
			}
			if tt.class.InfrastructureMachineTemplate != nil {
				s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure = &clusterv1.LocalObjectTemplate{
					Ref: contract.ObjToRef(tt.class.InfrastructureMachineTemplate),
				}
			}

			// Seed the apiserver with the current state (if any) using server side apply
			// with the topology manager as field owner, mirroring the controller's behaviour.
			s.Current.ControlPlane = &scope.ControlPlaneState{}
			if tt.current != nil {
				s.Current.ControlPlane = tt.current
				if tt.current.Object != nil {
					g.Expect(env.PatchAndWait(ctx, tt.current.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
				}
				if tt.current.InfrastructureMachineTemplate != nil {
					g.Expect(env.PatchAndWait(ctx, tt.current.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
				}
				if tt.current.MachineHealthCheck != nil {
					g.Expect(env.PatchAndWait(ctx, tt.current.MachineHealthCheck, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
				}
			}

			// copy over uid of created and desired ControlPlane so the desired object
			// targets the same apiserver object as the one created above
			if tt.current != nil && tt.current.Object != nil && tt.desired != nil && tt.desired.Object != nil {
				tt.desired.Object.SetUID(tt.current.Object.GetUID())
			}

			r := Reconciler{
				Client:             env,
				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
				recorder:           env.GetEventRecorderFor("test"),
			}

			s.Desired = &scope.ClusterState{
				ControlPlane: tt.desired,
			}

			// Run reconcileControlPlane with the states created in the initial section of the test.
			err = r.reconcileControlPlane(ctx, s)
			g.Expect(err).ToNot(HaveOccurred())

			// Verify the ControlPlane object itself exists after reconcile.
			gotCP := s.Desired.ControlPlane.Object.DeepCopy()
			g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: controlPlane1.GetName()}, gotCP)).To(Succeed())

			// Create MachineHealthCheck object for fetching data into.
			// The MHC shares its name with the ControlPlane object.
			gotMHC := &clusterv1.MachineHealthCheck{}
			err = env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: controlPlane1.GetName()}, gotMHC)

			// Nil case: If we want to find nothing (i.e. delete or MHC not created) and the Get call returns a NotFound error from the API the test succeeds.
			if tt.want == nil && apierrors.IsNotFound(err) {
				return
			}

			// Apply webhook defaulting to the expected MHC so the comparison matches
			// what the apiserver stores for the reconciled object.
			want := tt.want.DeepCopy()
			g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, want)).To(Succeed())

			g.Expect(err).ToNot(HaveOccurred())
			// Compare ignoring autogenerated metadata and type meta, which differ between
			// the locally-built expectation and the object returned by the apiserver.
			g.Expect(gotMHC).To(EqualObject(want, IgnoreAutogeneratedMetadata, IgnorePaths{".kind", ".apiVersion"}))
		})
	}
}
  1840  
  1841  func TestReconcileMachineDeployments(t *testing.T) {
  1842  	g := NewWithT(t)
  1843  
  1844  	// Write the config file to access the test env for debugging.
  1845  	// g.Expect(os.WriteFile("test.conf", kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
  1846  	// 	ObjectMeta: metav1.ObjectMeta{Name: "test"},
  1847  	// }), 0777)).To(Succeed())
  1848  
  1849  	infrastructureMachineTemplate1 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-1").Build()
  1850  	bootstrapTemplate1 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  1851  	md1 := newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate1, bootstrapTemplate1, nil)
  1852  
  1853  	upgradeTrackerWithMD1PendingCreate := scope.NewUpgradeTracker()
  1854  	upgradeTrackerWithMD1PendingCreate.MachineDeployments.MarkPendingCreate("md-1-topology")
  1855  
  1856  	infrastructureMachineTemplate2 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-2").Build()
  1857  	bootstrapTemplate2 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-2").Build()
  1858  	md2 := newFakeMachineDeploymentTopologyState("md-2", infrastructureMachineTemplate2, bootstrapTemplate2, nil)
  1859  	infrastructureMachineTemplate2WithChanges := infrastructureMachineTemplate2.DeepCopy()
  1860  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate2WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1861  	md2WithRotatedInfrastructureMachineTemplate := newFakeMachineDeploymentTopologyState("md-2", infrastructureMachineTemplate2WithChanges, bootstrapTemplate2, nil)
  1862  	upgradeTrackerWithMD2PendingUpgrade := scope.NewUpgradeTracker()
  1863  	upgradeTrackerWithMD2PendingUpgrade.MachineDeployments.MarkPendingUpgrade("md-2")
  1864  
  1865  	infrastructureMachineTemplate3 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-3").Build()
  1866  	bootstrapTemplate3 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-3").Build()
  1867  	md3 := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3, nil)
  1868  	bootstrapTemplate3WithChanges := bootstrapTemplate3.DeepCopy()
  1869  	g.Expect(unstructured.SetNestedField(bootstrapTemplate3WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1870  	md3WithRotatedBootstrapTemplate := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3WithChanges, nil)
  1871  	bootstrapTemplate3WithChangeKind := bootstrapTemplate3.DeepCopy()
  1872  	bootstrapTemplate3WithChangeKind.SetKind("AnotherGenericBootstrapTemplate")
  1873  	md3WithRotatedBootstrapTemplateChangedKind := newFakeMachineDeploymentTopologyState("md-3", infrastructureMachineTemplate3, bootstrapTemplate3WithChanges, nil)
  1874  
  1875  	infrastructureMachineTemplate4 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-4").Build()
  1876  	bootstrapTemplate4 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-4").Build()
  1877  	md4 := newFakeMachineDeploymentTopologyState("md-4", infrastructureMachineTemplate4, bootstrapTemplate4, nil)
  1878  	infrastructureMachineTemplate4WithChanges := infrastructureMachineTemplate4.DeepCopy()
  1879  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate4WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1880  	bootstrapTemplate4WithChanges := bootstrapTemplate4.DeepCopy()
  1881  	g.Expect(unstructured.SetNestedField(bootstrapTemplate4WithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1882  	md4WithRotatedTemplates := newFakeMachineDeploymentTopologyState("md-4", infrastructureMachineTemplate4WithChanges, bootstrapTemplate4WithChanges, nil)
  1883  
  1884  	infrastructureMachineTemplate4m := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-4m").Build()
  1885  	bootstrapTemplate4m := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-4m").Build()
  1886  	md4m := newFakeMachineDeploymentTopologyState("md-4m", infrastructureMachineTemplate4m, bootstrapTemplate4m, nil)
  1887  	infrastructureMachineTemplate4mWithChanges := infrastructureMachineTemplate4m.DeepCopy()
  1888  	infrastructureMachineTemplate4mWithChanges.SetLabels(map[string]string{"foo": "bar"})
  1889  	bootstrapTemplate4mWithChanges := bootstrapTemplate4m.DeepCopy()
  1890  	bootstrapTemplate4mWithChanges.SetLabels(map[string]string{"foo": "bar"})
  1891  	md4mWithInPlaceUpdatedTemplates := newFakeMachineDeploymentTopologyState("md-4m", infrastructureMachineTemplate4mWithChanges, bootstrapTemplate4mWithChanges, nil)
  1892  
  1893  	infrastructureMachineTemplate5 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-5").Build()
  1894  	bootstrapTemplate5 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-5").Build()
  1895  	md5 := newFakeMachineDeploymentTopologyState("md-5", infrastructureMachineTemplate5, bootstrapTemplate5, nil)
  1896  	infrastructureMachineTemplate5WithChangedKind := infrastructureMachineTemplate5.DeepCopy()
  1897  	infrastructureMachineTemplate5WithChangedKind.SetKind("ChangedKind")
  1898  	md5WithChangedInfrastructureMachineTemplateKind := newFakeMachineDeploymentTopologyState("md-4", infrastructureMachineTemplate5WithChangedKind, bootstrapTemplate5, nil)
  1899  
  1900  	infrastructureMachineTemplate6 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-6").Build()
  1901  	bootstrapTemplate6 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-6").Build()
  1902  	md6 := newFakeMachineDeploymentTopologyState("md-6", infrastructureMachineTemplate6, bootstrapTemplate6, nil)
  1903  	bootstrapTemplate6WithChangedNamespace := bootstrapTemplate6.DeepCopy()
  1904  	bootstrapTemplate6WithChangedNamespace.SetNamespace("ChangedNamespace")
  1905  	md6WithChangedBootstrapTemplateNamespace := newFakeMachineDeploymentTopologyState("md-6", infrastructureMachineTemplate6, bootstrapTemplate6WithChangedNamespace, nil)
  1906  
  1907  	infrastructureMachineTemplate7 := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-7").Build()
  1908  	bootstrapTemplate7 := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-7").Build()
  1909  	md7 := newFakeMachineDeploymentTopologyState("md-7", infrastructureMachineTemplate7, bootstrapTemplate7, nil)
  1910  
  1911  	infrastructureMachineTemplate8Create := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-create").Build()
  1912  	bootstrapTemplate8Create := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-create").Build()
  1913  	md8Create := newFakeMachineDeploymentTopologyState("md-8-create", infrastructureMachineTemplate8Create, bootstrapTemplate8Create, nil)
  1914  	infrastructureMachineTemplate8Delete := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-delete").Build()
  1915  	bootstrapTemplate8Delete := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-delete").Build()
  1916  	md8Delete := newFakeMachineDeploymentTopologyState("md-8-delete", infrastructureMachineTemplate8Delete, bootstrapTemplate8Delete, nil)
  1917  	infrastructureMachineTemplate8Update := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-8-update").Build()
  1918  	bootstrapTemplate8Update := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-8-update").Build()
  1919  	md8Update := newFakeMachineDeploymentTopologyState("md-8-update", infrastructureMachineTemplate8Update, bootstrapTemplate8Update, nil)
  1920  	infrastructureMachineTemplate8UpdateWithChanges := infrastructureMachineTemplate8Update.DeepCopy()
  1921  	g.Expect(unstructured.SetNestedField(infrastructureMachineTemplate8UpdateWithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1922  	bootstrapTemplate8UpdateWithChanges := bootstrapTemplate8Update.DeepCopy()
  1923  	g.Expect(unstructured.SetNestedField(bootstrapTemplate8UpdateWithChanges.Object, "foo", "spec", "template", "spec", "foo")).To(Succeed())
  1924  	md8UpdateWithRotatedTemplates := newFakeMachineDeploymentTopologyState("md-8-update", infrastructureMachineTemplate8UpdateWithChanges, bootstrapTemplate8UpdateWithChanges, nil)
  1925  
  1926  	infrastructureMachineTemplate9m := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-9m").Build()
  1927  	bootstrapTemplate9m := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-9m").Build()
  1928  	md9 := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil)
  1929  	md9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  1930  	md9.Object.Spec.Selector.MatchLabels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  1931  	md9WithInstanceSpecificTemplateMetadataAndSelector := newFakeMachineDeploymentTopologyState("md-9m", infrastructureMachineTemplate9m, bootstrapTemplate9m, nil)
  1932  	md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"}
  1933  	md9WithInstanceSpecificTemplateMetadataAndSelector.Object.Spec.Selector.MatchLabels = map[string]string{"foo": "bar"}
  1934  
  1935  	tests := []struct {
  1936  		name                                      string
  1937  		current                                   []*scope.MachineDeploymentState
  1938  		currentOnlyAPIServer                      []*scope.MachineDeploymentState
  1939  		desired                                   []*scope.MachineDeploymentState
  1940  		upgradeTracker                            *scope.UpgradeTracker
  1941  		want                                      []*scope.MachineDeploymentState
  1942  		wantInfrastructureMachineTemplateRotation map[string]bool
  1943  		wantBootstrapTemplateRotation             map[string]bool
  1944  		wantErr                                   bool
  1945  	}{
  1946  		{
  1947  			name:    "Should create desired MachineDeployment if the current does not exists yet",
  1948  			current: nil,
  1949  			desired: []*scope.MachineDeploymentState{md1},
  1950  			want:    []*scope.MachineDeploymentState{md1},
  1951  			wantErr: false,
  1952  		},
  1953  		{
  1954  			name:                 "Should skip creating desired MachineDeployment if it already exists in the apiserver (even if it is not in current state)",
  1955  			current:              nil,
  1956  			currentOnlyAPIServer: []*scope.MachineDeploymentState{md1},
  1957  			desired:              []*scope.MachineDeploymentState{md1},
  1958  			want:                 []*scope.MachineDeploymentState{md1},
  1959  			wantErr:              false,
  1960  		},
  1961  		{
  1962  			name:           "Should not create desired MachineDeployment if the current does not exists yet and it marked as pending create",
  1963  			current:        nil,
  1964  			upgradeTracker: upgradeTrackerWithMD1PendingCreate,
  1965  			desired:        []*scope.MachineDeploymentState{md1},
  1966  			want:           nil,
  1967  			wantErr:        false,
  1968  		},
  1969  		{
  1970  			name:    "No-op if current MachineDeployment is equal to desired",
  1971  			current: []*scope.MachineDeploymentState{md1},
  1972  			desired: []*scope.MachineDeploymentState{md1},
  1973  			want:    []*scope.MachineDeploymentState{md1},
  1974  			wantErr: false,
  1975  		},
  1976  		{
  1977  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate rotation",
  1978  			current: []*scope.MachineDeploymentState{md2},
  1979  			desired: []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  1980  			want:    []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  1981  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-2": true},
  1982  			wantErr: false,
  1983  		},
  1984  		{
  1985  			name:           "Should not update MachineDeployment if MachineDeployment is pending upgrade",
  1986  			current:        []*scope.MachineDeploymentState{md2},
  1987  			desired:        []*scope.MachineDeploymentState{md2WithRotatedInfrastructureMachineTemplate},
  1988  			upgradeTracker: upgradeTrackerWithMD2PendingUpgrade,
  1989  			want:           []*scope.MachineDeploymentState{md2},
  1990  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-2": false},
  1991  			wantErr: false,
  1992  		},
  1993  		{
  1994  			name:                          "Should update MachineDeployment with BootstrapTemplate rotation",
  1995  			current:                       []*scope.MachineDeploymentState{md3},
  1996  			desired:                       []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplate},
  1997  			want:                          []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplate},
  1998  			wantBootstrapTemplateRotation: map[string]bool{"md-3": true},
  1999  			wantErr:                       false,
  2000  		},
  2001  		{
  2002  			name:                          "Should update MachineDeployment with BootstrapTemplate rotation with changed kind",
  2003  			current:                       []*scope.MachineDeploymentState{md3},
  2004  			desired:                       []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplateChangedKind},
  2005  			want:                          []*scope.MachineDeploymentState{md3WithRotatedBootstrapTemplateChangedKind},
  2006  			wantBootstrapTemplateRotation: map[string]bool{"md-3": true},
  2007  			wantErr:                       false,
  2008  		},
  2009  		{
  2010  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate and BootstrapTemplate rotation",
  2011  			current: []*scope.MachineDeploymentState{md4},
  2012  			desired: []*scope.MachineDeploymentState{md4WithRotatedTemplates},
  2013  			want:    []*scope.MachineDeploymentState{md4WithRotatedTemplates},
  2014  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-4": true},
  2015  			wantBootstrapTemplateRotation:             map[string]bool{"md-4": true},
  2016  			wantErr:                                   false,
  2017  		},
  2018  		{
  2019  			name:    "Should update MachineDeployment with InfrastructureMachineTemplate and BootstrapTemplate without rotation",
  2020  			current: []*scope.MachineDeploymentState{md4m},
  2021  			desired: []*scope.MachineDeploymentState{md4mWithInPlaceUpdatedTemplates},
  2022  			want:    []*scope.MachineDeploymentState{md4mWithInPlaceUpdatedTemplates},
  2023  			wantErr: false,
  2024  		},
  2025  		{
  2026  			name:    "Should fail update MachineDeployment because of changed InfrastructureMachineTemplate kind",
  2027  			current: []*scope.MachineDeploymentState{md5},
  2028  			desired: []*scope.MachineDeploymentState{md5WithChangedInfrastructureMachineTemplateKind},
  2029  			wantErr: true,
  2030  		},
  2031  		{
  2032  			name:    "Should fail update MachineDeployment because of changed BootstrapTemplate namespace",
  2033  			current: []*scope.MachineDeploymentState{md6},
  2034  			desired: []*scope.MachineDeploymentState{md6WithChangedBootstrapTemplateNamespace},
  2035  			wantErr: true,
  2036  		},
  2037  		{
  2038  			name:    "Should delete MachineDeployment",
  2039  			current: []*scope.MachineDeploymentState{md7},
  2040  			desired: []*scope.MachineDeploymentState{},
  2041  			want:    []*scope.MachineDeploymentState{},
  2042  			wantErr: false,
  2043  		},
  2044  		{
  2045  			name:    "Should create, update and delete MachineDeployments",
  2046  			current: []*scope.MachineDeploymentState{md8Update, md8Delete},
  2047  			desired: []*scope.MachineDeploymentState{md8Create, md8UpdateWithRotatedTemplates},
  2048  			want:    []*scope.MachineDeploymentState{md8Create, md8UpdateWithRotatedTemplates},
  2049  			wantInfrastructureMachineTemplateRotation: map[string]bool{"md-8-update": true},
  2050  			wantBootstrapTemplateRotation:             map[string]bool{"md-8-update": true},
  2051  			wantErr:                                   false,
  2052  		},
  2053  		{
  2054  			name:    "Enforce template metadata",
  2055  			current: []*scope.MachineDeploymentState{md9WithInstanceSpecificTemplateMetadataAndSelector},
  2056  			desired: []*scope.MachineDeploymentState{md9},
  2057  			want:    []*scope.MachineDeploymentState{md9},
  2058  			wantErr: false,
  2059  		},
  2060  	}
  2061  	for _, tt := range tests {
  2062  		t.Run(tt.name, func(t *testing.T) {
  2063  			g := NewWithT(t)
  2064  
  2065  			// Create namespace and modify input to have correct namespace set
  2066  			namespace, err := env.CreateNamespace(ctx, "reconcile-machine-deployments")
  2067  			g.Expect(err).ToNot(HaveOccurred())
  2068  			for i, s := range tt.current {
  2069  				tt.current[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2070  			}
  2071  			for i, s := range tt.desired {
  2072  				tt.desired[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2073  			}
  2074  			for i, s := range tt.want {
  2075  				tt.want[i] = prepareMachineDeploymentState(s, namespace.GetName())
  2076  			}
  2077  
  2078  			for _, s := range tt.current {
  2079  				g.Expect(env.PatchAndWait(ctx, s.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2080  				g.Expect(env.PatchAndWait(ctx, s.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2081  				g.Expect(env.PatchAndWait(ctx, s.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2082  			}
  2083  
  2084  			currentMachineDeploymentStates := toMachineDeploymentTopologyStateMap(tt.current)
  2085  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2086  			s.Current.MachineDeployments = currentMachineDeploymentStates
  2087  
  2088  			// currentOnlyAPIServer MDs only exist in the APIserver but are not part of s.Current.
  2089  			// This simulates that getCurrentMachineDeploymentState in current_state.go read a stale MD list.
  2090  			for _, s := range tt.currentOnlyAPIServer {
  2091  				mdState := prepareMachineDeploymentState(s, namespace.GetName())
  2092  
  2093  				g.Expect(env.PatchAndWait(ctx, mdState.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2094  				g.Expect(env.PatchAndWait(ctx, mdState.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2095  				g.Expect(env.PatchAndWait(ctx, mdState.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2096  			}
  2097  
  2098  			s.Desired = &scope.ClusterState{MachineDeployments: toMachineDeploymentTopologyStateMap(tt.desired)}
  2099  
  2100  			if tt.upgradeTracker != nil {
  2101  				s.UpgradeTracker = tt.upgradeTracker
  2102  			}
  2103  
  2104  			r := Reconciler{
  2105  				Client:             env.GetClient(),
  2106  				APIReader:          env.GetAPIReader(),
  2107  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2108  				recorder:           env.GetEventRecorderFor("test"),
  2109  			}
  2110  			err = r.reconcileMachineDeployments(ctx, s)
  2111  			if tt.wantErr {
  2112  				g.Expect(err).To(HaveOccurred())
  2113  				return
  2114  			}
  2115  			g.Expect(err).ToNot(HaveOccurred())
  2116  
  2117  			var gotMachineDeploymentList clusterv1.MachineDeploymentList
  2118  			g.Expect(env.GetAPIReader().List(ctx, &gotMachineDeploymentList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  2119  			g.Expect(gotMachineDeploymentList.Items).To(HaveLen(len(tt.want)))
  2120  
  2121  			if tt.want == nil {
  2122  				// No machine deployments should exist.
  2123  				g.Expect(gotMachineDeploymentList.Items).To(BeEmpty())
  2124  			}
  2125  
  2126  			for _, wantMachineDeploymentState := range tt.want {
  2127  				for _, gotMachineDeployment := range gotMachineDeploymentList.Items {
  2128  					if wantMachineDeploymentState.Object.Name != gotMachineDeployment.Name {
  2129  						continue
  2130  					}
  2131  					currentMachineDeploymentTopologyName := wantMachineDeploymentState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]
  2132  					currentMachineDeploymentState := currentMachineDeploymentStates[currentMachineDeploymentTopologyName]
  2133  
  2134  	// Copy over the name of the newly created InfrastructureRef and Bootstrap.ConfigRef because they get a generated name
  2135  					wantMachineDeploymentState.Object.Spec.Template.Spec.InfrastructureRef.Name = gotMachineDeployment.Spec.Template.Spec.InfrastructureRef.Name
  2136  					if gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
  2137  						wantMachineDeploymentState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef.Name
  2138  					}
  2139  
  2140  					// Compare MachineDeployment.
  2141  					// Note: We're intentionally only comparing Spec as otherwise we would have to account for
  2142  					// empty vs. filled out TypeMeta.
  2143  					g.Expect(gotMachineDeployment.Spec).To(BeComparableTo(wantMachineDeploymentState.Object.Spec))
  2144  
  2145  					// Compare BootstrapTemplate.
  2146  					gotBootstrapTemplateRef := gotMachineDeployment.Spec.Template.Spec.Bootstrap.ConfigRef
  2147  					gotBootstrapTemplate := unstructured.Unstructured{}
  2148  					gotBootstrapTemplate.SetKind(gotBootstrapTemplateRef.Kind)
  2149  					gotBootstrapTemplate.SetAPIVersion(gotBootstrapTemplateRef.APIVersion)
  2150  
  2151  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2152  						Namespace: gotBootstrapTemplateRef.Namespace,
  2153  						Name:      gotBootstrapTemplateRef.Name,
  2154  					}, &gotBootstrapTemplate)
  2155  
  2156  					g.Expect(err).ToNot(HaveOccurred())
  2157  
  2158  					g.Expect(&gotBootstrapTemplate).To(EqualObject(wantMachineDeploymentState.BootstrapTemplate, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2159  
  2160  					// Check BootstrapTemplate rotation if there was a previous MachineDeployment/Template.
  2161  					if currentMachineDeploymentState != nil && currentMachineDeploymentState.BootstrapTemplate != nil {
  2162  						if tt.wantBootstrapTemplateRotation[gotMachineDeployment.Name] {
  2163  							g.Expect(currentMachineDeploymentState.BootstrapTemplate.GetName()).ToNot(Equal(gotBootstrapTemplate.GetName()))
  2164  						} else {
  2165  							g.Expect(currentMachineDeploymentState.BootstrapTemplate.GetName()).To(Equal(gotBootstrapTemplate.GetName()))
  2166  						}
  2167  					}
  2168  
  2169  					// Compare InfrastructureMachineTemplate.
  2170  					gotInfrastructureMachineTemplateRef := gotMachineDeployment.Spec.Template.Spec.InfrastructureRef
  2171  					gotInfrastructureMachineTemplate := unstructured.Unstructured{}
  2172  					gotInfrastructureMachineTemplate.SetKind(gotInfrastructureMachineTemplateRef.Kind)
  2173  					gotInfrastructureMachineTemplate.SetAPIVersion(gotInfrastructureMachineTemplateRef.APIVersion)
  2174  
  2175  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2176  						Namespace: gotInfrastructureMachineTemplateRef.Namespace,
  2177  						Name:      gotInfrastructureMachineTemplateRef.Name,
  2178  					}, &gotInfrastructureMachineTemplate)
  2179  
  2180  					g.Expect(err).ToNot(HaveOccurred())
  2181  
  2182  					g.Expect(&gotInfrastructureMachineTemplate).To(EqualObject(wantMachineDeploymentState.InfrastructureMachineTemplate, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2183  
  2184  					// Check InfrastructureMachineTemplate rotation if there was a previous MachineDeployment/Template.
  2185  					if currentMachineDeploymentState != nil && currentMachineDeploymentState.InfrastructureMachineTemplate != nil {
  2186  						if tt.wantInfrastructureMachineTemplateRotation[gotMachineDeployment.Name] {
  2187  							g.Expect(currentMachineDeploymentState.InfrastructureMachineTemplate.GetName()).ToNot(Equal(gotInfrastructureMachineTemplate.GetName()))
  2188  						} else {
  2189  							g.Expect(currentMachineDeploymentState.InfrastructureMachineTemplate.GetName()).To(Equal(gotInfrastructureMachineTemplate.GetName()))
  2190  						}
  2191  					}
  2192  				}
  2193  			}
  2194  		})
  2195  	}
  2196  }
  2197  
  2198  func TestReconcileMachinePools(t *testing.T) {
  2199  	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.MachinePool, true)()
  2200  
  2201  	g := NewWithT(t)
  2202  
  2203  	infrastructureMachinePool1 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-1").Build()
  2204  	bootstrapConfig1 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  2205  	mp1 := newFakeMachinePoolTopologyState("mp-1", infrastructureMachinePool1, bootstrapConfig1)
  2206  
  2207  	upgradeTrackerWithmp1PendingCreate := scope.NewUpgradeTracker()
  2208  	upgradeTrackerWithmp1PendingCreate.MachinePools.MarkPendingCreate("mp-1-topology")
  2209  
  2210  	infrastructureMachinePool2 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-2").Build()
  2211  	bootstrapConfig2 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-2").Build()
  2212  	mp2 := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2, bootstrapConfig2)
  2213  	infrastructureMachinePool2WithChanges := infrastructureMachinePool2.DeepCopy()
  2214  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool2WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2215  	mp2WithChangedInfrastructureMachinePool := newFakeMachinePoolTopologyState("mp-2", infrastructureMachinePool2WithChanges, bootstrapConfig2)
  2216  	upgradeTrackerWithmp2PendingUpgrade := scope.NewUpgradeTracker()
  2217  	upgradeTrackerWithmp2PendingUpgrade.MachinePools.MarkPendingUpgrade("mp-2")
  2218  
  2219  	infrastructureMachinePool3 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-3").Build()
  2220  	bootstrapConfig3 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-3").Build()
  2221  	mp3 := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3)
  2222  	bootstrapConfig3WithChanges := bootstrapConfig3.DeepCopy()
  2223  	g.Expect(unstructured.SetNestedField(bootstrapConfig3WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2224  	mp3WithChangedbootstrapConfig := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChanges)
  2225  	bootstrapConfig3WithChangeKind := bootstrapConfig3.DeepCopy()
  2226  	bootstrapConfig3WithChangeKind.SetKind("AnotherGenericbootstrapConfig")
  2227  	mp3WithChangedbootstrapConfigChangedKind := newFakeMachinePoolTopologyState("mp-3", infrastructureMachinePool3, bootstrapConfig3WithChangeKind)
  2228  
  2229  	infrastructureMachinePool4 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-4").Build()
  2230  	bootstrapConfig4 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-4").Build()
  2231  	mp4 := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4, bootstrapConfig4)
  2232  	infrastructureMachinePool4WithChanges := infrastructureMachinePool4.DeepCopy()
  2233  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool4WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2234  	bootstrapConfig4WithChanges := bootstrapConfig4.DeepCopy()
  2235  	g.Expect(unstructured.SetNestedField(bootstrapConfig4WithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2236  	mp4WithChangedObjects := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool4WithChanges, bootstrapConfig4WithChanges)
  2237  
  2238  	infrastructureMachinePool5 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-5").Build()
  2239  	bootstrapConfig5 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-5").Build()
  2240  	mp5 := newFakeMachinePoolTopologyState("mp-5", infrastructureMachinePool5, bootstrapConfig5)
  2241  	infrastructureMachinePool5WithChangedKind := infrastructureMachinePool5.DeepCopy()
  2242  	infrastructureMachinePool5WithChangedKind.SetKind("ChangedKind")
  2243  	mp5WithChangedinfrastructureMachinePoolKind := newFakeMachinePoolTopologyState("mp-4", infrastructureMachinePool5WithChangedKind, bootstrapConfig5)
  2244  
  2245  	infrastructureMachinePool6 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-6").Build()
  2246  	bootstrapConfig6 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-6").Build()
  2247  	mp6 := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6)
  2248  	bootstrapConfig6WithChangedNamespace := bootstrapConfig6.DeepCopy()
  2249  	bootstrapConfig6WithChangedNamespace.SetNamespace("ChangedNamespace")
  2250  	mp6WithChangedbootstrapConfigNamespace := newFakeMachinePoolTopologyState("mp-6", infrastructureMachinePool6, bootstrapConfig6WithChangedNamespace)
  2251  
  2252  	infrastructureMachinePool7 := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-7").Build()
  2253  	bootstrapConfig7 := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-7").Build()
  2254  	mp7 := newFakeMachinePoolTopologyState("mp-7", infrastructureMachinePool7, bootstrapConfig7)
  2255  
  2256  	infrastructureMachinePool8Create := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-create").Build()
  2257  	bootstrapConfig8Create := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-create").Build()
  2258  	mp8Create := newFakeMachinePoolTopologyState("mp-8-create", infrastructureMachinePool8Create, bootstrapConfig8Create)
  2259  	infrastructureMachinePool8Delete := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-delete").Build()
  2260  	bootstrapConfig8Delete := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-delete").Build()
  2261  	mp8Delete := newFakeMachinePoolTopologyState("mp-8-delete", infrastructureMachinePool8Delete, bootstrapConfig8Delete)
  2262  	infrastructureMachinePool8Update := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-8-update").Build()
  2263  	bootstrapConfig8Update := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-8-update").Build()
  2264  	mp8Update := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8Update, bootstrapConfig8Update)
  2265  	infrastructureMachinePool8UpdateWithChanges := infrastructureMachinePool8Update.DeepCopy()
  2266  	g.Expect(unstructured.SetNestedField(infrastructureMachinePool8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2267  	bootstrapConfig8UpdateWithChanges := bootstrapConfig8Update.DeepCopy()
  2268  	g.Expect(unstructured.SetNestedField(bootstrapConfig8UpdateWithChanges.Object, "foo", "spec", "foo")).To(Succeed())
  2269  	mp8UpdateWithChangedObjects := newFakeMachinePoolTopologyState("mp-8-update", infrastructureMachinePool8UpdateWithChanges, bootstrapConfig8UpdateWithChanges)
  2270  
  2271  	infrastructureMachinePool9m := builder.TestInfrastructureMachinePool(metav1.NamespaceDefault, "infrastructure-machinepool-9m").Build()
  2272  	bootstrapConfig9m := builder.TestBootstrapConfig(metav1.NamespaceDefault, "bootstrap-config-9m").Build()
  2273  	mp9 := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m)
  2274  	mp9.Object.Spec.Template.ObjectMeta.Labels = map[string]string{clusterv1.ClusterNameLabel: "cluster-1", "foo": "bar"}
  2275  	mp9WithInstanceSpecificTemplateMetadata := newFakeMachinePoolTopologyState("mp-9m", infrastructureMachinePool9m, bootstrapConfig9m)
  2276  	mp9WithInstanceSpecificTemplateMetadata.Object.Spec.Template.ObjectMeta.Labels = map[string]string{"foo": "bar"}
  2277  
  2278  	tests := []struct {
  2279  		name                                      string
  2280  		current                                   []*scope.MachinePoolState
  2281  		currentOnlyAPIServer                      []*scope.MachinePoolState
  2282  		desired                                   []*scope.MachinePoolState
  2283  		upgradeTracker                            *scope.UpgradeTracker
  2284  		want                                      []*scope.MachinePoolState
  2285  		wantInfrastructureMachinePoolObjectUpdate map[string]bool
  2286  		wantBootstrapObjectUpdate                 map[string]bool
  2287  		wantErr                                   bool
  2288  	}{
  2289  		{
  2290  			name:    "Should create desired MachinePool if the current does not exists yet",
  2291  			current: nil,
  2292  			desired: []*scope.MachinePoolState{mp1},
  2293  			want:    []*scope.MachinePoolState{mp1},
  2294  			wantErr: false,
  2295  		},
  2296  		{
  2297  			name:                 "Should skip creating desired MachinePool if it already exists in the apiserver (even if it is not in current state)",
  2298  			current:              nil,
  2299  			currentOnlyAPIServer: []*scope.MachinePoolState{mp1},
  2300  			desired:              []*scope.MachinePoolState{mp1},
  2301  			want:                 []*scope.MachinePoolState{mp1},
  2302  			wantErr:              false,
  2303  		},
  2304  		{
  2305  			name:           "Should not create desired MachinePool if the current does not exists yet and it marked as pending create",
  2306  			current:        nil,
  2307  			upgradeTracker: upgradeTrackerWithmp1PendingCreate,
  2308  			desired:        []*scope.MachinePoolState{mp1},
  2309  			want:           nil,
  2310  			wantErr:        false,
  2311  		},
  2312  		{
  2313  			name:    "No-op if current MachinePool is equal to desired",
  2314  			current: []*scope.MachinePoolState{mp1},
  2315  			desired: []*scope.MachinePoolState{mp1},
  2316  			want:    []*scope.MachinePoolState{mp1},
  2317  			wantErr: false,
  2318  		},
  2319  		{
  2320  			name:    "Should update InfrastructureMachinePool",
  2321  			current: []*scope.MachinePoolState{mp2},
  2322  			desired: []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2323  			want:    []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2324  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": true},
  2325  			wantErr: false,
  2326  		},
  2327  		{
  2328  			name:           "Should not update InfrastructureMachinePool if MachinePool is pending upgrade",
  2329  			current:        []*scope.MachinePoolState{mp2},
  2330  			desired:        []*scope.MachinePoolState{mp2WithChangedInfrastructureMachinePool},
  2331  			upgradeTracker: upgradeTrackerWithmp2PendingUpgrade,
  2332  			want:           []*scope.MachinePoolState{mp2},
  2333  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-2": false},
  2334  			wantErr: false,
  2335  		},
  2336  		{
  2337  			name:                      "Should update BootstrapConfig",
  2338  			current:                   []*scope.MachinePoolState{mp3},
  2339  			desired:                   []*scope.MachinePoolState{mp3WithChangedbootstrapConfig},
  2340  			want:                      []*scope.MachinePoolState{mp3WithChangedbootstrapConfig},
  2341  			wantBootstrapObjectUpdate: map[string]bool{"mp-3": true},
  2342  			wantErr:                   false,
  2343  		},
  2344  		{
  2345  			name:    "Should fail update MachinePool because of changed BootstrapConfig kind",
  2346  			current: []*scope.MachinePoolState{mp3},
  2347  			desired: []*scope.MachinePoolState{mp3WithChangedbootstrapConfigChangedKind},
  2348  			wantErr: true,
  2349  		},
  2350  		{
  2351  			name:    "Should update InfrastructureMachinePool and BootstrapConfig",
  2352  			current: []*scope.MachinePoolState{mp4},
  2353  			desired: []*scope.MachinePoolState{mp4WithChangedObjects},
  2354  			want:    []*scope.MachinePoolState{mp4WithChangedObjects},
  2355  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-4": true},
  2356  			wantBootstrapObjectUpdate:                 map[string]bool{"mp-4": true},
  2357  			wantErr:                                   false,
  2358  		},
  2359  		{
  2360  			name:    "Should fail update MachinePool because of changed InfrastructureMachinePool kind",
  2361  			current: []*scope.MachinePoolState{mp5},
  2362  			desired: []*scope.MachinePoolState{mp5WithChangedinfrastructureMachinePoolKind},
  2363  			wantErr: true,
  2364  		},
  2365  		{
  2366  			name:    "Should fail update MachinePool because of changed bootstrapConfig namespace",
  2367  			current: []*scope.MachinePoolState{mp6},
  2368  			desired: []*scope.MachinePoolState{mp6WithChangedbootstrapConfigNamespace},
  2369  			wantErr: true,
  2370  		},
  2371  		{
  2372  			name:    "Should delete MachinePool",
  2373  			current: []*scope.MachinePoolState{mp7},
  2374  			desired: []*scope.MachinePoolState{},
  2375  			want:    []*scope.MachinePoolState{},
  2376  			wantErr: false,
  2377  		},
  2378  		{
  2379  			name:    "Should create, update and delete MachinePools",
  2380  			current: []*scope.MachinePoolState{mp8Update, mp8Delete},
  2381  			desired: []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects},
  2382  			want:    []*scope.MachinePoolState{mp8Create, mp8UpdateWithChangedObjects},
  2383  			wantInfrastructureMachinePoolObjectUpdate: map[string]bool{"mp-8-update": true},
  2384  			wantBootstrapObjectUpdate:                 map[string]bool{"mp-8-update": true},
  2385  			wantErr:                                   false,
  2386  		},
  2387  		{
  2388  			name:    "Enforce template metadata",
  2389  			current: []*scope.MachinePoolState{mp9WithInstanceSpecificTemplateMetadata},
  2390  			desired: []*scope.MachinePoolState{mp9},
  2391  			want:    []*scope.MachinePoolState{mp9},
  2392  			wantErr: false,
  2393  		},
  2394  	}
  2395  	for _, tt := range tests {
  2396  		t.Run(tt.name, func(t *testing.T) {
  2397  			g := NewWithT(t)
  2398  
  2399  			// Create namespace and modify input to have correct namespace set
  2400  			namespace, err := env.CreateNamespace(ctx, "reconcile-machine-pools")
  2401  			g.Expect(err).ToNot(HaveOccurred())
  2402  			for i, s := range tt.current {
  2403  				tt.current[i] = prepareMachinePoolState(s, namespace.GetName())
  2404  			}
  2405  			for i, s := range tt.desired {
  2406  				tt.desired[i] = prepareMachinePoolState(s, namespace.GetName())
  2407  			}
  2408  			for i, s := range tt.want {
  2409  				tt.want[i] = prepareMachinePoolState(s, namespace.GetName())
  2410  			}
  2411  
  2412  			for _, s := range tt.current {
  2413  				g.Expect(env.PatchAndWait(ctx, s.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2414  				g.Expect(env.PatchAndWait(ctx, s.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2415  				g.Expect(env.PatchAndWait(ctx, s.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2416  			}
  2417  
  2418  			currentMachinePoolStates := toMachinePoolTopologyStateMap(tt.current)
  2419  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  2420  			s.Current.MachinePools = currentMachinePoolStates
  2421  
  2422  			// currentOnlyAPIServer MPs only exist in the APIserver but are not part of s.Current.
  2423  			// This simulates that getCurrentMachinePoolState in current_state.go read a stale MP list.
  2424  			for _, s := range tt.currentOnlyAPIServer {
  2425  				mpState := prepareMachinePoolState(s, namespace.GetName())
  2426  
  2427  				g.Expect(env.PatchAndWait(ctx, mpState.InfrastructureMachinePoolObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2428  				g.Expect(env.PatchAndWait(ctx, mpState.BootstrapObject, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2429  				g.Expect(env.PatchAndWait(ctx, mpState.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  2430  			}
  2431  
  2432  			s.Desired = &scope.ClusterState{MachinePools: toMachinePoolTopologyStateMap(tt.desired)}
  2433  
  2434  			if tt.upgradeTracker != nil {
  2435  				s.UpgradeTracker = tt.upgradeTracker
  2436  			}
  2437  
  2438  			r := Reconciler{
  2439  				Client:             env.GetClient(),
  2440  				APIReader:          env.GetAPIReader(),
  2441  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  2442  				recorder:           env.GetEventRecorderFor("test"),
  2443  			}
  2444  			err = r.reconcileMachinePools(ctx, s)
  2445  			if tt.wantErr {
  2446  				g.Expect(err).To(HaveOccurred())
  2447  				return
  2448  			}
  2449  			g.Expect(err).ToNot(HaveOccurred())
  2450  
  2451  			var gotMachinePoolList expv1.MachinePoolList
  2452  			g.Expect(env.GetAPIReader().List(ctx, &gotMachinePoolList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  2453  			g.Expect(gotMachinePoolList.Items).To(HaveLen(len(tt.want)))
  2454  
  2455  			if tt.want == nil {
  2456  				// No machine Pools should exist.
  2457  				g.Expect(gotMachinePoolList.Items).To(BeEmpty())
  2458  			}
  2459  
  2460  			for _, wantMachinePoolState := range tt.want {
  2461  				for _, gotMachinePool := range gotMachinePoolList.Items {
  2462  					if wantMachinePoolState.Object.Name != gotMachinePool.Name {
  2463  						continue
  2464  					}
  2465  					currentMachinePoolTopologyName := wantMachinePoolState.Object.ObjectMeta.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
  2466  					currentMachinePoolState := currentMachinePoolStates[currentMachinePoolTopologyName]
  2467  
					// Copy over the name of the newly created InfrastructureRef and Bootstrap.ConfigRef because they get a generated name
  2469  					wantMachinePoolState.Object.Spec.Template.Spec.InfrastructureRef.Name = gotMachinePool.Spec.Template.Spec.InfrastructureRef.Name
  2470  					if gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef != nil {
  2471  						wantMachinePoolState.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Name = gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef.Name
  2472  					}
  2473  
  2474  					// Compare MachinePool.
  2475  					// Note: We're intentionally only comparing Spec as otherwise we would have to account for
  2476  					// empty vs. filled out TypeMeta.
  2477  					g.Expect(gotMachinePool.Spec).To(BeComparableTo(wantMachinePoolState.Object.Spec))
  2478  
  2479  					// Compare BootstrapObject.
  2480  					gotBootstrapObjectRef := gotMachinePool.Spec.Template.Spec.Bootstrap.ConfigRef
  2481  					gotBootstrapObject := unstructured.Unstructured{}
  2482  					gotBootstrapObject.SetKind(gotBootstrapObjectRef.Kind)
  2483  					gotBootstrapObject.SetAPIVersion(gotBootstrapObjectRef.APIVersion)
  2484  
  2485  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2486  						Namespace: gotBootstrapObjectRef.Namespace,
  2487  						Name:      gotBootstrapObjectRef.Name,
  2488  					}, &gotBootstrapObject)
  2489  
  2490  					g.Expect(err).ToNot(HaveOccurred())
  2491  
  2492  					g.Expect(&gotBootstrapObject).To(EqualObject(wantMachinePoolState.BootstrapObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2493  
  2494  					// Check BootstrapObject update.
  2495  					if currentMachinePoolState != nil && currentMachinePoolState.BootstrapObject != nil {
  2496  						if tt.wantBootstrapObjectUpdate[gotMachinePool.Name] {
  2497  							g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).ToNot(Equal(gotBootstrapObject.GetResourceVersion()))
  2498  						} else {
  2499  							g.Expect(currentMachinePoolState.BootstrapObject.GetResourceVersion()).To(Equal(gotBootstrapObject.GetResourceVersion()))
  2500  						}
  2501  					}
  2502  
  2503  					// Compare InfrastructureMachinePoolObject.
  2504  					gotInfrastructureMachinePoolObjectRef := gotMachinePool.Spec.Template.Spec.InfrastructureRef
  2505  					gotInfrastructureMachinePoolObject := unstructured.Unstructured{}
  2506  					gotInfrastructureMachinePoolObject.SetKind(gotInfrastructureMachinePoolObjectRef.Kind)
  2507  					gotInfrastructureMachinePoolObject.SetAPIVersion(gotInfrastructureMachinePoolObjectRef.APIVersion)
  2508  
  2509  					err = env.GetAPIReader().Get(ctx, client.ObjectKey{
  2510  						Namespace: gotInfrastructureMachinePoolObjectRef.Namespace,
  2511  						Name:      gotInfrastructureMachinePoolObjectRef.Name,
  2512  					}, &gotInfrastructureMachinePoolObject)
  2513  
  2514  					g.Expect(err).ToNot(HaveOccurred())
  2515  
  2516  					g.Expect(&gotInfrastructureMachinePoolObject).To(EqualObject(wantMachinePoolState.InfrastructureMachinePoolObject, IgnoreAutogeneratedMetadata, IgnoreNameGenerated))
  2517  
  2518  					// Check InfrastructureMachinePoolObject update.
  2519  					if currentMachinePoolState != nil && currentMachinePoolState.InfrastructureMachinePoolObject != nil {
  2520  						if tt.wantInfrastructureMachinePoolObjectUpdate[gotMachinePool.Name] {
  2521  							g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).ToNot(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion()))
  2522  						} else {
  2523  							g.Expect(currentMachinePoolState.InfrastructureMachinePoolObject.GetResourceVersion()).To(Equal(gotInfrastructureMachinePoolObject.GetResourceVersion()))
  2524  						}
  2525  					}
  2526  				}
  2527  			}
  2528  		})
  2529  	}
  2530  }
  2531  
// TestReconcileReferencedObjectSequences tests multiple subsequent calls to reconcileReferencedObject
// for a control-plane object to verify that the objects are reconciled as expected by tracking managed fields correctly.
// NOTE: by extension this test validates managed field handling in mergePatches, and thus its usage in other parts of the
// codebase.
func TestReconcileReferencedObjectSequences(t *testing.T) {
	// g := NewWithT(t)
	// Write the config file to access the test env for debugging.
	// g.Expect(os.WriteFile("test.conf", kubeconfig.FromEnvTestConfig(env.Config, &clusterv1.Cluster{
	// 	ObjectMeta: metav1.ObjectMeta{Name: "test"},
	// }), 0777)).To(Succeed())

	// object holds the spec of the control-plane object used in a test step.
	type object struct {
		spec map[string]interface{}
	}

	// externalStep simulates another field manager (e.g. a user) modifying
	// the control-plane object via server-side apply.
	type externalStep struct {
		name string
		// object is the state of the control-plane object after an external modification.
		object object
	}
	// reconcileStep executes reconcileReferencedObject and validates the resulting object.
	type reconcileStep struct {
		name string
		// desired is the desired control-plane object handed over to reconcileReferencedObject.
		desired object
		// want is the expected control-plane object after calling reconcileReferencedObject.
		want object
	}

	tests := []struct {
		name           string
		reconcileSteps []interface{}
	}{
		{
			name: "Should drop nested field",
			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
			// and most specifically it verifies that when a field in a template is deleted, it gets deleted
			// from the generated object (and it is not being treated as instance specific value).
			reconcileSteps: []interface{}{
				reconcileStep{
					name: "Initially reconcile KCP",
					desired: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											"enable-hostpath-provisioner": "true",
										},
									},
								},
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											"enable-hostpath-provisioner": "true",
										},
									},
								},
							},
						},
					},
				},
				reconcileStep{
					name: "Drop enable-hostpath-provisioner",
					desired: object{
						spec: nil,
					},
					want: object{
						spec: nil,
					},
				},
			},
		},
		{
			name: "Should drop label",
			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
			// and most specifically it verifies that when a template label is deleted, it gets deleted
			// from the generated object (and it is not being treated as instance specific value).
			reconcileSteps: []interface{}{
				reconcileStep{
					name: "Initially reconcile KCP",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"label.with.dots/owned": "true",
										"anotherLabel":          "true",
									},
								},
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"label.with.dots/owned": "true",
										"anotherLabel":          "true",
									},
								},
							},
						},
					},
				},
				reconcileStep{
					name: "Drop the label with dots",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										// label.with.dots/owned has been removed by e.g. a change in Cluster.Topology.ControlPlane.Labels.
										"anotherLabel": "true",
									},
								},
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										// Reconcile to drop label.with.dots/owned label.
										"anotherLabel": "true",
									},
								},
							},
						},
					},
				},
			},
		},
		{
			name: "Should enforce field",
			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
			// by reverting user changes to a managed field.
			reconcileSteps: []interface{}{
				reconcileStep{
					name: "Initially reconcile",
					desired: object{
						spec: map[string]interface{}{
							"foo": "ccValue",
						},
					},
					want: object{
						spec: map[string]interface{}{
							"foo": "ccValue",
						},
					},
				},
				externalStep{
					name: "User changes value",
					object: object{
						spec: map[string]interface{}{
							"foo": "userValue",
						},
					},
				},
				reconcileStep{
					name: "Reconcile overwrites value",
					desired: object{
						spec: map[string]interface{}{
							// ClusterClass still proposing the old value.
							"foo": "ccValue",
						},
					},
					want: object{
						spec: map[string]interface{}{
							// Reconcile to restore the old value.
							"foo": "ccValue",
						},
					},
				},
			},
		},
		{
			name: "Should preserve user-defined field while dropping managed field",
			// Note: This test verifies that Topology treats changes to fields existing in templates as authoritative
			// but allows setting additional/instance-specific values.
			reconcileSteps: []interface{}{
				reconcileStep{
					name: "Initially reconcile KCP",
					desired: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											"enable-hostpath-provisioner": "true",
										},
									},
								},
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											"enable-hostpath-provisioner": "true",
										},
									},
								},
							},
						},
					},
				},
				externalStep{
					name: "User adds an additional extraArg",
					object: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											// User adds enable-garbage-collector.
											"enable-garbage-collector": "true",
										},
									},
								},
							},
						},
					},
				},
				reconcileStep{
					name: "Previously set extraArgs is dropped from KCP, user-specified field is preserved.",
					desired: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{},
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"kubeadmConfigSpec": map[string]interface{}{
								"clusterConfiguration": map[string]interface{}{
									"controllerManager": map[string]interface{}{
										"extraArgs": map[string]interface{}{
											// Reconcile to drop enable-hostpath-provisioner,
											// while preserving user-defined enable-garbage-collector field.
											"enable-garbage-collector": "true",
										},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			name: "Should preserve user-defined object field while dropping managed fields",
			// Note: This test verifies that reconcileReferencedObject treats changes to fields existing in templates as authoritative
			// but allows setting additional/instance-specific values.
			reconcileSteps: []interface{}{
				reconcileStep{
					name: "Initially reconcile",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{},
						},
					},
				},
				externalStep{
					name: "User adds an additional object",
					object: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"infrastructureRef": map[string]interface{}{
									"apiVersion": "foo/v1alpha1",
									"kind":       "Foo",
								},
							},
						},
					},
				},
				reconcileStep{
					name: "ClusterClass starts having an opinion about some fields",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"foo": "foo",
									},
								},
								"nodeDeletionTimeout": "10m",
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								// User fields are preserved.
								"infrastructureRef": map[string]interface{}{
									"apiVersion": "foo/v1alpha1",
									"kind":       "Foo",
								},
								// ClusterClass authoritative fields are added.
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"foo": "foo",
									},
								},
								"nodeDeletionTimeout": "10m",
							},
						},
					},
				},
				reconcileStep{
					name: "ClusterClass stops having an opinion on the field",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"foo": "foo",
									},
								},
								// nodeDeletionTimeout has been removed by e.g. a change in ClusterClass.
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								// Reconcile to drop nodeDeletionTimeout (no longer desired),
								// while preserving the user-defined infrastructureRef and the still-desired labels.
								"infrastructureRef": map[string]interface{}{
									"apiVersion": "foo/v1alpha1",
									"kind":       "Foo",
								},
								"metadata": map[string]interface{}{
									"labels": map[string]interface{}{
										"foo": "foo",
									},
								},
							},
						},
					},
				},
				reconcileStep{
					name: "ClusterClass stops having an opinion on the object",
					desired: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								// metadata has been removed by e.g. a change in ClusterClass.
							},
						},
					},
					want: object{
						spec: map[string]interface{}{
							"machineTemplate": map[string]interface{}{
								// Reconcile to drop metadata,
								// while preserving user-defined field.
								"infrastructureRef": map[string]interface{}{
									"apiVersion": "foo/v1alpha1",
									"kind":       "Foo",
								},
							},
						},
					},
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			// Create namespace and modify input to have correct namespace set
			namespace, err := env.CreateNamespace(ctx, "reconcile-ref-obj-seq")
			g.Expect(err).ToNot(HaveOccurred())

			r := Reconciler{
				Client:             env,
				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
				recorder:           env.GetEventRecorderFor("test"),
			}

			s := scope.New(&clusterv1.Cluster{})
			s.Blueprint = &scope.ClusterBlueprint{
				ClusterClass: &clusterv1.ClusterClass{},
			}

			for i, step := range tt.reconcileSteps {
				var currentControlPlane *unstructured.Unstructured

				// Get current ControlPlane (on later steps).
				// Note: On the first step no object exists yet, so currentControlPlane stays nil.
				if i > 0 {
					currentControlPlane = &unstructured.Unstructured{
						Object: map[string]interface{}{
							"kind":       builder.TestControlPlaneKind,
							"apiVersion": builder.ControlPlaneGroupVersion.String(),
						},
					}
					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: "my-cluster"}, currentControlPlane)).To(Succeed())
				}

				if step, ok := step.(externalStep); ok {
					// This is a user step, so let's just update the object using SSA
					// with a different field owner ("other-controller").
					obj := &unstructured.Unstructured{
						Object: map[string]interface{}{
							"kind":       builder.TestControlPlaneKind,
							"apiVersion": builder.ControlPlaneGroupVersion.String(),
							"metadata": map[string]interface{}{
								"name":      "my-cluster",
								"namespace": namespace.GetName(),
							},
							"spec": step.object.spec,
						},
					}
					err := env.PatchAndWait(ctx, obj, client.FieldOwner("other-controller"), client.ForceOwnership)
					g.Expect(err).ToNot(HaveOccurred())
					continue
				}

				if step, ok := step.(reconcileStep); ok {
					// This is a reconcile step, so let's execute a reconcile and then validate the result.

					// Set the current control plane.
					s.Current.ControlPlane = &scope.ControlPlaneState{
						Object: currentControlPlane,
					}
					// Set the desired control plane.
					s.Desired = &scope.ClusterState{
						ControlPlane: &scope.ControlPlaneState{
							Object: &unstructured.Unstructured{
								Object: map[string]interface{}{
									"kind":       builder.TestControlPlaneKind,
									"apiVersion": builder.ControlPlaneGroupVersion.String(),
									"metadata": map[string]interface{}{
										"name":      "my-cluster",
										"namespace": namespace.GetName(),
									},
								},
							},
						},
					}
					if step.desired.spec != nil {
						s.Desired.ControlPlane.Object.Object["spec"] = step.desired.spec
					}

					// Execute a reconcile.
					g.Expect(r.reconcileReferencedObject(ctx, reconcileReferencedObjectInput{
						cluster: s.Current.Cluster,
						current: s.Current.ControlPlane.Object,
						desired: s.Desired.ControlPlane.Object,
					})).To(Succeed())

					// Build the object for comparison.
					want := &unstructured.Unstructured{
						Object: map[string]interface{}{
							"kind":       builder.TestControlPlaneKind,
							"apiVersion": builder.ControlPlaneGroupVersion.String(),
							"metadata": map[string]interface{}{
								"name":      "my-cluster",
								"namespace": namespace.GetName(),
							},
						},
					}
					if step.want.spec != nil {
						want.Object["spec"] = step.want.spec
					}

					// Get the reconciled object.
					got := want.DeepCopy() // this is required otherwise Get will modify want
					g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Namespace: namespace.GetName(), Name: "my-cluster"}, got)).To(Succeed())

					// Compare want with got.
					// Ignore server-populated metadata (e.g. .metadata.resourceVersion and
					// .metadata.annotations) as we don't care about them in this test.
					unstructured.RemoveNestedField(got.Object, "metadata", "resourceVersion")
					unstructured.RemoveNestedField(got.Object, "metadata", "annotations")
					unstructured.RemoveNestedField(got.Object, "metadata", "creationTimestamp")
					unstructured.RemoveNestedField(got.Object, "metadata", "generation")
					unstructured.RemoveNestedField(got.Object, "metadata", "managedFields")
					unstructured.RemoveNestedField(got.Object, "metadata", "uid")
					unstructured.RemoveNestedField(got.Object, "metadata", "selfLink")
					g.Expect(got).To(EqualObject(want), fmt.Sprintf("Step %q failed: %v", step.name, cmp.Diff(want, got)))
					continue
				}

				// Guard against a test author adding a step type that is neither
				// externalStep nor reconcileStep.
				panic(fmt.Errorf("unknown step type %T", step))
			}
		})
	}
}
  3036  
  3037  func TestReconcileMachineDeploymentMachineHealthCheck(t *testing.T) {
  3038  	md := builder.MachineDeployment(metav1.NamespaceDefault, "md-1").WithLabels(
  3039  		map[string]string{
  3040  			clusterv1.ClusterTopologyMachineDeploymentNameLabel: "machine-deployment-one",
  3041  		}).
  3042  		Build()
  3043  
  3044  	maxUnhealthy := intstr.Parse("45%")
  3045  	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "md-1").
  3046  		WithSelector(*selectorForMachineDeploymentMHC(md)).
  3047  		WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
  3048  			{
  3049  				Type:    corev1.NodeReady,
  3050  				Status:  corev1.ConditionUnknown,
  3051  				Timeout: metav1.Duration{Duration: 5 * time.Minute},
  3052  			},
  3053  		}).
  3054  		WithClusterName("cluster1")
  3055  
  3056  	infrastructureMachineTemplate := builder.TestInfrastructureMachineTemplate(metav1.NamespaceDefault, "infrastructure-machine-1").Build()
  3057  	bootstrapTemplate := builder.TestBootstrapTemplate(metav1.NamespaceDefault, "bootstrap-config-1").Build()
  3058  
  3059  	tests := []struct {
  3060  		name    string
  3061  		current []*scope.MachineDeploymentState
  3062  		desired []*scope.MachineDeploymentState
  3063  		want    []*clusterv1.MachineHealthCheck
  3064  	}{
  3065  		{
  3066  			name:    "Create a MachineHealthCheck if the MachineDeployment is being created",
  3067  			current: nil,
  3068  			desired: []*scope.MachineDeploymentState{
  3069  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3070  					mhcBuilder.DeepCopy().Build()),
  3071  			},
  3072  			want: []*clusterv1.MachineHealthCheck{
  3073  				mhcBuilder.DeepCopy().Build()},
  3074  		},
  3075  		{
  3076  			name: "Create a new MachineHealthCheck if the MachineDeployment is modified to include one",
  3077  			current: []*scope.MachineDeploymentState{
  3078  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3079  					nil)},
  3080  			// MHC is added in the desired state of the MachineDeployment
  3081  			desired: []*scope.MachineDeploymentState{
  3082  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3083  					mhcBuilder.DeepCopy().Build()),
  3084  			},
  3085  			want: []*clusterv1.MachineHealthCheck{
  3086  				mhcBuilder.DeepCopy().Build()}},
  3087  		{
  3088  			name: "Update MachineHealthCheck spec adding a field if the spec adds a field",
  3089  			current: []*scope.MachineDeploymentState{
  3090  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3091  					mhcBuilder.DeepCopy().Build()),
  3092  			},
  3093  			desired: []*scope.MachineDeploymentState{
  3094  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3095  					mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).Build())},
  3096  			want: []*clusterv1.MachineHealthCheck{
  3097  				mhcBuilder.DeepCopy().
  3098  					WithMaxUnhealthy(&maxUnhealthy).
  3099  					Build()},
  3100  		},
  3101  		{
  3102  			name: "Update MachineHealthCheck spec removing a field if the spec removes a field",
  3103  			current: []*scope.MachineDeploymentState{
  3104  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3105  					mhcBuilder.DeepCopy().WithMaxUnhealthy(&maxUnhealthy).Build()),
  3106  			},
  3107  			desired: []*scope.MachineDeploymentState{
  3108  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3109  					mhcBuilder.DeepCopy().Build()),
  3110  			},
  3111  			want: []*clusterv1.MachineHealthCheck{
  3112  				mhcBuilder.DeepCopy().Build(),
  3113  			},
  3114  		},
  3115  		{
  3116  			name: "Delete MachineHealthCheck spec if the MachineDeployment is modified to remove an existing one",
  3117  			current: []*scope.MachineDeploymentState{
  3118  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3119  					mhcBuilder.DeepCopy().Build()),
  3120  			},
  3121  			desired: []*scope.MachineDeploymentState{newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate, nil)},
  3122  			want:    []*clusterv1.MachineHealthCheck{},
  3123  		},
  3124  		{
  3125  			name: "Delete MachineHealthCheck spec if the MachineDeployment is deleted",
  3126  			current: []*scope.MachineDeploymentState{
  3127  				newFakeMachineDeploymentTopologyState("md-1", infrastructureMachineTemplate, bootstrapTemplate,
  3128  					mhcBuilder.DeepCopy().Build()),
  3129  			},
  3130  			desired: []*scope.MachineDeploymentState{},
  3131  			want:    []*clusterv1.MachineHealthCheck{},
  3132  		},
  3133  	}
  3134  	for _, tt := range tests {
  3135  		t.Run(tt.name, func(t *testing.T) {
  3136  			g := NewWithT(t)
  3137  
  3138  			// Create namespace and modify input to have correct namespace set
  3139  			namespace, err := env.CreateNamespace(ctx, "reconcile-md-mhc")
  3140  			g.Expect(err).ToNot(HaveOccurred())
  3141  			for i, s := range tt.current {
  3142  				tt.current[i] = prepareMachineDeploymentState(s, namespace.GetName())
  3143  			}
  3144  			for i, s := range tt.desired {
  3145  				tt.desired[i] = prepareMachineDeploymentState(s, namespace.GetName())
  3146  			}
  3147  			for i, mhc := range tt.want {
  3148  				tt.want[i] = mhc.DeepCopy()
  3149  				tt.want[i].SetNamespace(namespace.GetName())
  3150  			}
  3151  
  3152  			uidsByName := map[string]types.UID{}
  3153  
  3154  			for _, mdts := range tt.current {
  3155  				g.Expect(env.PatchAndWait(ctx, mdts.Object, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3156  				g.Expect(env.PatchAndWait(ctx, mdts.InfrastructureMachineTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3157  				g.Expect(env.PatchAndWait(ctx, mdts.BootstrapTemplate, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3158  
  3159  				uidsByName[mdts.Object.Name] = mdts.Object.GetUID()
  3160  
  3161  				if mdts.MachineHealthCheck != nil {
  3162  					for i, ref := range mdts.MachineHealthCheck.OwnerReferences {
  3163  						ref.UID = mdts.Object.GetUID()
  3164  						mdts.MachineHealthCheck.OwnerReferences[i] = ref
  3165  					}
  3166  					g.Expect(env.PatchAndWait(ctx, mdts.MachineHealthCheck, client.ForceOwnership, client.FieldOwner(structuredmerge.TopologyManagerName))).To(Succeed())
  3167  				}
  3168  			}
  3169  
  3170  			// copy over ownerReference for desired MachineHealthCheck
  3171  			for _, mdts := range tt.desired {
  3172  				if mdts.MachineHealthCheck != nil {
  3173  					for i, ref := range mdts.MachineHealthCheck.OwnerReferences {
  3174  						if uid, ok := uidsByName[ref.Name]; ok {
  3175  							ref.UID = uid
  3176  							mdts.MachineHealthCheck.OwnerReferences[i] = ref
  3177  						}
  3178  					}
  3179  				}
  3180  			}
  3181  
  3182  			currentMachineDeploymentStates := toMachineDeploymentTopologyStateMap(tt.current)
  3183  			s := scope.New(builder.Cluster(namespace.GetName(), "cluster-1").Build())
  3184  			s.Current.MachineDeployments = currentMachineDeploymentStates
  3185  
  3186  			s.Desired = &scope.ClusterState{MachineDeployments: toMachineDeploymentTopologyStateMap(tt.desired)}
  3187  
  3188  			r := Reconciler{
  3189  				Client:             env.GetClient(),
  3190  				APIReader:          env.GetAPIReader(),
  3191  				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
  3192  				recorder:           env.GetEventRecorderFor("test"),
  3193  			}
  3194  
  3195  			err = r.reconcileMachineDeployments(ctx, s)
  3196  			g.Expect(err).ToNot(HaveOccurred())
  3197  
  3198  			var gotMachineHealthCheckList clusterv1.MachineHealthCheckList
  3199  			g.Expect(env.GetAPIReader().List(ctx, &gotMachineHealthCheckList, &client.ListOptions{Namespace: namespace.GetName()})).To(Succeed())
  3200  			g.Expect(gotMachineHealthCheckList.Items).To(HaveLen(len(tt.want)))
  3201  
  3202  			g.Expect(tt.want).To(HaveLen(len(gotMachineHealthCheckList.Items)))
  3203  
  3204  			for _, wantMHCOrig := range tt.want {
  3205  				wantMHC := wantMHCOrig.DeepCopy()
  3206  				g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, wantMHC)).To(Succeed())
  3207  
  3208  				for _, gotMHC := range gotMachineHealthCheckList.Items {
  3209  					if wantMHC.Name == gotMHC.Name {
  3210  						actual := gotMHC
  3211  						// unset UID because it got generated
  3212  						for i, ref := range actual.OwnerReferences {
  3213  							ref.UID = ""
  3214  							actual.OwnerReferences[i] = ref
  3215  						}
  3216  						g.Expect(wantMHC).To(EqualObject(&actual, IgnoreAutogeneratedMetadata))
  3217  					}
  3218  				}
  3219  			}
  3220  		})
  3221  	}
  3222  }
  3223  
  3224  func newFakeMachineDeploymentTopologyState(name string, infrastructureMachineTemplate, bootstrapTemplate *unstructured.Unstructured, machineHealthCheck *clusterv1.MachineHealthCheck) *scope.MachineDeploymentState {
  3225  	mdState := &scope.MachineDeploymentState{
  3226  		Object: builder.MachineDeployment(metav1.NamespaceDefault, name).
  3227  			WithInfrastructureTemplate(infrastructureMachineTemplate).
  3228  			WithBootstrapTemplate(bootstrapTemplate).
  3229  			WithLabels(map[string]string{
  3230  				clusterv1.ClusterTopologyMachineDeploymentNameLabel: name + "-topology",
  3231  				clusterv1.ClusterTopologyOwnedLabel:                 "",
  3232  			}).
  3233  			WithClusterName("cluster-1").
  3234  			WithReplicas(1).
  3235  			WithMinReadySeconds(1).
  3236  			Build(),
  3237  		InfrastructureMachineTemplate: infrastructureMachineTemplate.DeepCopy(),
  3238  		BootstrapTemplate:             bootstrapTemplate.DeepCopy(),
  3239  		MachineHealthCheck:            machineHealthCheck.DeepCopy(),
  3240  	}
  3241  
  3242  	scheme := runtime.NewScheme()
  3243  	_ = clusterv1.AddToScheme(scheme)
  3244  	if err := (&webhooks.MachineDeployment{
  3245  		Decoder: admission.NewDecoder(scheme),
  3246  	}).Default(admission.NewContextWithRequest(ctx, admission.Request{}), mdState.Object); err != nil {
  3247  		panic(err)
  3248  	}
  3249  	return mdState
  3250  }
  3251  
  3252  func newFakeMachinePoolTopologyState(name string, infrastructureMachinePool, bootstrapObject *unstructured.Unstructured) *scope.MachinePoolState {
  3253  	mpState := &scope.MachinePoolState{
  3254  		Object: builder.MachinePool(metav1.NamespaceDefault, name).
  3255  			WithInfrastructure(infrastructureMachinePool).
  3256  			WithBootstrap(bootstrapObject).
  3257  			WithLabels(map[string]string{
  3258  				clusterv1.ClusterTopologyMachinePoolNameLabel: name + "-topology",
  3259  				clusterv1.ClusterTopologyOwnedLabel:           "",
  3260  			}).
  3261  			WithClusterName("cluster-1").
  3262  			WithReplicas(1).
  3263  			WithMinReadySeconds(1).
  3264  			Build(),
  3265  		InfrastructureMachinePoolObject: infrastructureMachinePool.DeepCopy(),
  3266  		BootstrapObject:                 bootstrapObject.DeepCopy(),
  3267  	}
  3268  
  3269  	return mpState
  3270  }
  3271  
  3272  func toMachineDeploymentTopologyStateMap(states []*scope.MachineDeploymentState) map[string]*scope.MachineDeploymentState {
  3273  	ret := map[string]*scope.MachineDeploymentState{}
  3274  	for _, state := range states {
  3275  		ret[state.Object.Labels[clusterv1.ClusterTopologyMachineDeploymentNameLabel]] = state
  3276  	}
  3277  	return ret
  3278  }
  3279  
  3280  func toMachinePoolTopologyStateMap(states []*scope.MachinePoolState) map[string]*scope.MachinePoolState {
  3281  	ret := map[string]*scope.MachinePoolState{}
  3282  	for _, state := range states {
  3283  		ret[state.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]] = state
  3284  	}
  3285  	return ret
  3286  }
  3287  
// TestReconciler_reconcileMachineHealthCheck verifies create/update/no-op/delete
// behavior of Reconciler.reconcileMachineHealthCheck against a test environment.
// Each case supplies a current (pre-existing) and desired MachineHealthCheck and
// asserts the resulting object in the API server.
func TestReconciler_reconcileMachineHealthCheck(t *testing.T) {
	// create a controlPlane object with enough information to be used as an OwnerReference for the MachineHealthCheck.
	cp := builder.ControlPlane(metav1.NamespaceDefault, "cp1").Build()
	// Base MachineHealthCheck used by all cases; each case deep-copies it so the
	// builder itself is never mutated.
	mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1").
		WithSelector(*selectorForControlPlaneMHC()).
		WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
			{
				Type:    corev1.NodeReady,
				Status:  corev1.ConditionUnknown,
				Timeout: metav1.Duration{Duration: 5 * time.Minute},
			},
		}).
		WithClusterName("cluster1")
	tests := []struct {
		name    string
		current *clusterv1.MachineHealthCheck // pre-existing MHC in the cluster; nil means none
		desired *clusterv1.MachineHealthCheck // MHC the reconciler should converge to; nil requests deletion
		want    *clusterv1.MachineHealthCheck // expected MHC after reconcile; nil means it must be gone
		wantErr bool                          // whether a (lookup) error is expected after reconcile
	}{
		{
			name:    "Create a MachineHealthCheck",
			current: nil,
			desired: mhcBuilder.DeepCopy().Build(),
			want:    mhcBuilder.DeepCopy().Build(),
		},
		{
			name:    "Update a MachineHealthCheck with changes",
			current: mhcBuilder.DeepCopy().Build(),
			// update the unhealthy conditions in the MachineHealthCheck
			desired: mhcBuilder.DeepCopy().WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
				{
					Type:    corev1.NodeReady,
					Status:  corev1.ConditionUnknown,
					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
				},
			}).Build(),
			want: mhcBuilder.DeepCopy().WithUnhealthyConditions([]clusterv1.UnhealthyCondition{
				{
					Type:    corev1.NodeReady,
					Status:  corev1.ConditionUnknown,
					Timeout: metav1.Duration{Duration: 1000 * time.Minute},
				},
			}).Build(),
		},
		{
			name:    "Don't change a MachineHealthCheck with no difference between desired and current",
			current: mhcBuilder.DeepCopy().Build(),
			// update the unhealthy conditions in the MachineHealthCheck
			desired: mhcBuilder.DeepCopy().Build(),
			want:    mhcBuilder.DeepCopy().Build(),
		},
		{
			name:    "Delete a MachineHealthCheck",
			current: mhcBuilder.DeepCopy().Build(),
			// update the unhealthy conditions in the MachineHealthCheck
			desired: nil,
			want:    nil,
			// wantErr: the post-reconcile Get is expected to fail with NotFound.
			wantErr: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			got := &clusterv1.MachineHealthCheck{}

			// Create namespace
			namespace, err := env.CreateNamespace(ctx, "reconcile-mhc")
			// Create control plane
			g.Expect(err).ToNot(HaveOccurred())
			localCP := cp.DeepCopy()
			localCP.SetNamespace(namespace.GetName())
			g.Expect(env.CreateAndWait(ctx, localCP)).To(Succeed())
			// Modify test input and re-use control plane uid if necessary
			// (inputs are deep-copied so the shared table entries stay untouched).
			if tt.current != nil {
				tt.current = tt.current.DeepCopy()
				tt.current.SetNamespace(namespace.GetName())
			}
			if tt.desired != nil {
				tt.desired = tt.desired.DeepCopy()
				tt.desired.SetNamespace(namespace.GetName())
			}
			if tt.want != nil {
				tt.want = tt.want.DeepCopy()
				tt.want.SetNamespace(namespace.GetName())
				if len(tt.want.OwnerReferences) == 1 {
					tt.want.OwnerReferences[0].UID = localCP.GetUID()
				}
			}

			r := Reconciler{
				Client:             env,
				patchHelperFactory: serverSideApplyPatchHelperFactory(env, ssa.NewCache()),
				recorder:           env.GetEventRecorderFor("test"),
			}
			// Seed the pre-existing MachineHealthCheck, if any, before reconciling.
			if tt.current != nil {
				g.Expect(env.CreateAndWait(ctx, tt.current)).To(Succeed())
			}
			if err := r.reconcileMachineHealthCheck(ctx, tt.current, tt.desired); err != nil {
				if !tt.wantErr {
					t.Errorf("reconcileMachineHealthCheck() error = %v, wantErr %v", err, tt.wantErr)
				}
			}

			// Look up the MachineHealthCheck by the well-known name/namespace;
			// a NotFound result maps to got == nil for the deletion case.
			key := mhcBuilder.Build()
			key.SetNamespace(namespace.GetName())
			if err := env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(key), got); err != nil {
				if !tt.wantErr {
					t.Errorf("reconcileMachineHealthCheck() error = %v, wantErr %v", err, tt.wantErr)
				}
				if apierrors.IsNotFound(err) {
					got = nil
				}
			}

			// Run the defaulting webhook on the expectation so it compares equal
			// to the object persisted by the API server.
			want := tt.want.DeepCopy()
			if want != nil {
				g.Expect((&webhooks.MachineHealthCheck{}).Default(ctx, want)).To(Succeed())
			}

			g.Expect(got).To(EqualObject(want, IgnoreAutogeneratedMetadata, IgnorePaths{".kind", ".apiVersion"}))
		})
	}
}
  3412  
  3413  // prepareControlPlaneBluePrint deep-copies and returns the input scope and sets
  3414  // the given namespace to all relevant objects.
  3415  func prepareControlPlaneBluePrint(in *scope.ControlPlaneBlueprint, namespace string) *scope.ControlPlaneBlueprint {
  3416  	s := &scope.ControlPlaneBlueprint{}
  3417  	if in.InfrastructureMachineTemplate != nil {
  3418  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3419  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3420  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3421  		}
  3422  	}
  3423  	if in.MachineHealthCheck != nil {
  3424  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3425  	}
  3426  	if in.Template != nil {
  3427  		s.Template = in.Template.DeepCopy()
  3428  		if s.Template.GetNamespace() == metav1.NamespaceDefault {
  3429  			s.Template.SetNamespace(namespace)
  3430  		}
  3431  	}
  3432  	return s
  3433  }
  3434  
  3435  // prepareControlPlaneState deep-copies and returns the input scope and sets
  3436  // the given namespace to all relevant objects.
  3437  func prepareControlPlaneState(g *WithT, in *scope.ControlPlaneState, namespace string) *scope.ControlPlaneState {
  3438  	s := &scope.ControlPlaneState{}
  3439  	if in.InfrastructureMachineTemplate != nil {
  3440  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3441  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3442  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3443  		}
  3444  	}
  3445  	if in.MachineHealthCheck != nil {
  3446  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3447  		if s.MachineHealthCheck.GetNamespace() == metav1.NamespaceDefault {
  3448  			s.MachineHealthCheck.SetNamespace(namespace)
  3449  		}
  3450  	}
  3451  	if in.Object != nil {
  3452  		s.Object = in.Object.DeepCopy()
  3453  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3454  			s.Object.SetNamespace(namespace)
  3455  		}
  3456  		if current, ok, err := unstructured.NestedString(s.Object.Object, "spec", "machineTemplate", "infrastructureRef", "namespace"); ok && err == nil && current == metav1.NamespaceDefault {
  3457  			g.Expect(unstructured.SetNestedField(s.Object.Object, namespace, "spec", "machineTemplate", "infrastructureRef", "namespace")).To(Succeed())
  3458  		}
  3459  	}
  3460  	return s
  3461  }
  3462  
  3463  // prepareMachineDeploymentState deep-copies and returns the input scope and sets
  3464  // the given namespace to all relevant objects.
  3465  func prepareMachineDeploymentState(in *scope.MachineDeploymentState, namespace string) *scope.MachineDeploymentState {
  3466  	s := &scope.MachineDeploymentState{}
  3467  	if in.BootstrapTemplate != nil {
  3468  		s.BootstrapTemplate = in.BootstrapTemplate.DeepCopy()
  3469  		if s.BootstrapTemplate.GetNamespace() == metav1.NamespaceDefault {
  3470  			s.BootstrapTemplate.SetNamespace(namespace)
  3471  		}
  3472  	}
  3473  	if in.InfrastructureMachineTemplate != nil {
  3474  		s.InfrastructureMachineTemplate = in.InfrastructureMachineTemplate.DeepCopy()
  3475  		if s.InfrastructureMachineTemplate.GetNamespace() == metav1.NamespaceDefault {
  3476  			s.InfrastructureMachineTemplate.SetNamespace(namespace)
  3477  		}
  3478  	}
  3479  	if in.MachineHealthCheck != nil {
  3480  		s.MachineHealthCheck = in.MachineHealthCheck.DeepCopy()
  3481  		if s.MachineHealthCheck.GetNamespace() == metav1.NamespaceDefault {
  3482  			s.MachineHealthCheck.SetNamespace(namespace)
  3483  		}
  3484  	}
  3485  	if in.Object != nil {
  3486  		s.Object = in.Object.DeepCopy()
  3487  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3488  			s.Object.SetNamespace(namespace)
  3489  		}
  3490  		if s.Object.Spec.Template.Spec.Bootstrap.ConfigRef != nil && s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == metav1.NamespaceDefault {
  3491  			s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace = namespace
  3492  		}
  3493  		if s.Object.Spec.Template.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3494  			s.Object.Spec.Template.Spec.InfrastructureRef.Namespace = namespace
  3495  		}
  3496  	}
  3497  	return s
  3498  }
  3499  
  3500  // prepareMachinePoolState deep-copies and returns the input scope and sets
  3501  // the given namespace to all relevant objects.
  3502  func prepareMachinePoolState(in *scope.MachinePoolState, namespace string) *scope.MachinePoolState {
  3503  	s := &scope.MachinePoolState{}
  3504  	if in.BootstrapObject != nil {
  3505  		s.BootstrapObject = in.BootstrapObject.DeepCopy()
  3506  		if s.BootstrapObject.GetNamespace() == metav1.NamespaceDefault {
  3507  			s.BootstrapObject.SetNamespace(namespace)
  3508  		}
  3509  	}
  3510  	if in.InfrastructureMachinePoolObject != nil {
  3511  		s.InfrastructureMachinePoolObject = in.InfrastructureMachinePoolObject.DeepCopy()
  3512  		if s.InfrastructureMachinePoolObject.GetNamespace() == metav1.NamespaceDefault {
  3513  			s.InfrastructureMachinePoolObject.SetNamespace(namespace)
  3514  		}
  3515  	}
  3516  	if in.Object != nil {
  3517  		s.Object = in.Object.DeepCopy()
  3518  		if s.Object.GetNamespace() == metav1.NamespaceDefault {
  3519  			s.Object.SetNamespace(namespace)
  3520  		}
  3521  		if s.Object.Spec.Template.Spec.Bootstrap.ConfigRef != nil && s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace == metav1.NamespaceDefault {
  3522  			s.Object.Spec.Template.Spec.Bootstrap.ConfigRef.Namespace = namespace
  3523  		}
  3524  		if s.Object.Spec.Template.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3525  			s.Object.Spec.Template.Spec.InfrastructureRef.Namespace = namespace
  3526  		}
  3527  	}
  3528  	return s
  3529  }
  3530  
  3531  // prepareCluster deep-copies and returns the input Cluster and sets
  3532  // the given namespace to all relevant objects.
  3533  func prepareCluster(in *clusterv1.Cluster, namespace string) *clusterv1.Cluster {
  3534  	c := in.DeepCopy()
  3535  	if c.Namespace == metav1.NamespaceDefault {
  3536  		c.SetNamespace(namespace)
  3537  	}
  3538  	if c.Spec.InfrastructureRef != nil && c.Spec.InfrastructureRef.Namespace == metav1.NamespaceDefault {
  3539  		c.Spec.InfrastructureRef.Namespace = namespace
  3540  	}
  3541  	if c.Spec.ControlPlaneRef != nil && c.Spec.ControlPlaneRef.Namespace == metav1.NamespaceDefault {
  3542  		c.Spec.ControlPlaneRef.Namespace = namespace
  3543  	}
  3544  	return c
  3545  }
  3546  
  3547  func Test_createErrorWithoutObjectName(t *testing.T) {
  3548  	detailsError := &apierrors.StatusError{
  3549  		ErrStatus: metav1.Status{
  3550  			Status:  metav1.StatusFailure,
  3551  			Code:    http.StatusUnprocessableEntity,
  3552  			Reason:  metav1.StatusReasonInvalid,
  3553  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3554  			Details: &metav1.StatusDetails{
  3555  				Group: "infrastructure.cluster.x-k8s.io",
  3556  				Kind:  "DockerMachineTemplate",
  3557  				Name:  "docker-template-one",
  3558  				Causes: []metav1.StatusCause{
  3559  					{
  3560  						Type:    "FieldValueInvalid",
  3561  						Message: "Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3562  						Field:   "spec.template.spec.preLoadImages",
  3563  					},
  3564  				},
  3565  			},
  3566  		},
  3567  	}
  3568  	expectedDetailsError := &apierrors.StatusError{
  3569  		ErrStatus: metav1.Status{
  3570  			Status: metav1.StatusFailure,
  3571  			Code:   http.StatusUnprocessableEntity,
  3572  			Reason: metav1.StatusReasonInvalid,
  3573  			// The only difference between the two objects should be in the Message section.
  3574  			Message: "failed to create DockerMachineTemplate.infrastructure.cluster.x-k8s.io: FieldValueInvalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3575  			Details: &metav1.StatusDetails{
  3576  				Group: "infrastructure.cluster.x-k8s.io",
  3577  				Kind:  "DockerMachineTemplate",
  3578  				Name:  "docker-template-one",
  3579  				Causes: []metav1.StatusCause{
  3580  					{
  3581  						Type:    "FieldValueInvalid",
  3582  						Message: "Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3583  						Field:   "spec.template.spec.preLoadImages",
  3584  					},
  3585  				},
  3586  			},
  3587  		},
  3588  	}
  3589  	NoCausesDetailsError := &apierrors.StatusError{
  3590  		ErrStatus: metav1.Status{
  3591  			Status:  metav1.StatusFailure,
  3592  			Code:    http.StatusUnprocessableEntity,
  3593  			Reason:  metav1.StatusReasonInvalid,
  3594  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3595  			Details: &metav1.StatusDetails{
  3596  				Group: "infrastructure.cluster.x-k8s.io",
  3597  				Kind:  "DockerMachineTemplate",
  3598  				Name:  "docker-template-one",
  3599  			},
  3600  		},
  3601  	}
  3602  	expectedNoCausesDetailsError := &apierrors.StatusError{
  3603  		ErrStatus: metav1.Status{
  3604  			Status: metav1.StatusFailure,
  3605  			Code:   http.StatusUnprocessableEntity,
  3606  			Reason: metav1.StatusReasonInvalid,
  3607  			// The only difference between the two objects should be in the Message section.
  3608  			Message: "failed to create DockerMachineTemplate.infrastructure.cluster.x-k8s.io",
  3609  			Details: &metav1.StatusDetails{
  3610  				Group: "infrastructure.cluster.x-k8s.io",
  3611  				Kind:  "DockerMachineTemplate",
  3612  				Name:  "docker-template-one",
  3613  			},
  3614  		},
  3615  	}
  3616  	noDetailsError := &apierrors.StatusError{
  3617  		ErrStatus: metav1.Status{
  3618  			Status:  metav1.StatusFailure,
  3619  			Code:    http.StatusUnprocessableEntity,
  3620  			Reason:  metav1.StatusReasonInvalid,
  3621  			Message: "DockerMachineTemplate.infrastructure.cluster.x-k8s.io \"docker-template-one\" is invalid: spec.template.spec.preLoadImages: Invalid value: \"array\": spec.template.spec.preLoadImages in body must be of type string: \"array\"",
  3622  		},
  3623  	}
  3624  	expectedNoDetailsError := &apierrors.StatusError{
  3625  		ErrStatus: metav1.Status{
  3626  			Status: metav1.StatusFailure,
  3627  			Code:   http.StatusUnprocessableEntity,
  3628  			Reason: metav1.StatusReasonInvalid,
  3629  			// The only difference between the two objects should be in the Message section.
  3630  			Message: "failed to create TestControlPlane.controlplane.cluster.x-k8s.io",
  3631  		},
  3632  	}
  3633  	expectedObjectNilError := &apierrors.StatusError{
  3634  		ErrStatus: metav1.Status{
  3635  			Status: metav1.StatusFailure,
  3636  			Code:   http.StatusUnprocessableEntity,
  3637  			Reason: metav1.StatusReasonInvalid,
  3638  			// The only difference between the two objects should be in the Message section.
  3639  			Message: "failed to create object",
  3640  		},
  3641  	}
  3642  	nonStatusError := errors.New("an unexpected error with unknown information inside")
  3643  	expectedNonStatusError := errors.New("failed to create TestControlPlane.controlplane.cluster.x-k8s.io")
  3644  	expectedNilObjectNonStatusError := errors.New("failed to create object")
  3645  	tests := []struct {
  3646  		name     string
  3647  		input    error
  3648  		expected error
  3649  		obj      client.Object
  3650  	}{
  3651  		{
  3652  			name:     "Remove name from status error with details",
  3653  			input:    detailsError,
  3654  			expected: expectedDetailsError,
  3655  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  3656  		},
  3657  		{
  3658  			name:     "Remove name from status error with details but no causes",
  3659  			input:    NoCausesDetailsError,
  3660  			expected: expectedNoCausesDetailsError,
  3661  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  3662  		},
  3663  		{
  3664  			name:     "Remove name from status error with no details",
  3665  			input:    noDetailsError,
  3666  			expected: expectedNoDetailsError,
  3667  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  3668  		},
  3669  		{
  3670  			name:     "Remove name from status error with nil object",
  3671  			input:    noDetailsError,
  3672  			expected: expectedObjectNilError,
  3673  			obj:      nil,
  3674  		},
  3675  		{
  3676  			name:     "Remove name from status error with nil object",
  3677  			input:    noDetailsError,
  3678  			expected: expectedObjectNilError,
  3679  			obj:      nil,
  3680  		},
  3681  		{
  3682  			name:     "Replace message of non status error",
  3683  			input:    nonStatusError,
  3684  			expected: expectedNonStatusError,
  3685  			obj:      builder.TestControlPlane("default", "cp1").Build(),
  3686  		},
  3687  		{
  3688  			name:     "Replace message of non status error with nil object",
  3689  			input:    nonStatusError,
  3690  			expected: expectedNilObjectNonStatusError,
  3691  			obj:      nil,
  3692  		},
  3693  	}
  3694  	for _, tt := range tests {
  3695  		t.Run(tt.name, func(t *testing.T) {
  3696  			g := NewWithT(t)
  3697  			err := createErrorWithoutObjectName(ctx, tt.input, tt.obj)
  3698  			g.Expect(err.Error()).To(Equal(tt.expected.Error()))
  3699  		})
  3700  	}
  3701  }