sigs.k8s.io/cluster-api@v1.7.1/controlplane/kubeadm/internal/controllers/controller_test.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package controllers
    18  
    19  import (
    20  	"context"
    21  	"crypto/rand"
    22  	"crypto/rsa"
    23  	"crypto/x509"
    24  	"crypto/x509/pkix"
    25  	"fmt"
    26  	"math/big"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/blang/semver/v4"
    32  	. "github.com/onsi/gomega"
    33  	appsv1 "k8s.io/api/apps/v1"
    34  	corev1 "k8s.io/api/core/v1"
    35  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    37  	"k8s.io/apimachinery/pkg/types"
    38  	"k8s.io/apimachinery/pkg/util/intstr"
    39  	"k8s.io/client-go/tools/record"
    40  	"k8s.io/utils/ptr"
    41  	ctrl "sigs.k8s.io/controller-runtime"
    42  	"sigs.k8s.io/controller-runtime/pkg/client"
    43  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    44  	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    45  
    46  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    47  	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    48  	"sigs.k8s.io/cluster-api/controllers/external"
    49  	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
    50  	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
    51  	controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks"
    52  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    53  	"sigs.k8s.io/cluster-api/feature"
    54  	"sigs.k8s.io/cluster-api/internal/contract"
    55  	"sigs.k8s.io/cluster-api/internal/test/builder"
    56  	"sigs.k8s.io/cluster-api/internal/util/ssa"
    57  	"sigs.k8s.io/cluster-api/internal/webhooks"
    58  	"sigs.k8s.io/cluster-api/util"
    59  	"sigs.k8s.io/cluster-api/util/certs"
    60  	"sigs.k8s.io/cluster-api/util/collections"
    61  	"sigs.k8s.io/cluster-api/util/conditions"
    62  	"sigs.k8s.io/cluster-api/util/kubeconfig"
    63  	"sigs.k8s.io/cluster-api/util/patch"
    64  	"sigs.k8s.io/cluster-api/util/secret"
    65  )
    66  
    67  func TestClusterToKubeadmControlPlane(t *testing.T) {
    68  	g := NewWithT(t)
    69  	fakeClient := newFakeClient()
    70  
    71  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
    72  	cluster.Spec = clusterv1.ClusterSpec{
    73  		ControlPlaneRef: &corev1.ObjectReference{
    74  			Kind:       "KubeadmControlPlane",
    75  			Namespace:  metav1.NamespaceDefault,
    76  			Name:       "kcp-foo",
    77  			APIVersion: controlplanev1.GroupVersion.String(),
    78  		},
    79  	}
    80  
    81  	expectedResult := []ctrl.Request{
    82  		{
    83  			NamespacedName: client.ObjectKey{
    84  				Namespace: cluster.Spec.ControlPlaneRef.Namespace,
    85  				Name:      cluster.Spec.ControlPlaneRef.Name},
    86  		},
    87  	}
    88  
    89  	r := &KubeadmControlPlaneReconciler{
    90  		Client:              fakeClient,
    91  		SecretCachingClient: fakeClient,
    92  		recorder:            record.NewFakeRecorder(32),
    93  	}
    94  
    95  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
    96  	g.Expect(got).To(BeComparableTo(expectedResult))
    97  }
    98  
    99  func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) {
   100  	g := NewWithT(t)
   101  	fakeClient := newFakeClient()
   102  
   103  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   104  
   105  	r := &KubeadmControlPlaneReconciler{
   106  		Client:              fakeClient,
   107  		SecretCachingClient: fakeClient,
   108  		recorder:            record.NewFakeRecorder(32),
   109  	}
   110  
   111  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
   112  	g.Expect(got).To(BeNil())
   113  }
   114  
   115  func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) {
   116  	g := NewWithT(t)
   117  	fakeClient := newFakeClient()
   118  
   119  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   120  	cluster.Spec = clusterv1.ClusterSpec{
   121  		ControlPlaneRef: &corev1.ObjectReference{
   122  			Kind:       "OtherControlPlane",
   123  			Namespace:  metav1.NamespaceDefault,
   124  			Name:       "other-foo",
   125  			APIVersion: controlplanev1.GroupVersion.String(),
   126  		},
   127  	}
   128  
   129  	r := &KubeadmControlPlaneReconciler{
   130  		Client:              fakeClient,
   131  		SecretCachingClient: fakeClient,
   132  		recorder:            record.NewFakeRecorder(32),
   133  	}
   134  
   135  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
   136  	g.Expect(got).To(BeNil())
   137  }
   138  
   139  func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) {
   140  	g := NewWithT(t)
   141  
   142  	ns, err := env.CreateNamespace(ctx, "test-reconcile-return-error")
   143  	g.Expect(err).ToNot(HaveOccurred())
   144  
   145  	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
   146  	g.Expect(env.Create(ctx, cluster)).To(Succeed())
   147  	g.Expect(env.Create(ctx, kcp)).To(Succeed())
   148  	defer func(do ...client.Object) {
   149  		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
   150  	}(kcp, ns)
   151  
   152  	r := &KubeadmControlPlaneReconciler{
   153  		Client:              env,
   154  		SecretCachingClient: secretCachingClient,
   155  		recorder:            record.NewFakeRecorder(32),
   156  	}
   157  
   158  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   159  	g.Expect(err).ToNot(HaveOccurred())
   160  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   161  
   162  	// calling reconcile should return error
   163  	g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed())
   164  
   165  	g.Eventually(func() error {
   166  		_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   167  		return err
   168  	}, 10*time.Second).Should(HaveOccurred())
   169  }
   170  
   171  func TestReconcileUpdateObservedGeneration(t *testing.T) {
   172  	t.Skip("Disabling this test temporarily until we can get a fix for https://github.com/kubernetes/kubernetes/issues/80609 in controller runtime + switch to a live client in test env.")
   173  
   174  	g := NewWithT(t)
   175  	r := &KubeadmControlPlaneReconciler{
   176  		Client:              env,
   177  		SecretCachingClient: secretCachingClient,
   178  		recorder:            record.NewFakeRecorder(32),
   179  		managementCluster:   &internal.Management{Client: env.Client, Tracker: nil},
   180  	}
   181  
   182  	ns, err := env.CreateNamespace(ctx, "test-reconcile-upd-og")
   183  	g.Expect(err).ToNot(HaveOccurred())
   184  
   185  	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
   186  	g.Expect(env.Create(ctx, cluster)).To(Succeed())
   187  	g.Expect(env.Create(ctx, kcp)).To(Succeed())
   188  	defer func(do ...client.Object) {
   189  		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
   190  	}(cluster, kcp, ns)
   191  
   192  	// read kcp.Generation after create
   193  	errGettingObject := env.Get(ctx, util.ObjectKey(kcp), kcp)
   194  	g.Expect(errGettingObject).ToNot(HaveOccurred())
   195  	generation := kcp.Generation
   196  
   197  	// Set cluster.status.InfrastructureReady so we actually enter in the reconcile loop
   198  	patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"status\":{\"infrastructureReady\":%t}}", true)))
   199  	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())
   200  
   201  	// call reconcile the first time, so we can check if observedGeneration is set when adding a finalizer
   202  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   203  	g.Expect(err).ToNot(HaveOccurred())
   204  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   205  
   206  	g.Eventually(func() int64 {
   207  		errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
   208  		g.Expect(errGettingObject).ToNot(HaveOccurred())
   209  		return kcp.Status.ObservedGeneration
   210  	}, 10*time.Second).Should(Equal(generation))
   211  
   212  	// triggers a generation change by changing the spec
   213  	kcp.Spec.Replicas = ptr.To[int32](*kcp.Spec.Replicas + 2)
   214  	g.Expect(env.Update(ctx, kcp)).To(Succeed())
   215  
   216  	// read kcp.Generation after the update
   217  	errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
   218  	g.Expect(errGettingObject).ToNot(HaveOccurred())
   219  	generation = kcp.Generation
   220  
   221  	// call reconcile the second time, so we can check if observedGeneration is set when calling defer patch
   222  	// NB. The call to reconcile fails because KCP is not properly setup (e.g. missing InfrastructureTemplate)
   223  	// but this is not important because what we want is KCP to be patched
   224  	_, _ = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   225  
   226  	g.Eventually(func() int64 {
   227  		errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
   228  		g.Expect(errGettingObject).ToNot(HaveOccurred())
   229  		return kcp.Status.ObservedGeneration
   230  	}, 10*time.Second).Should(Equal(generation))
   231  }
   232  
   233  func TestReconcileNoClusterOwnerRef(t *testing.T) {
   234  	g := NewWithT(t)
   235  
   236  	kcp := &controlplanev1.KubeadmControlPlane{
   237  		ObjectMeta: metav1.ObjectMeta{
   238  			Namespace: metav1.NamespaceDefault,
   239  			Name:      "foo",
   240  		},
   241  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   242  			Version: "v1.16.6",
   243  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   244  				InfrastructureRef: corev1.ObjectReference{
   245  					Kind:       "UnknownInfraMachine",
   246  					APIVersion: "test/v1alpha1",
   247  					Name:       "foo",
   248  					Namespace:  metav1.NamespaceDefault,
   249  				},
   250  			},
   251  		},
   252  	}
   253  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   254  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   255  	_, err := webhook.ValidateCreate(ctx, kcp)
   256  	g.Expect(err).ToNot(HaveOccurred())
   257  
   258  	fakeClient := newFakeClient(kcp.DeepCopy())
   259  	r := &KubeadmControlPlaneReconciler{
   260  		Client:              fakeClient,
   261  		SecretCachingClient: fakeClient,
   262  		recorder:            record.NewFakeRecorder(32),
   263  	}
   264  
   265  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   266  	g.Expect(err).ToNot(HaveOccurred())
   267  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   268  
   269  	machineList := &clusterv1.MachineList{}
   270  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   271  	g.Expect(machineList.Items).To(BeEmpty())
   272  }
   273  
   274  func TestReconcileNoKCP(t *testing.T) {
   275  	g := NewWithT(t)
   276  
   277  	kcp := &controlplanev1.KubeadmControlPlane{
   278  		ObjectMeta: metav1.ObjectMeta{
   279  			Namespace: metav1.NamespaceDefault,
   280  			Name:      "foo",
   281  		},
   282  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   283  			Version: "v1.16.6",
   284  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   285  				InfrastructureRef: corev1.ObjectReference{
   286  					Kind:       "UnknownInfraMachine",
   287  					APIVersion: "test/v1alpha1",
   288  					Name:       "foo",
   289  					Namespace:  metav1.NamespaceDefault,
   290  				},
   291  			},
   292  		},
   293  	}
   294  
   295  	fakeClient := newFakeClient()
   296  	r := &KubeadmControlPlaneReconciler{
   297  		Client:              fakeClient,
   298  		SecretCachingClient: fakeClient,
   299  		recorder:            record.NewFakeRecorder(32),
   300  	}
   301  
   302  	_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   303  	g.Expect(err).ToNot(HaveOccurred())
   304  }
   305  
   306  func TestReconcileNoCluster(t *testing.T) {
   307  	g := NewWithT(t)
   308  
   309  	kcp := &controlplanev1.KubeadmControlPlane{
   310  		ObjectMeta: metav1.ObjectMeta{
   311  			Namespace: metav1.NamespaceDefault,
   312  			Name:      "foo",
   313  			OwnerReferences: []metav1.OwnerReference{
   314  				{
   315  					Kind:       "Cluster",
   316  					APIVersion: clusterv1.GroupVersion.String(),
   317  					Name:       "foo",
   318  				},
   319  			},
   320  		},
   321  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   322  			Version: "v1.16.6",
   323  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   324  				InfrastructureRef: corev1.ObjectReference{
   325  					Kind:       "UnknownInfraMachine",
   326  					APIVersion: "test/v1alpha1",
   327  					Name:       "foo",
   328  					Namespace:  metav1.NamespaceDefault,
   329  				},
   330  			},
   331  		},
   332  	}
   333  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   334  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   335  	_, err := webhook.ValidateCreate(ctx, kcp)
   336  	g.Expect(err).ToNot(HaveOccurred())
   337  
   338  	fakeClient := newFakeClient(kcp.DeepCopy())
   339  	r := &KubeadmControlPlaneReconciler{
   340  		Client:              fakeClient,
   341  		SecretCachingClient: fakeClient,
   342  		recorder:            record.NewFakeRecorder(32),
   343  	}
   344  
   345  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   346  	g.Expect(err).To(HaveOccurred())
   347  
   348  	machineList := &clusterv1.MachineList{}
   349  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   350  	g.Expect(machineList.Items).To(BeEmpty())
   351  }
   352  
   353  func TestReconcilePaused(t *testing.T) {
   354  	g := NewWithT(t)
   355  
   356  	clusterName := "foo"
   357  
   358  	// Test: cluster is paused and kcp is not
   359  	cluster := newCluster(&types.NamespacedName{Namespace: metav1.NamespaceDefault, Name: clusterName})
   360  	cluster.Spec.Paused = true
   361  	kcp := &controlplanev1.KubeadmControlPlane{
   362  		ObjectMeta: metav1.ObjectMeta{
   363  			Namespace: metav1.NamespaceDefault,
   364  			Name:      clusterName,
   365  			OwnerReferences: []metav1.OwnerReference{
   366  				{
   367  					Kind:       "Cluster",
   368  					APIVersion: clusterv1.GroupVersion.String(),
   369  					Name:       clusterName,
   370  				},
   371  			},
   372  		},
   373  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   374  			Version: "v1.16.6",
   375  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   376  				InfrastructureRef: corev1.ObjectReference{
   377  					Kind:       "UnknownInfraMachine",
   378  					APIVersion: "test/v1alpha1",
   379  					Name:       "foo",
   380  					Namespace:  metav1.NamespaceDefault,
   381  				},
   382  			},
   383  		},
   384  	}
   385  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   386  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   387  	_, err := webhook.ValidateCreate(ctx, kcp)
   388  	g.Expect(err).ToNot(HaveOccurred())
   389  	fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
   390  	r := &KubeadmControlPlaneReconciler{
   391  		Client:              fakeClient,
   392  		SecretCachingClient: fakeClient,
   393  		recorder:            record.NewFakeRecorder(32),
   394  	}
   395  
   396  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   397  	g.Expect(err).ToNot(HaveOccurred())
   398  
   399  	machineList := &clusterv1.MachineList{}
   400  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   401  	g.Expect(machineList.Items).To(BeEmpty())
   402  
   403  	// Test: kcp is paused and cluster is not
   404  	cluster.Spec.Paused = false
   405  	kcp.ObjectMeta.Annotations = map[string]string{}
   406  	kcp.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused"
   407  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   408  	g.Expect(err).ToNot(HaveOccurred())
   409  }
   410  
   411  func TestReconcileClusterNoEndpoints(t *testing.T) {
   412  	g := NewWithT(t)
   413  
   414  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   415  	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
   416  
   417  	kcp := &controlplanev1.KubeadmControlPlane{
   418  		ObjectMeta: metav1.ObjectMeta{
   419  			Namespace: cluster.Namespace,
   420  			Name:      "foo",
   421  			OwnerReferences: []metav1.OwnerReference{
   422  				{
   423  					Kind:       "Cluster",
   424  					APIVersion: clusterv1.GroupVersion.String(),
   425  					Name:       cluster.Name,
   426  				},
   427  			},
   428  		},
   429  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   430  			Version: "v1.16.6",
   431  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   432  				InfrastructureRef: corev1.ObjectReference{
   433  					Kind:       "UnknownInfraMachine",
   434  					APIVersion: "test/v1alpha1",
   435  					Name:       "foo",
   436  					Namespace:  metav1.NamespaceDefault,
   437  				},
   438  			},
   439  		},
   440  	}
   441  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   442  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   443  	_, err := webhook.ValidateCreate(ctx, kcp)
   444  	g.Expect(err).ToNot(HaveOccurred())
   445  
   446  	fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
   447  	r := &KubeadmControlPlaneReconciler{
   448  		Client:              fakeClient,
   449  		SecretCachingClient: fakeClient,
   450  		recorder:            record.NewFakeRecorder(32),
   451  		managementCluster: &fakeManagementCluster{
   452  			Management: &internal.Management{Client: fakeClient},
   453  			Workload:   fakeWorkloadCluster{},
   454  		},
   455  	}
   456  
   457  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   458  	g.Expect(err).ToNot(HaveOccurred())
   459  	// this first requeue is to add finalizer
   460  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   461  	g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
   462  	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
   463  
   464  	result, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   465  	g.Expect(err).ToNot(HaveOccurred())
   466  	// TODO: this should stop to re-queue as soon as we have a proper remote cluster cache in place.
   467  	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false, RequeueAfter: 20 * time.Second}))
   468  	g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
   469  
   470  	// Always expect that the Finalizer is set on the passed in resource
   471  	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
   472  
   473  	g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
   474  
   475  	_, err = secret.GetFromNamespacedName(ctx, fakeClient, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: "foo"}, secret.ClusterCA)
   476  	g.Expect(err).ToNot(HaveOccurred())
   477  
   478  	machineList := &clusterv1.MachineList{}
   479  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   480  	g.Expect(machineList.Items).To(BeEmpty())
   481  }
   482  
   483  func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) {
   484  	version := "v2.0.0"
   485  	t.Run("adopts existing Machines", func(t *testing.T) {
   486  		g := NewWithT(t)
   487  
   488  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   489  		cluster.Spec.ControlPlaneEndpoint.Host = "bar"
   490  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   491  		cluster.Status.InfrastructureReady = true
   492  		kcp.Spec.Version = version
   493  
   494  		fmc := &fakeManagementCluster{
   495  			Machines: collections.Machines{},
   496  			Workload: fakeWorkloadCluster{},
   497  		}
   498  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   499  		for i := 0; i < 3; i++ {
   500  			name := fmt.Sprintf("test-%d", i)
   501  			m := &clusterv1.Machine{
   502  				ObjectMeta: metav1.ObjectMeta{
   503  					Namespace: cluster.Namespace,
   504  					Name:      name,
   505  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   506  				},
   507  				Spec: clusterv1.MachineSpec{
   508  					Bootstrap: clusterv1.Bootstrap{
   509  						ConfigRef: &corev1.ObjectReference{
   510  							APIVersion: bootstrapv1.GroupVersion.String(),
   511  							Kind:       "KubeadmConfig",
   512  							Name:       name,
   513  						},
   514  					},
   515  					Version: &version,
   516  				},
   517  			}
   518  			cfg := &bootstrapv1.KubeadmConfig{
   519  				ObjectMeta: metav1.ObjectMeta{
   520  					Namespace: cluster.Namespace,
   521  					Name:      name,
   522  				},
   523  			}
   524  			objs = append(objs, m, cfg)
   525  			fmc.Machines.Insert(m)
   526  		}
   527  
   528  		fakeClient := newFakeClient(objs...)
   529  		fmc.Reader = fakeClient
   530  		r := &KubeadmControlPlaneReconciler{
   531  			Client:                    fakeClient,
   532  			SecretCachingClient:       fakeClient,
   533  			managementCluster:         fmc,
   534  			managementClusterUncached: fmc,
   535  		}
   536  
   537  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   538  		g.Expect(err).ToNot(HaveOccurred())
   539  		g.Expect(adoptableMachineFound).To(BeTrue())
   540  
   541  		machineList := &clusterv1.MachineList{}
   542  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   543  		g.Expect(machineList.Items).To(HaveLen(3))
   544  		for _, machine := range machineList.Items {
   545  			g.Expect(machine.OwnerReferences).To(HaveLen(1))
   546  			g.Expect(machine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   547  			// Machines are adopted but since they are not originally created by KCP, infra template annotation will be missing.
   548  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromGroupKindAnnotation))
   549  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromNameAnnotation))
   550  		}
   551  	})
   552  
   553  	t.Run("adopts v1alpha2 cluster secrets", func(t *testing.T) {
   554  		g := NewWithT(t)
   555  
   556  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   557  		cluster.Spec.ControlPlaneEndpoint.Host = "validhost"
   558  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   559  		cluster.Status.InfrastructureReady = true
   560  		kcp.Spec.Version = version
   561  
   562  		fmc := &fakeManagementCluster{
   563  			Machines: collections.Machines{},
   564  			Workload: fakeWorkloadCluster{},
   565  		}
   566  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   567  		for i := 0; i < 3; i++ {
   568  			name := fmt.Sprintf("test-%d", i)
   569  			m := &clusterv1.Machine{
   570  				ObjectMeta: metav1.ObjectMeta{
   571  					Namespace: cluster.Namespace,
   572  					Name:      name,
   573  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   574  				},
   575  				Spec: clusterv1.MachineSpec{
   576  					Bootstrap: clusterv1.Bootstrap{
   577  						ConfigRef: &corev1.ObjectReference{
   578  							APIVersion: bootstrapv1.GroupVersion.String(),
   579  							Kind:       "KubeadmConfig",
   580  							Name:       name,
   581  						},
   582  					},
   583  					Version: &version,
   584  				},
   585  			}
   586  			cfg := &bootstrapv1.KubeadmConfig{
   587  				TypeMeta: metav1.TypeMeta{
   588  					APIVersion: bootstrapv1.GroupVersion.String(),
   589  					Kind:       "KubeadmConfig",
   590  				},
   591  				ObjectMeta: metav1.ObjectMeta{
   592  					Namespace: cluster.Namespace,
   593  					Name:      name,
   594  					UID:       types.UID(fmt.Sprintf("my-uid-%d", i)),
   595  				},
   596  			}
   597  
   598  			// A simulacrum of the various Certificate and kubeconfig secrets
   599  			// it's a little weird that this is one per KubeadmConfig rather than just whichever config was "first,"
   600  			// but the intent is to ensure that the owner is changed regardless of which Machine we start with
   601  			clusterSecret := &corev1.Secret{
   602  				ObjectMeta: metav1.ObjectMeta{
   603  					Namespace: cluster.Namespace,
   604  					Name:      fmt.Sprintf("important-cluster-secret-%d", i),
   605  					Labels: map[string]string{
   606  						"cluster.x-k8s.io/cluster-name": cluster.Name,
   607  						"previous-owner":                "kubeadmconfig",
   608  					},
   609  					// See: https://github.com/kubernetes-sigs/cluster-api-bootstrap-provider-kubeadm/blob/38af74d92056e64e571b9ea1d664311769779453/internal/cluster/certificates.go#L323-L330
   610  					OwnerReferences: []metav1.OwnerReference{
   611  						{
   612  							APIVersion: bootstrapv1.GroupVersion.String(),
   613  							Kind:       "KubeadmConfig",
   614  							Name:       cfg.Name,
   615  							UID:        cfg.UID,
   616  						},
   617  					},
   618  				},
   619  			}
   620  			objs = append(objs, m, cfg, clusterSecret)
   621  			fmc.Machines.Insert(m)
   622  		}
   623  
   624  		fakeClient := newFakeClient(objs...)
   625  		fmc.Reader = fakeClient
   626  		r := &KubeadmControlPlaneReconciler{
   627  			Client:                    fakeClient,
   628  			SecretCachingClient:       fakeClient,
   629  			managementCluster:         fmc,
   630  			managementClusterUncached: fmc,
   631  		}
   632  
   633  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   634  		g.Expect(err).ToNot(HaveOccurred())
   635  		g.Expect(adoptableMachineFound).To(BeTrue())
   636  
   637  		machineList := &clusterv1.MachineList{}
   638  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   639  		g.Expect(machineList.Items).To(HaveLen(3))
   640  		for _, machine := range machineList.Items {
   641  			g.Expect(machine.OwnerReferences).To(HaveLen(1))
   642  			g.Expect(machine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   643  			// Machines are adopted but since they are not originally created by KCP, infra template annotation will be missing.
   644  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromGroupKindAnnotation))
   645  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromNameAnnotation))
   646  		}
   647  
   648  		secrets := &corev1.SecretList{}
   649  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"previous-owner": "kubeadmconfig"})).To(Succeed())
   650  		g.Expect(secrets.Items).To(HaveLen(3))
   651  		for _, secret := range secrets.Items {
   652  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   653  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   654  		}
   655  	})
   656  
   657  	t.Run("Deleted KubeadmControlPlanes don't adopt machines", func(t *testing.T) {
   658  		// Usually we won't get into the inner reconcile with a deleted control plane, but it's possible when deleting with "orphanDependents":
   659  		// 1. The deletion timestamp is set in the API server, but our cache has not yet updated
   660  		// 2. The garbage collector removes our ownership reference from a Machine, triggering a re-reconcile (or we get unlucky with the periodic reconciliation)
   661  		// 3. We get into the inner reconcile function and re-adopt the Machine
   662  		// 4. The update to our cache for our deletion timestamp arrives
   663  		g := NewWithT(t)
   664  
   665  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   666  		cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1"
   667  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   668  		cluster.Status.InfrastructureReady = true
   669  		kcp.Spec.Version = version
   670  
   671  		now := metav1.Now()
   672  		kcp.DeletionTimestamp = &now
   673  		// We also have to set a finalizer as fake client doesn't accept objects
   674  		// with a deletionTimestamp without a finalizer.
   675  		kcp.Finalizers = []string{"block-deletion"}
   676  
   677  		fmc := &fakeManagementCluster{
   678  			Machines: collections.Machines{},
   679  			Workload: fakeWorkloadCluster{},
   680  		}
   681  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   682  		for i := 0; i < 3; i++ {
   683  			name := fmt.Sprintf("test-%d", i)
   684  			m := &clusterv1.Machine{
   685  				ObjectMeta: metav1.ObjectMeta{
   686  					Namespace: cluster.Namespace,
   687  					Name:      name,
   688  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   689  				},
   690  				Spec: clusterv1.MachineSpec{
   691  					Bootstrap: clusterv1.Bootstrap{
   692  						ConfigRef: &corev1.ObjectReference{
   693  							APIVersion: bootstrapv1.GroupVersion.String(),
   694  							Kind:       "KubeadmConfig",
   695  							Name:       name,
   696  						},
   697  					},
   698  					Version: &version,
   699  				},
   700  			}
   701  			cfg := &bootstrapv1.KubeadmConfig{
   702  				ObjectMeta: metav1.ObjectMeta{
   703  					Namespace: cluster.Namespace,
   704  					Name:      name,
   705  				},
   706  			}
   707  			objs = append(objs, m, cfg)
   708  			fmc.Machines.Insert(m)
   709  		}
   710  		fakeClient := newFakeClient(objs...)
   711  		fmc.Reader = fakeClient
   712  		r := &KubeadmControlPlaneReconciler{
   713  			Client:                    fakeClient,
   714  			SecretCachingClient:       fakeClient,
   715  			managementCluster:         fmc,
   716  			managementClusterUncached: fmc,
   717  		}
   718  
   719  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   720  		g.Expect(err).ToNot(HaveOccurred())
   721  		g.Expect(adoptableMachineFound).To(BeFalse())
   722  
   723  		machineList := &clusterv1.MachineList{}
   724  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   725  		g.Expect(machineList.Items).To(HaveLen(3))
   726  		for _, machine := range machineList.Items {
   727  			g.Expect(machine.OwnerReferences).To(BeEmpty())
   728  		}
   729  	})
   730  
   731  	t.Run("Do not adopt Machines that are more than one version old", func(t *testing.T) {
   732  		g := NewWithT(t)
   733  
   734  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   735  		cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com2"
   736  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   737  		cluster.Status.InfrastructureReady = true
   738  		kcp.Spec.Version = "v1.17.0"
   739  
   740  		fmc := &fakeManagementCluster{
   741  			Machines: collections.Machines{
   742  				"test0": &clusterv1.Machine{
   743  					ObjectMeta: metav1.ObjectMeta{
   744  						Namespace: cluster.Namespace,
   745  						Name:      "test0",
   746  						Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   747  					},
   748  					Spec: clusterv1.MachineSpec{
   749  						Bootstrap: clusterv1.Bootstrap{
   750  							ConfigRef: &corev1.ObjectReference{
   751  								APIVersion: bootstrapv1.GroupVersion.String(),
   752  								Kind:       "KubeadmConfig",
   753  							},
   754  						},
   755  						Version: ptr.To("v1.15.0"),
   756  					},
   757  				},
   758  			},
   759  			Workload: fakeWorkloadCluster{},
   760  		}
   761  
   762  		fakeClient := newFakeClient(builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy(), fmc.Machines["test0"].DeepCopy())
   763  		fmc.Reader = fakeClient
   764  		recorder := record.NewFakeRecorder(32)
   765  		r := &KubeadmControlPlaneReconciler{
   766  			Client:                    fakeClient,
   767  			SecretCachingClient:       fakeClient,
   768  			recorder:                  recorder,
   769  			managementCluster:         fmc,
   770  			managementClusterUncached: fmc,
   771  		}
   772  
   773  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   774  		g.Expect(err).ToNot(HaveOccurred())
   775  		g.Expect(adoptableMachineFound).To(BeTrue())
   776  
   777  		// Message: Warning AdoptionFailed Could not adopt Machine test/test0: its version ("v1.15.0") is outside supported +/- one minor version skew from KCP's ("v1.17.0")
   778  		g.Expect(recorder.Events).To(Receive(ContainSubstring("minor version")))
   779  
   780  		machineList := &clusterv1.MachineList{}
   781  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   782  		g.Expect(machineList.Items).To(HaveLen(1))
   783  		for _, machine := range machineList.Items {
   784  			g.Expect(machine.OwnerReferences).To(BeEmpty())
   785  		}
   786  	})
   787  }
   788  
   789  func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) {
   790  	g := NewWithT(t)
   791  
   792  	cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   793  	cluster.Spec.ControlPlaneEndpoint.Host = "bar"
   794  	cluster.Spec.ControlPlaneEndpoint.Port = 6443
   795  	cluster.Status.InfrastructureReady = true
   796  	kcp.Spec.Version = "v1.21.0"
   797  	key, err := certs.NewPrivateKey()
   798  	g.Expect(err).ToNot(HaveOccurred())
   799  	crt, err := getTestCACert(key)
   800  	g.Expect(err).ToNot(HaveOccurred())
   801  
   802  	clusterSecret := &corev1.Secret{
   803  		// The Secret's Type is used by KCP to determine whether it is user-provided.
   804  		// clusterv1.ClusterSecretType signals that the Secret is CAPI-provided.
   805  		ObjectMeta: metav1.ObjectMeta{
   806  			Namespace: cluster.Namespace,
   807  			Name:      "",
   808  			Labels: map[string]string{
   809  				"cluster.x-k8s.io/cluster-name": cluster.Name,
   810  				"testing":                       "yes",
   811  			},
   812  		},
   813  		Data: map[string][]byte{
   814  			secret.TLSCrtDataName: certs.EncodeCertPEM(crt),
   815  			secret.TLSKeyDataName: certs.EncodePrivateKeyPEM(key),
   816  		},
   817  	}
   818  
   819  	kcpOwner := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
   820  
   821  	t.Run("add KCP owner for secrets with no controller reference", func(*testing.T) {
   822  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   823  		certificates := secret.Certificates{
   824  			{Purpose: secret.ClusterCA},
   825  			{Purpose: secret.FrontProxyCA},
   826  			{Purpose: secret.ServiceAccount},
   827  			{Purpose: secret.EtcdCA},
   828  		}
   829  		for _, c := range certificates {
   830  			s := clusterSecret.DeepCopy()
   831  			// Set the secret name to the purpose
   832  			s.Name = secret.Name(cluster.Name, c.Purpose)
   833  			// Set the Secret Type to clusterv1.ClusterSecretType which signals this Secret was generated by CAPI.
   834  			s.Type = clusterv1.ClusterSecretType
   835  
   836  			// Store the secret in the certificate.
   837  			c.Secret = s
   838  
   839  			objs = append(objs, s)
   840  		}
   841  
   842  		fakeClient := newFakeClient(objs...)
   843  
   844  		r := KubeadmControlPlaneReconciler{
   845  			Client:              fakeClient,
   846  			SecretCachingClient: fakeClient,
   847  		}
   848  		err = r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   849  		g.Expect(err).ToNot(HaveOccurred())
   850  
   851  		secrets := &corev1.SecretList{}
   852  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   853  		for _, secret := range secrets.Items {
   854  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   855  		}
   856  	})
   857  
   858  	t.Run("replace non-KCP controller with KCP controller reference", func(*testing.T) {
   859  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   860  		certificates := secret.Certificates{
   861  			{Purpose: secret.ClusterCA},
   862  			{Purpose: secret.FrontProxyCA},
   863  			{Purpose: secret.ServiceAccount},
   864  			{Purpose: secret.EtcdCA},
   865  		}
   866  		for _, c := range certificates {
   867  			s := clusterSecret.DeepCopy()
   868  			// Set the secret name to the purpose
   869  			s.Name = secret.Name(cluster.Name, c.Purpose)
   870  			// Set the Secret Type to clusterv1.ClusterSecretType which signals this Secret was generated by CAPI.
   871  			s.Type = clusterv1.ClusterSecretType
   872  
   873  			// Set the a controller owner reference of an unknown type on the secret.
   874  			s.SetOwnerReferences([]metav1.OwnerReference{
   875  				{
   876  					APIVersion: bootstrapv1.GroupVersion.String(),
   877  					// KCP should take ownership of any Secret of the correct type linked to the Cluster.
   878  					Kind:       "OtherController",
   879  					Name:       "name",
   880  					UID:        "uid",
   881  					Controller: ptr.To(true),
   882  				},
   883  			})
   884  
   885  			// Store the secret in the certificate.
   886  			c.Secret = s
   887  
   888  			objs = append(objs, s)
   889  		}
   890  
   891  		fakeClient := newFakeClient(objs...)
   892  
   893  		r := KubeadmControlPlaneReconciler{
   894  			Client:              fakeClient,
   895  			SecretCachingClient: fakeClient,
   896  		}
   897  		err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   898  		g.Expect(err).ToNot(HaveOccurred())
   899  
   900  		secrets := &corev1.SecretList{}
   901  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   902  		for _, secret := range secrets.Items {
   903  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   904  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   905  		}
   906  	})
   907  
   908  	t.Run("does not add owner reference to user-provided secrets", func(t *testing.T) {
   909  		g := NewWithT(t)
   910  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   911  		certificates := secret.Certificates{
   912  			{Purpose: secret.ClusterCA},
   913  			{Purpose: secret.FrontProxyCA},
   914  			{Purpose: secret.ServiceAccount},
   915  			{Purpose: secret.EtcdCA},
   916  		}
   917  		for _, c := range certificates {
   918  			s := clusterSecret.DeepCopy()
   919  			// Set the secret name to the purpose
   920  			s.Name = secret.Name(cluster.Name, c.Purpose)
   921  			// Set the Secret Type to any type which signals this Secret was is user-provided.
   922  			s.Type = corev1.SecretTypeOpaque
   923  			// Set the a controller owner reference of an unknown type on the secret.
   924  			s.SetOwnerReferences(util.EnsureOwnerRef(s.GetOwnerReferences(),
   925  				metav1.OwnerReference{
   926  					APIVersion: bootstrapv1.GroupVersion.String(),
   927  					// This owner reference to a different controller should be preserved.
   928  					Kind:               "OtherController",
   929  					Name:               kcp.Name,
   930  					UID:                kcp.UID,
   931  					Controller:         ptr.To(true),
   932  					BlockOwnerDeletion: ptr.To(true),
   933  				},
   934  			))
   935  
   936  			// Store the secret in the certificate.
   937  			c.Secret = s
   938  
   939  			objs = append(objs, s)
   940  		}
   941  
   942  		fakeClient := newFakeClient(objs...)
   943  
   944  		r := KubeadmControlPlaneReconciler{
   945  			Client:              fakeClient,
   946  			SecretCachingClient: fakeClient,
   947  		}
   948  		err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   949  		g.Expect(err).ToNot(HaveOccurred())
   950  
   951  		secrets := &corev1.SecretList{}
   952  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   953  		for _, secret := range secrets.Items {
   954  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   955  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, bootstrapv1.GroupVersion.WithKind("OtherController"))))
   956  		}
   957  	})
   958  }
   959  
   960  func TestReconcileCertificateExpiries(t *testing.T) {
   961  	g := NewWithT(t)
   962  
   963  	preExistingExpiry := time.Now().Add(5 * 24 * time.Hour)
   964  	detectedExpiry := time.Now().Add(25 * 24 * time.Hour)
   965  
   966  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   967  	kcp := &controlplanev1.KubeadmControlPlane{
   968  		Status: controlplanev1.KubeadmControlPlaneStatus{Initialized: true},
   969  	}
   970  	machineWithoutExpiryAnnotation := &clusterv1.Machine{
   971  		ObjectMeta: metav1.ObjectMeta{
   972  			Name: "machineWithoutExpiryAnnotation",
   973  		},
   974  		Spec: clusterv1.MachineSpec{
   975  			InfrastructureRef: corev1.ObjectReference{
   976  				Kind:       "GenericMachine",
   977  				APIVersion: "generic.io/v1",
   978  				Namespace:  metav1.NamespaceDefault,
   979  				Name:       "machineWithoutExpiryAnnotation-infra",
   980  			},
   981  			Bootstrap: clusterv1.Bootstrap{
   982  				ConfigRef: &corev1.ObjectReference{
   983  					Kind:       "KubeadmConfig",
   984  					APIVersion: bootstrapv1.GroupVersion.String(),
   985  					Namespace:  metav1.NamespaceDefault,
   986  					Name:       "machineWithoutExpiryAnnotation-bootstrap",
   987  				},
   988  			},
   989  		},
   990  		Status: clusterv1.MachineStatus{
   991  			NodeRef: &corev1.ObjectReference{
   992  				Name: "machineWithoutExpiryAnnotation",
   993  			},
   994  		},
   995  	}
   996  	machineWithoutExpiryAnnotationKubeadmConfig := &bootstrapv1.KubeadmConfig{
   997  		ObjectMeta: metav1.ObjectMeta{
   998  			Name: "machineWithoutExpiryAnnotation-bootstrap",
   999  		},
  1000  	}
  1001  	machineWithExpiryAnnotation := &clusterv1.Machine{
  1002  		ObjectMeta: metav1.ObjectMeta{
  1003  			Name: "machineWithExpiryAnnotation",
  1004  		},
  1005  		Spec: clusterv1.MachineSpec{
  1006  			InfrastructureRef: corev1.ObjectReference{
  1007  				Kind:       "GenericMachine",
  1008  				APIVersion: "generic.io/v1",
  1009  				Namespace:  metav1.NamespaceDefault,
  1010  				Name:       "machineWithExpiryAnnotation-infra",
  1011  			},
  1012  			Bootstrap: clusterv1.Bootstrap{
  1013  				ConfigRef: &corev1.ObjectReference{
  1014  					Kind:       "KubeadmConfig",
  1015  					APIVersion: bootstrapv1.GroupVersion.String(),
  1016  					Namespace:  metav1.NamespaceDefault,
  1017  					Name:       "machineWithExpiryAnnotation-bootstrap",
  1018  				},
  1019  			},
  1020  		},
  1021  		Status: clusterv1.MachineStatus{
  1022  			NodeRef: &corev1.ObjectReference{
  1023  				Name: "machineWithExpiryAnnotation",
  1024  			},
  1025  		},
  1026  	}
  1027  	machineWithExpiryAnnotationKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1028  		ObjectMeta: metav1.ObjectMeta{
  1029  			Name: "machineWithExpiryAnnotation-bootstrap",
  1030  			Annotations: map[string]string{
  1031  				clusterv1.MachineCertificatesExpiryDateAnnotation: preExistingExpiry.Format(time.RFC3339),
  1032  			},
  1033  		},
  1034  	}
  1035  	machineWithDeletionTimestamp := &clusterv1.Machine{
  1036  		ObjectMeta: metav1.ObjectMeta{
  1037  			Name:              "machineWithDeletionTimestamp",
  1038  			DeletionTimestamp: &metav1.Time{Time: time.Now()},
  1039  		},
  1040  		Spec: clusterv1.MachineSpec{
  1041  			InfrastructureRef: corev1.ObjectReference{
  1042  				Kind:       "GenericMachine",
  1043  				APIVersion: "generic.io/v1",
  1044  				Namespace:  metav1.NamespaceDefault,
  1045  				Name:       "machineWithDeletionTimestamp-infra",
  1046  			},
  1047  			Bootstrap: clusterv1.Bootstrap{
  1048  				ConfigRef: &corev1.ObjectReference{
  1049  					Kind:       "KubeadmConfig",
  1050  					APIVersion: bootstrapv1.GroupVersion.String(),
  1051  					Namespace:  metav1.NamespaceDefault,
  1052  					Name:       "machineWithDeletionTimestamp-bootstrap",
  1053  				},
  1054  			},
  1055  		},
  1056  		Status: clusterv1.MachineStatus{
  1057  			NodeRef: &corev1.ObjectReference{
  1058  				Name: "machineWithDeletionTimestamp",
  1059  			},
  1060  		},
  1061  	}
  1062  	machineWithDeletionTimestampKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1063  		ObjectMeta: metav1.ObjectMeta{
  1064  			Name: "machineWithDeletionTimestamp-bootstrap",
  1065  		},
  1066  	}
  1067  	machineWithoutNodeRef := &clusterv1.Machine{
  1068  		ObjectMeta: metav1.ObjectMeta{
  1069  			Name: "machineWithoutNodeRef",
  1070  		},
  1071  		Spec: clusterv1.MachineSpec{
  1072  			InfrastructureRef: corev1.ObjectReference{
  1073  				Kind:       "GenericMachine",
  1074  				APIVersion: "generic.io/v1",
  1075  				Namespace:  metav1.NamespaceDefault,
  1076  				Name:       "machineWithoutNodeRef-infra",
  1077  			},
  1078  			Bootstrap: clusterv1.Bootstrap{
  1079  				ConfigRef: &corev1.ObjectReference{
  1080  					Kind:       "KubeadmConfig",
  1081  					APIVersion: bootstrapv1.GroupVersion.String(),
  1082  					Namespace:  metav1.NamespaceDefault,
  1083  					Name:       "machineWithoutNodeRef-bootstrap",
  1084  				},
  1085  			},
  1086  		},
  1087  	}
  1088  	machineWithoutNodeRefKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1089  		ObjectMeta: metav1.ObjectMeta{
  1090  			Name: "machineWithoutNodeRef-bootstrap",
  1091  		},
  1092  	}
  1093  	machineWithoutKubeadmConfig := &clusterv1.Machine{
  1094  		ObjectMeta: metav1.ObjectMeta{
  1095  			Name: "machineWithoutKubeadmConfig",
  1096  		},
  1097  		Spec: clusterv1.MachineSpec{
  1098  			InfrastructureRef: corev1.ObjectReference{
  1099  				Kind:       "GenericMachine",
  1100  				APIVersion: "generic.io/v1",
  1101  				Namespace:  metav1.NamespaceDefault,
  1102  				Name:       "machineWithoutKubeadmConfig-infra",
  1103  			},
  1104  			Bootstrap: clusterv1.Bootstrap{
  1105  				ConfigRef: &corev1.ObjectReference{
  1106  					Kind:       "KubeadmConfig",
  1107  					APIVersion: bootstrapv1.GroupVersion.String(),
  1108  					Namespace:  metav1.NamespaceDefault,
  1109  					Name:       "machineWithoutKubeadmConfig-bootstrap",
  1110  				},
  1111  			},
  1112  		},
  1113  		Status: clusterv1.MachineStatus{
  1114  			NodeRef: &corev1.ObjectReference{
  1115  				Name: "machineWithoutKubeadmConfig",
  1116  			},
  1117  		},
  1118  	}
  1119  
  1120  	ownedMachines := collections.FromMachines(
  1121  		machineWithoutExpiryAnnotation,
  1122  		machineWithExpiryAnnotation,
  1123  		machineWithDeletionTimestamp,
  1124  		machineWithoutNodeRef,
  1125  		machineWithoutKubeadmConfig,
  1126  	)
  1127  
  1128  	fakeClient := newFakeClient(
  1129  		machineWithoutExpiryAnnotationKubeadmConfig,
  1130  		machineWithExpiryAnnotationKubeadmConfig,
  1131  		machineWithDeletionTimestampKubeadmConfig,
  1132  		machineWithoutNodeRefKubeadmConfig,
  1133  	)
  1134  
  1135  	managementCluster := &fakeManagementCluster{
  1136  		Workload: fakeWorkloadCluster{
  1137  			APIServerCertificateExpiry: &detectedExpiry,
  1138  		},
  1139  	}
  1140  
  1141  	r := &KubeadmControlPlaneReconciler{
  1142  		Client:              fakeClient,
  1143  		SecretCachingClient: fakeClient,
  1144  		managementCluster:   managementCluster,
  1145  	}
  1146  
  1147  	controlPlane, err := internal.NewControlPlane(ctx, managementCluster, fakeClient, cluster, kcp, ownedMachines)
  1148  	g.Expect(err).ToNot(HaveOccurred())
  1149  
  1150  	err = r.reconcileCertificateExpiries(ctx, controlPlane)
  1151  	g.Expect(err).ToNot(HaveOccurred())
  1152  
  1153  	// Verify machineWithoutExpiryAnnotation has detectedExpiry.
  1154  	actualKubeadmConfig := bootstrapv1.KubeadmConfig{}
  1155  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutExpiryAnnotationKubeadmConfig), &actualKubeadmConfig)
  1156  	g.Expect(err).ToNot(HaveOccurred())
  1157  	actualExpiry := actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation]
  1158  	g.Expect(actualExpiry).To(Equal(detectedExpiry.Format(time.RFC3339)))
  1159  
  1160  	// Verify machineWithExpiryAnnotation has still preExistingExpiry.
  1161  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithExpiryAnnotationKubeadmConfig), &actualKubeadmConfig)
  1162  	g.Expect(err).ToNot(HaveOccurred())
  1163  	actualExpiry = actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation]
  1164  	g.Expect(actualExpiry).To(Equal(preExistingExpiry.Format(time.RFC3339)))
  1165  
  1166  	// Verify machineWithDeletionTimestamp has still no expiry annotation.
  1167  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithDeletionTimestampKubeadmConfig), &actualKubeadmConfig)
  1168  	g.Expect(err).ToNot(HaveOccurred())
  1169  	g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation))
  1170  
  1171  	// Verify machineWithoutNodeRef has still no expiry annotation.
  1172  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutNodeRefKubeadmConfig), &actualKubeadmConfig)
  1173  	g.Expect(err).ToNot(HaveOccurred())
  1174  	g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation))
  1175  }
  1176  
  1177  func TestReconcileInitializeControlPlane(t *testing.T) {
  1178  	setup := func(t *testing.T, g *WithT) *corev1.Namespace {
  1179  		t.Helper()
  1180  
  1181  		t.Log("Creating the namespace")
  1182  		ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane")
  1183  		g.Expect(err).ToNot(HaveOccurred())
  1184  
  1185  		return ns
  1186  	}
  1187  
  1188  	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
  1189  		t.Helper()
  1190  
  1191  		t.Log("Deleting the namespace")
  1192  		g.Expect(env.Delete(ctx, ns)).To(Succeed())
  1193  	}
  1194  
  1195  	g := NewWithT(t)
  1196  	namespace := setup(t, g)
  1197  	defer teardown(t, g, namespace)
  1198  
  1199  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: namespace.Name})
  1200  	cluster.Spec = clusterv1.ClusterSpec{
  1201  		ControlPlaneEndpoint: clusterv1.APIEndpoint{
  1202  			Host: "test.local",
  1203  			Port: 9999,
  1204  		},
  1205  	}
  1206  	g.Expect(env.Create(ctx, cluster)).To(Succeed())
  1207  	patchHelper, err := patch.NewHelper(cluster, env)
  1208  	g.Expect(err).ToNot(HaveOccurred())
  1209  	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
  1210  	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())
  1211  
  1212  	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
  1213  		Object: map[string]interface{}{
  1214  			"kind":       "GenericInfrastructureMachineTemplate",
  1215  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  1216  			"metadata": map[string]interface{}{
  1217  				"name":      "infra-foo",
  1218  				"namespace": cluster.Namespace,
  1219  			},
  1220  			"spec": map[string]interface{}{
  1221  				"template": map[string]interface{}{
  1222  					"spec": map[string]interface{}{
  1223  						"hello": "world",
  1224  					},
  1225  				},
  1226  			},
  1227  		},
  1228  	}
  1229  	g.Expect(env.Create(ctx, genericInfrastructureMachineTemplate)).To(Succeed())
  1230  
  1231  	kcp := &controlplanev1.KubeadmControlPlane{
  1232  		ObjectMeta: metav1.ObjectMeta{
  1233  			Namespace: cluster.Namespace,
  1234  			Name:      "foo",
  1235  			OwnerReferences: []metav1.OwnerReference{
  1236  				{
  1237  					Kind:       "Cluster",
  1238  					APIVersion: clusterv1.GroupVersion.String(),
  1239  					Name:       cluster.Name,
  1240  					UID:        cluster.UID,
  1241  				},
  1242  			},
  1243  		},
  1244  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  1245  			Replicas: nil,
  1246  			Version:  "v1.16.6",
  1247  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
  1248  				InfrastructureRef: corev1.ObjectReference{
  1249  					Kind:       genericInfrastructureMachineTemplate.GetKind(),
  1250  					APIVersion: genericInfrastructureMachineTemplate.GetAPIVersion(),
  1251  					Name:       genericInfrastructureMachineTemplate.GetName(),
  1252  					Namespace:  cluster.Namespace,
  1253  				},
  1254  			},
  1255  			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
  1256  		},
  1257  	}
  1258  	g.Expect(env.Create(ctx, kcp)).To(Succeed())
  1259  
  1260  	corednsCM := &corev1.ConfigMap{
  1261  		ObjectMeta: metav1.ObjectMeta{
  1262  			Name:      "coredns",
  1263  			Namespace: namespace.Name,
  1264  		},
  1265  		Data: map[string]string{
  1266  			"Corefile": "original-core-file",
  1267  		},
  1268  	}
  1269  	g.Expect(env.Create(ctx, corednsCM)).To(Succeed())
  1270  
  1271  	kubeadmCM := &corev1.ConfigMap{
  1272  		ObjectMeta: metav1.ObjectMeta{
  1273  			Name:      "kubeadm-config",
  1274  			Namespace: namespace.Name,
  1275  		},
  1276  		Data: map[string]string{
  1277  			"ClusterConfiguration": `apiServer:
  1278  dns:
  1279    type: CoreDNS
  1280  imageRepository: registry.k8s.io
  1281  kind: ClusterConfiguration
  1282  kubernetesVersion: metav1.16.1
  1283  `,
  1284  		},
  1285  	}
  1286  	g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed())
  1287  
  1288  	corednsDepl := &appsv1.Deployment{
  1289  		ObjectMeta: metav1.ObjectMeta{
  1290  			Name:      "coredns",
  1291  			Namespace: namespace.Name,
  1292  		},
  1293  		Spec: appsv1.DeploymentSpec{
  1294  			Selector: &metav1.LabelSelector{
  1295  				MatchLabels: map[string]string{
  1296  					"coredns": "",
  1297  				},
  1298  			},
  1299  			Template: corev1.PodTemplateSpec{
  1300  				ObjectMeta: metav1.ObjectMeta{
  1301  					Name: "coredns",
  1302  					Labels: map[string]string{
  1303  						"coredns": "",
  1304  					},
  1305  				},
  1306  				Spec: corev1.PodSpec{
  1307  					Containers: []corev1.Container{{
  1308  						Name:  "coredns",
  1309  						Image: "registry.k8s.io/coredns:1.6.2",
  1310  					}},
  1311  				},
  1312  			},
  1313  		},
  1314  	}
  1315  	g.Expect(env.Create(ctx, corednsDepl)).To(Succeed())
  1316  
  1317  	expectedLabels := map[string]string{clusterv1.ClusterNameLabel: "foo"}
  1318  	r := &KubeadmControlPlaneReconciler{
  1319  		Client:              env,
  1320  		SecretCachingClient: secretCachingClient,
  1321  		recorder:            record.NewFakeRecorder(32),
  1322  		managementCluster: &fakeManagementCluster{
  1323  			Management: &internal.Management{Client: env},
  1324  			Workload: fakeWorkloadCluster{
  1325  				Workload: &internal.Workload{
  1326  					Client: env,
  1327  				},
  1328  				Status: internal.ClusterStatus{},
  1329  			},
  1330  		},
  1331  		managementClusterUncached: &fakeManagementCluster{
  1332  			Management: &internal.Management{Client: env},
  1333  			Workload: fakeWorkloadCluster{
  1334  				Workload: &internal.Workload{
  1335  					Client: env,
  1336  				},
  1337  				Status: internal.ClusterStatus{},
  1338  			},
  1339  		},
  1340  		ssaCache: ssa.NewCache(),
  1341  	}
  1342  
  1343  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
  1344  	g.Expect(err).ToNot(HaveOccurred())
  1345  	// this first requeue is to add finalizer
  1346  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
  1347  	g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
  1348  	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  1349  
  1350  	g.Eventually(func(g Gomega) {
  1351  		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
  1352  		g.Expect(err).ToNot(HaveOccurred())
  1353  		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed())
  1354  		// Expect the referenced infrastructure template to have a Cluster Owner Reference.
  1355  		g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed())
  1356  		g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{
  1357  			APIVersion: clusterv1.GroupVersion.String(),
  1358  			Kind:       "Cluster",
  1359  			Name:       cluster.Name,
  1360  			UID:        cluster.UID,
  1361  		}))
  1362  
  1363  		// Always expect that the Finalizer is set on the passed in resource
  1364  		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  1365  
  1366  		g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
  1367  		g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1))
  1368  		g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue())
  1369  
  1370  		s, err := secret.GetFromNamespacedName(ctx, env, client.ObjectKey{Namespace: cluster.Namespace, Name: "foo"}, secret.ClusterCA)
  1371  		g.Expect(err).ToNot(HaveOccurred())
  1372  		g.Expect(s).NotTo(BeNil())
  1373  		g.Expect(s.Data).NotTo(BeEmpty())
  1374  		g.Expect(s.Labels).To(Equal(expectedLabels))
  1375  
  1376  		k, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster))
  1377  		g.Expect(err).ToNot(HaveOccurred())
  1378  		g.Expect(k).NotTo(BeEmpty())
  1379  
  1380  		machineList := &clusterv1.MachineList{}
  1381  		g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
  1382  		g.Expect(machineList.Items).To(HaveLen(1))
  1383  
  1384  		machine := machineList.Items[0]
  1385  		g.Expect(machine.Name).To(HavePrefix(kcp.Name))
  1386  		// Newly cloned infra objects should have the infraref annotation.
  1387  		infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace)
  1388  		g.Expect(err).ToNot(HaveOccurred())
  1389  		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName()))
  1390  		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String()))
  1391  	}, 30*time.Second).Should(Succeed())
  1392  }
  1393  
  1394  func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
  1395  	setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
  1396  		t.Helper()
  1397  
  1398  		t.Log("Creating the namespace")
  1399  		ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-sync-machines")
  1400  		g.Expect(err).ToNot(HaveOccurred())
  1401  
  1402  		t.Log("Creating the Cluster")
  1403  		cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: ns.Name, Name: "test-cluster"}}
  1404  		g.Expect(env.Create(ctx, cluster)).To(Succeed())
  1405  
  1406  		t.Log("Creating the Cluster Kubeconfig Secret")
  1407  		g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
  1408  
  1409  		return ns, cluster
  1410  	}
  1411  
  1412  	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace, cluster *clusterv1.Cluster) {
  1413  		t.Helper()
  1414  
  1415  		t.Log("Deleting the Cluster")
  1416  		g.Expect(env.Delete(ctx, cluster)).To(Succeed())
  1417  		t.Log("Deleting the namespace")
  1418  		g.Expect(env.Delete(ctx, ns)).To(Succeed())
  1419  	}
  1420  
  1421  	g := NewWithT(t)
  1422  	namespace, testCluster := setup(t, g)
  1423  	defer teardown(t, g, namespace, testCluster)
  1424  
  1425  	classicManager := "manager"
  1426  	duration5s := &metav1.Duration{Duration: 5 * time.Second}
  1427  	duration10s := &metav1.Duration{Duration: 10 * time.Second}
  1428  
  1429  	// Existing InfraMachine
  1430  	infraMachineSpec := map[string]interface{}{
  1431  		"infra-field": "infra-value",
  1432  	}
  1433  	existingInfraMachine := &unstructured.Unstructured{
  1434  		Object: map[string]interface{}{
  1435  			"kind":       "GenericInfrastructureMachine",
  1436  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  1437  			"metadata": map[string]interface{}{
  1438  				"name":      "existing-inframachine",
  1439  				"namespace": testCluster.Namespace,
  1440  				"labels": map[string]string{
  1441  					"preserved-label": "preserved-value",
  1442  					"dropped-label":   "dropped-value",
  1443  					"modified-label":  "modified-value",
  1444  				},
  1445  				"annotations": map[string]string{
  1446  					"preserved-annotation": "preserved-value",
  1447  					"dropped-annotation":   "dropped-value",
  1448  					"modified-annotation":  "modified-value",
  1449  				},
  1450  			},
  1451  			"spec": infraMachineSpec,
  1452  		},
  1453  	}
  1454  	infraMachineRef := &corev1.ObjectReference{
  1455  		Kind:       "GenericInfrastructureMachine",
  1456  		Namespace:  namespace.Name,
  1457  		Name:       "existing-inframachine",
  1458  		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  1459  	}
  1460  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1461  	g.Expect(env.Create(ctx, existingInfraMachine, client.FieldOwner("manager"))).To(Succeed())
  1462  
  1463  	// Existing KubeadmConfig
  1464  	bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{
  1465  		Users:             []bootstrapv1.User{{Name: "test-user"}},
  1466  		JoinConfiguration: &bootstrapv1.JoinConfiguration{},
  1467  	}
  1468  	existingKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1469  		TypeMeta: metav1.TypeMeta{
  1470  			Kind:       "KubeadmConfig",
  1471  			APIVersion: bootstrapv1.GroupVersion.String(),
  1472  		},
  1473  		ObjectMeta: metav1.ObjectMeta{
  1474  			Name:      "existing-kubeadmconfig",
  1475  			Namespace: namespace.Name,
  1476  			Labels: map[string]string{
  1477  				"preserved-label": "preserved-value",
  1478  				"dropped-label":   "dropped-value",
  1479  				"modified-label":  "modified-value",
  1480  			},
  1481  			Annotations: map[string]string{
  1482  				"preserved-annotation": "preserved-value",
  1483  				"dropped-annotation":   "dropped-value",
  1484  				"modified-annotation":  "modified-value",
  1485  			},
  1486  		},
  1487  		Spec: *bootstrapSpec,
  1488  	}
  1489  	bootstrapRef := &corev1.ObjectReference{
  1490  		Kind:       "KubeadmConfig",
  1491  		Namespace:  namespace.Name,
  1492  		Name:       "existing-kubeadmconfig",
  1493  		APIVersion: bootstrapv1.GroupVersion.String(),
  1494  	}
  1495  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1496  	g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed())
  1497  
  1498  	// Existing Machine to validate in-place mutation
  1499  	fd := ptr.To("fd1")
  1500  	inPlaceMutatingMachine := &clusterv1.Machine{
  1501  		TypeMeta: metav1.TypeMeta{
  1502  			Kind:       "Machine",
  1503  			APIVersion: clusterv1.GroupVersion.String(),
  1504  		},
  1505  		ObjectMeta: metav1.ObjectMeta{
  1506  			Name:      "existing-machine",
  1507  			Namespace: namespace.Name,
  1508  			Labels: map[string]string{
  1509  				"preserved-label": "preserved-value",
  1510  				"dropped-label":   "dropped-value",
  1511  				"modified-label":  "modified-value",
  1512  			},
  1513  			Annotations: map[string]string{
  1514  				"preserved-annotation": "preserved-value",
  1515  				"dropped-annotation":   "dropped-value",
  1516  				"modified-annotation":  "modified-value",
  1517  			},
  1518  		},
  1519  		Spec: clusterv1.MachineSpec{
  1520  			ClusterName: testCluster.Name,
  1521  			Bootstrap: clusterv1.Bootstrap{
  1522  				ConfigRef: bootstrapRef,
  1523  			},
  1524  			InfrastructureRef:       *infraMachineRef,
  1525  			Version:                 ptr.To("v1.25.3"),
  1526  			FailureDomain:           fd,
  1527  			ProviderID:              ptr.To("provider-id"),
  1528  			NodeDrainTimeout:        duration5s,
  1529  			NodeVolumeDetachTimeout: duration5s,
  1530  			NodeDeletionTimeout:     duration5s,
  1531  		},
  1532  	}
  1533  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1534  	g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner("manager"))).To(Succeed())
  1535  
  1536  	// Existing machine that is in deleting state
  1537  	deletingMachine := &clusterv1.Machine{
  1538  		TypeMeta: metav1.TypeMeta{
  1539  			APIVersion: clusterv1.GroupVersion.String(),
  1540  			Kind:       "Machine",
  1541  		},
  1542  		ObjectMeta: metav1.ObjectMeta{
  1543  			Name:        "deleting-machine",
  1544  			Namespace:   namespace.Name,
  1545  			Labels:      map[string]string{},
  1546  			Annotations: map[string]string{},
  1547  			Finalizers:  []string{"testing-finalizer"},
  1548  		},
  1549  		Spec: clusterv1.MachineSpec{
  1550  			ClusterName: testCluster.Name,
  1551  			InfrastructureRef: corev1.ObjectReference{
  1552  				Namespace: namespace.Name,
  1553  			},
  1554  			Bootstrap: clusterv1.Bootstrap{
  1555  				DataSecretName: ptr.To("machine-bootstrap-secret"),
  1556  			},
  1557  		},
  1558  	}
  1559  	g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
  1560  	// Delete the machine to put it in the deleting state
  1561  	g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
  1562  	// Wait till the machine is marked for deletion
  1563  	g.Eventually(func() bool {
  1564  		if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil {
  1565  			return false
  1566  		}
  1567  		return !deletingMachine.DeletionTimestamp.IsZero()
  1568  	}, 30*time.Second).Should(BeTrue())
  1569  
  1570  	// Existing machine that has a InfrastructureRef which does not exist.
  1571  	nilInfraMachineMachine := &clusterv1.Machine{
  1572  		TypeMeta: metav1.TypeMeta{
  1573  			APIVersion: clusterv1.GroupVersion.String(),
  1574  			Kind:       "Machine",
  1575  		},
  1576  		ObjectMeta: metav1.ObjectMeta{
  1577  			Name:        "nil-infra-machine-machine",
  1578  			Namespace:   namespace.Name,
  1579  			Labels:      map[string]string{},
  1580  			Annotations: map[string]string{},
  1581  			Finalizers:  []string{"testing-finalizer"},
  1582  		},
  1583  		Spec: clusterv1.MachineSpec{
  1584  			ClusterName: testCluster.Name,
  1585  			InfrastructureRef: corev1.ObjectReference{
  1586  				Namespace: namespace.Name,
  1587  			},
  1588  			Bootstrap: clusterv1.Bootstrap{
  1589  				DataSecretName: ptr.To("machine-bootstrap-secret"),
  1590  			},
  1591  		},
  1592  	}
  1593  	g.Expect(env.Create(ctx, nilInfraMachineMachine, client.FieldOwner(classicManager))).To(Succeed())
  1594  	// Delete the machine to put it in the deleting state
  1595  
  1596  	kcp := &controlplanev1.KubeadmControlPlane{
  1597  		TypeMeta: metav1.TypeMeta{
  1598  			Kind:       "KubeadmControlPlane",
  1599  			APIVersion: controlplanev1.GroupVersion.String(),
  1600  		},
  1601  		ObjectMeta: metav1.ObjectMeta{
  1602  			UID:       types.UID("abc-123-control-plane"),
  1603  			Name:      "existing-kcp",
  1604  			Namespace: namespace.Name,
  1605  		},
  1606  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  1607  			Version:           "v1.26.1",
  1608  			KubeadmConfigSpec: *bootstrapSpec,
  1609  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
  1610  				ObjectMeta: clusterv1.ObjectMeta{
  1611  					Labels: map[string]string{
  1612  						"preserved-label": "preserved-value", // Label will be preserved while testing in-place mutation.
  1613  						"dropped-label":   "dropped-value",   // Label will be dropped while testing in-place mutation.
  1614  						"modified-label":  "modified-value",  // Label value will be modified while testing in-place mutation.
  1615  					},
  1616  					Annotations: map[string]string{
  1617  						"preserved-annotation": "preserved-value", // Annotation will be preserved while testing in-place mutation.
  1618  						"dropped-annotation":   "dropped-value",   // Annotation will be dropped while testing in-place mutation.
  1619  						"modified-annotation":  "modified-value",  // Annotation value will be modified while testing in-place mutation.
  1620  					},
  1621  				},
  1622  				InfrastructureRef: corev1.ObjectReference{
  1623  					Kind:       "GenericInfrastructureMachineTemplate",
  1624  					Namespace:  namespace.Name,
  1625  					Name:       "infra-foo",
  1626  					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  1627  				},
  1628  				NodeDrainTimeout:        duration5s,
  1629  				NodeVolumeDetachTimeout: duration5s,
  1630  				NodeDeletionTimeout:     duration5s,
  1631  			},
  1632  		},
  1633  	}
  1634  
  1635  	controlPlane := &internal.ControlPlane{
  1636  		KCP:     kcp,
  1637  		Cluster: testCluster,
  1638  		Machines: collections.Machines{
  1639  			inPlaceMutatingMachine.Name: inPlaceMutatingMachine,
  1640  			deletingMachine.Name:        deletingMachine,
  1641  			nilInfraMachineMachine.Name: nilInfraMachineMachine,
  1642  		},
  1643  		KubeadmConfigs: map[string]*bootstrapv1.KubeadmConfig{
  1644  			inPlaceMutatingMachine.Name: existingKubeadmConfig,
  1645  			deletingMachine.Name:        nil,
  1646  		},
  1647  		InfraResources: map[string]*unstructured.Unstructured{
  1648  			inPlaceMutatingMachine.Name: existingInfraMachine,
  1649  			deletingMachine.Name:        nil,
  1650  		},
  1651  	}
  1652  
  1653  	//
  1654  	// Verify Managed Fields
  1655  	//
  1656  
  1657  	// Run syncMachines to clean up managed fields and have proper field ownership
  1658  	// for Machines, InfrastructureMachines and KubeadmConfigs.
  1659  	reconciler := &KubeadmControlPlaneReconciler{
  1660  		Client:              env,
  1661  		SecretCachingClient: secretCachingClient,
  1662  		ssaCache:            ssa.NewCache(),
  1663  	}
  1664  	g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
  1665  
  1666  	// The inPlaceMutatingMachine should have cleaned up managed fields.
  1667  	updatedInplaceMutatingMachine := inPlaceMutatingMachine.DeepCopy()
  1668  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed())
  1669  	// Verify ManagedFields
  1670  	g.Expect(updatedInplaceMutatingMachine.ManagedFields).Should(
  1671  		ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
  1672  		"in-place mutable machine should contain an entry for SSA manager",
  1673  	)
  1674  	g.Expect(updatedInplaceMutatingMachine.ManagedFields).ShouldNot(
  1675  		ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
  1676  		"in-place mutable machine should not contain an entry for old manager",
  1677  	)
  1678  
  1679  	// The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to
  1680  	// "capi-kubeadmcontrolplane" manager.
  1681  	updatedInfraMachine := existingInfraMachine.DeepCopy()
  1682  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed())
  1683  
  1684  	// Verify ManagedFields
  1685  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1686  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"}))
  1687  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1688  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"}))
  1689  	g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot(
  1690  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"}))
  1691  	g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot(
  1692  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"}))
  1693  	// Verify ownership of other fields is not changed.
  1694  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1695  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"}))
  1696  
  1697  	// The KubeadmConfig should have ownership of "labels" and "annotations" transferred to
  1698  	// "capi-kubeadmcontrolplane" manager.
  1699  	updatedKubeadmConfig := existingKubeadmConfig.DeepCopy()
  1700  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed())
  1701  
  1702  	// Verify ManagedFields
  1703  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1704  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"}))
  1705  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1706  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"}))
  1707  	g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot(
  1708  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"}))
  1709  	g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot(
  1710  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"}))
  1711  	// Verify ownership of other fields is not changed.
  1712  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1713  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"}))
  1714  
  1715  	//
  1716  	// Verify In-place mutating fields
  1717  	//
  1718  
  1719  	// Update KCP and verify the in-place mutating fields are propagated.
  1720  	kcp.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{
  1721  		"preserved-label": "preserved-value",  // Keep the label and value as is
  1722  		"modified-label":  "modified-value-2", // Modify the value of the label
  1723  		// Drop "dropped-label"
  1724  	}
  1725  	expectedLabels := map[string]string{
  1726  		"preserved-label":                      "preserved-value",
  1727  		"modified-label":                       "modified-value-2",
  1728  		clusterv1.ClusterNameLabel:             testCluster.Name,
  1729  		clusterv1.MachineControlPlaneLabel:     "",
  1730  		clusterv1.MachineControlPlaneNameLabel: kcp.Name,
  1731  	}
  1732  	kcp.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{
  1733  		"preserved-annotation": "preserved-value",  // Keep the annotation and value as is
  1734  		"modified-annotation":  "modified-value-2", // Modify the value of the annotation
  1735  		// Drop "dropped-annotation"
  1736  	}
  1737  	kcp.Spec.MachineTemplate.NodeDrainTimeout = duration10s
  1738  	kcp.Spec.MachineTemplate.NodeDeletionTimeout = duration10s
  1739  	kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout = duration10s
  1740  
  1741  	// Use the updated KCP.
  1742  	controlPlane.KCP = kcp
  1743  	g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
  1744  
  1745  	// Verify in-place mutable fields are updated on the Machine.
  1746  	updatedInplaceMutatingMachine = inPlaceMutatingMachine.DeepCopy()
  1747  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed())
  1748  	// Verify Labels
  1749  	g.Expect(updatedInplaceMutatingMachine.Labels).Should(Equal(expectedLabels))
  1750  	// Verify Annotations
  1751  	g.Expect(updatedInplaceMutatingMachine.Annotations).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1752  	// Verify Node timeout values
  1753  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeDrainTimeout).Should(And(
  1754  		Not(BeNil()),
  1755  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDrainTimeout)),
  1756  	))
  1757  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeDeletionTimeout).Should(And(
  1758  		Not(BeNil()),
  1759  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDeletionTimeout)),
  1760  	))
  1761  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeVolumeDetachTimeout).Should(And(
  1762  		Not(BeNil()),
  1763  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)),
  1764  	))
  1765  	// Verify that the non in-place mutating fields remain the same.
  1766  	g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain))
  1767  	g.Expect(updatedInplaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID))
  1768  	g.Expect(updatedInplaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version))
  1769  	g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef))
  1770  	g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap))
  1771  
  1772  	// Verify in-place mutable fields are updated on InfrastructureMachine
  1773  	updatedInfraMachine = existingInfraMachine.DeepCopy()
  1774  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed())
  1775  	// Verify Labels
  1776  	g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels))
  1777  	// Verify Annotations
  1778  	g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1779  	// Verify spec remains the same
  1780  	g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec))
  1781  
  1782  	// Verify in-place mutable fields are updated on the KubeadmConfig.
  1783  	updatedKubeadmConfig = existingKubeadmConfig.DeepCopy()
  1784  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed())
  1785  	// Verify Labels
  1786  	g.Expect(updatedKubeadmConfig.GetLabels()).Should(Equal(expectedLabels))
  1787  	// Verify Annotations
  1788  	g.Expect(updatedKubeadmConfig.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1789  	// Verify spec remains the same
  1790  	g.Expect(updatedKubeadmConfig.Spec).Should(BeComparableTo(existingKubeadmConfig.Spec))
  1791  
  1792  	// The deleting machine should not change.
  1793  	updatedDeletingMachine := deletingMachine.DeepCopy()
  1794  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
  1795  
  1796  	// Verify ManagedFields
  1797  	g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
  1798  		ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
  1799  		"deleting machine should not contain an entry for SSA manager",
  1800  	)
  1801  	g.Expect(updatedDeletingMachine.ManagedFields).Should(
  1802  		ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
  1803  		"in-place mutable machine should still contain an entry for old manager",
  1804  	)
  1805  
  1806  	// Verify the machine labels and annotations are unchanged.
  1807  	g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels))
  1808  	g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations))
  1809  	// Verify the machine spec is unchanged.
  1810  	g.Expect(updatedDeletingMachine.Spec).Should(BeComparableTo(deletingMachine.Spec))
  1811  }
  1812  
  1813  func TestKubeadmControlPlaneReconciler_updateCoreDNS(t *testing.T) {
  1814  	// TODO: (wfernandes) This test could use some refactor love.
  1815  
  1816  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
  1817  	kcp := &controlplanev1.KubeadmControlPlane{
  1818  		ObjectMeta: metav1.ObjectMeta{
  1819  			Namespace: cluster.Namespace,
  1820  			Name:      "foo",
  1821  		},
  1822  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  1823  			Replicas: nil,
  1824  			Version:  "v1.16.6",
  1825  			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
  1826  				ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
  1827  					DNS: bootstrapv1.DNS{
  1828  						ImageMeta: bootstrapv1.ImageMeta{
  1829  							ImageRepository: "registry.k8s.io",
  1830  							ImageTag:        "1.7.2",
  1831  						},
  1832  					},
  1833  				},
  1834  			},
  1835  		},
  1836  	}
  1837  	depl := &appsv1.Deployment{
  1838  		ObjectMeta: metav1.ObjectMeta{
  1839  			Name:      "coredns",
  1840  			Namespace: metav1.NamespaceSystem,
  1841  		},
  1842  		Spec: appsv1.DeploymentSpec{
  1843  			Template: corev1.PodTemplateSpec{
  1844  				ObjectMeta: metav1.ObjectMeta{
  1845  					Name: "coredns",
  1846  				},
  1847  				Spec: corev1.PodSpec{
  1848  					Containers: []corev1.Container{{
  1849  						Name:  "coredns",
  1850  						Image: "registry.k8s.io/coredns:1.6.2",
  1851  					}},
  1852  					Volumes: []corev1.Volume{{
  1853  						Name: "config-volume",
  1854  						VolumeSource: corev1.VolumeSource{
  1855  							ConfigMap: &corev1.ConfigMapVolumeSource{
  1856  								LocalObjectReference: corev1.LocalObjectReference{
  1857  									Name: "coredns",
  1858  								},
  1859  								Items: []corev1.KeyToPath{{
  1860  									Key:  "Corefile",
  1861  									Path: "Corefile",
  1862  								}},
  1863  							},
  1864  						},
  1865  					}},
  1866  				},
  1867  			},
  1868  		},
  1869  	}
  1870  	originalCorefile := "original core file"
  1871  	corednsCM := &corev1.ConfigMap{
  1872  		ObjectMeta: metav1.ObjectMeta{
  1873  			Name:      "coredns",
  1874  			Namespace: metav1.NamespaceSystem,
  1875  		},
  1876  		Data: map[string]string{
  1877  			"Corefile": originalCorefile,
  1878  		},
  1879  	}
  1880  
  1881  	kubeadmCM := &corev1.ConfigMap{
  1882  		ObjectMeta: metav1.ObjectMeta{
  1883  			Name:      "kubeadm-config",
  1884  			Namespace: metav1.NamespaceSystem,
  1885  		},
  1886  		Data: map[string]string{
  1887  			"ClusterConfiguration": `apiServer:
  1888  dns:
  1889    type: CoreDNS
  1890  imageRepository: registry.k8s.io
  1891  kind: ClusterConfiguration
  1892  kubernetesVersion: metav1.16.1`,
  1893  		},
  1894  	}
  1895  
  1896  	t.Run("updates configmaps and deployments successfully", func(t *testing.T) {
  1897  		t.Skip("Updating the corefile, after updating controller runtime somehow makes this test fail in a conflict, needs investigation")
  1898  
  1899  		g := NewWithT(t)
  1900  		objs := []client.Object{
  1901  			cluster.DeepCopy(),
  1902  			kcp.DeepCopy(),
  1903  			depl.DeepCopy(),
  1904  			corednsCM.DeepCopy(),
  1905  			kubeadmCM.DeepCopy(),
  1906  		}
  1907  		fakeClient := newFakeClient(objs...)
  1908  
  1909  		workloadCluster := &fakeWorkloadCluster{
  1910  			Workload: &internal.Workload{
  1911  				Client: fakeClient,
  1912  				CoreDNSMigrator: &fakeMigrator{
  1913  					migratedCorefile: "new core file",
  1914  				},
  1915  			},
  1916  		}
  1917  
  1918  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  1919  
  1920  		var actualCoreDNSCM corev1.ConfigMap
  1921  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed())
  1922  		g.Expect(actualCoreDNSCM.Data).To(HaveLen(2))
  1923  		g.Expect(actualCoreDNSCM.Data).To(HaveKeyWithValue("Corefile", "new core file"))
  1924  		g.Expect(actualCoreDNSCM.Data).To(HaveKeyWithValue("Corefile-backup", originalCorefile))
  1925  
  1926  		var actualKubeadmConfig corev1.ConfigMap
  1927  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem}, &actualKubeadmConfig)).To(Succeed())
  1928  		g.Expect(actualKubeadmConfig.Data).To(HaveKey("ClusterConfiguration"))
  1929  		g.Expect(actualKubeadmConfig.Data["ClusterConfiguration"]).To(ContainSubstring("1.7.2"))
  1930  
  1931  		expectedVolume := corev1.Volume{
  1932  			Name: "config-volume",
  1933  			VolumeSource: corev1.VolumeSource{
  1934  				ConfigMap: &corev1.ConfigMapVolumeSource{
  1935  					LocalObjectReference: corev1.LocalObjectReference{
  1936  						Name: "coredns",
  1937  					},
  1938  					Items: []corev1.KeyToPath{{
  1939  						Key:  "Corefile",
  1940  						Path: "Corefile",
  1941  					}},
  1942  				},
  1943  			},
  1944  		}
  1945  		var actualCoreDNSDeployment appsv1.Deployment
  1946  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSDeployment)).To(Succeed())
  1947  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Containers[0].Image).To(Equal("registry.k8s.io/coredns:1.7.2"))
  1948  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Volumes).To(ConsistOf(expectedVolume))
  1949  	})
  1950  
  1951  	t.Run("returns no error when no ClusterConfiguration is specified", func(t *testing.T) {
  1952  		g := NewWithT(t)
  1953  		kcp := kcp.DeepCopy()
  1954  		kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil
  1955  
  1956  		objs := []client.Object{
  1957  			cluster.DeepCopy(),
  1958  			kcp,
  1959  			depl.DeepCopy(),
  1960  			corednsCM.DeepCopy(),
  1961  			kubeadmCM.DeepCopy(),
  1962  		}
  1963  
  1964  		fakeClient := newFakeClient(objs...)
  1965  
  1966  		workloadCluster := fakeWorkloadCluster{
  1967  			Workload: &internal.Workload{
  1968  				Client: fakeClient,
  1969  				CoreDNSMigrator: &fakeMigrator{
  1970  					migratedCorefile: "new core file",
  1971  				},
  1972  			},
  1973  		}
  1974  
  1975  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  1976  	})
  1977  
  1978  	t.Run("should not return an error when there is no CoreDNS configmap", func(t *testing.T) {
  1979  		g := NewWithT(t)
  1980  		objs := []client.Object{
  1981  			cluster.DeepCopy(),
  1982  			kcp.DeepCopy(),
  1983  			depl.DeepCopy(),
  1984  			kubeadmCM.DeepCopy(),
  1985  		}
  1986  
  1987  		fakeClient := newFakeClient(objs...)
  1988  		workloadCluster := fakeWorkloadCluster{
  1989  			Workload: &internal.Workload{
  1990  				Client: fakeClient,
  1991  				CoreDNSMigrator: &fakeMigrator{
  1992  					migratedCorefile: "new core file",
  1993  				},
  1994  			},
  1995  		}
  1996  
  1997  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  1998  	})
  1999  
  2000  	t.Run("should not return an error when there is no CoreDNS deployment", func(t *testing.T) {
  2001  		g := NewWithT(t)
  2002  		objs := []client.Object{
  2003  			cluster.DeepCopy(),
  2004  			kcp.DeepCopy(),
  2005  			corednsCM.DeepCopy(),
  2006  			kubeadmCM.DeepCopy(),
  2007  		}
  2008  
  2009  		fakeClient := newFakeClient(objs...)
  2010  
  2011  		workloadCluster := fakeWorkloadCluster{
  2012  			Workload: &internal.Workload{
  2013  				Client: fakeClient,
  2014  				CoreDNSMigrator: &fakeMigrator{
  2015  					migratedCorefile: "new core file",
  2016  				},
  2017  			},
  2018  		}
  2019  
  2020  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  2021  	})
  2022  
  2023  	t.Run("should not return an error when no DNS upgrade is requested", func(t *testing.T) {
  2024  		g := NewWithT(t)
  2025  		objs := []client.Object{
  2026  			cluster.DeepCopy(),
  2027  			corednsCM.DeepCopy(),
  2028  			kubeadmCM.DeepCopy(),
  2029  		}
  2030  		kcp := kcp.DeepCopy()
  2031  		kcp.Annotations = map[string]string{controlplanev1.SkipCoreDNSAnnotation: ""}
  2032  
  2033  		depl := depl.DeepCopy()
  2034  
  2035  		depl.Spec.Template.Spec.Containers[0].Image = "my-cool-image!!!!" // something very unlikely for getCoreDNSInfo to parse
  2036  		objs = append(objs, depl)
  2037  
  2038  		fakeClient := newFakeClient(objs...)
  2039  		workloadCluster := fakeWorkloadCluster{
  2040  			Workload: &internal.Workload{
  2041  				Client: fakeClient,
  2042  			},
  2043  		}
  2044  
  2045  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  2046  
  2047  		var actualCoreDNSCM corev1.ConfigMap
  2048  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed())
  2049  		g.Expect(actualCoreDNSCM.Data).To(Equal(corednsCM.Data))
  2050  
  2051  		var actualKubeadmConfig corev1.ConfigMap
  2052  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem}, &actualKubeadmConfig)).To(Succeed())
  2053  		g.Expect(actualKubeadmConfig.Data).To(Equal(kubeadmCM.Data))
  2054  
  2055  		var actualCoreDNSDeployment appsv1.Deployment
  2056  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSDeployment)).To(Succeed())
  2057  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Containers[0].Image).ToNot(ContainSubstring("coredns"))
  2058  	})
  2059  
  2060  	t.Run("returns error when unable to UpdateCoreDNS", func(t *testing.T) {
  2061  		g := NewWithT(t)
  2062  		objs := []client.Object{
  2063  			cluster.DeepCopy(),
  2064  			kcp.DeepCopy(),
  2065  			depl.DeepCopy(),
  2066  			corednsCM.DeepCopy(),
  2067  		}
  2068  
  2069  		fakeClient := newFakeClient(objs...)
  2070  
  2071  		workloadCluster := fakeWorkloadCluster{
  2072  			Workload: &internal.Workload{
  2073  				Client: fakeClient,
  2074  				CoreDNSMigrator: &fakeMigrator{
  2075  					migratedCorefile: "new core file",
  2076  				},
  2077  			},
  2078  		}
  2079  
  2080  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).ToNot(Succeed())
  2081  	})
  2082  }
  2083  
  2084  func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
  2085  	t.Run("removes all control plane Machines", func(t *testing.T) {
  2086  		g := NewWithT(t)
  2087  
  2088  		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
  2089  		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
  2090  		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
  2091  
  2092  		machines := collections.New()
  2093  		for i := 0; i < 3; i++ {
  2094  			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
  2095  			initObjs = append(initObjs, m)
  2096  			machines.Insert(m)
  2097  		}
  2098  
  2099  		fakeClient := newFakeClient(initObjs...)
  2100  
  2101  		r := &KubeadmControlPlaneReconciler{
  2102  			Client:              fakeClient,
  2103  			SecretCachingClient: fakeClient,
  2104  			managementCluster: &fakeManagementCluster{
  2105  				Management: &internal.Management{Client: fakeClient},
  2106  				Workload:   fakeWorkloadCluster{},
  2107  			},
  2108  
  2109  			recorder: record.NewFakeRecorder(32),
  2110  		}
  2111  
  2112  		controlPlane := &internal.ControlPlane{
  2113  			KCP:      kcp,
  2114  			Cluster:  cluster,
  2115  			Machines: machines,
  2116  		}
  2117  
  2118  		result, err := r.reconcileDelete(ctx, controlPlane)
  2119  		g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
  2120  		g.Expect(err).ToNot(HaveOccurred())
  2121  		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  2122  
  2123  		controlPlaneMachines := clusterv1.MachineList{}
  2124  		g.Expect(fakeClient.List(ctx, &controlPlaneMachines)).To(Succeed())
  2125  		g.Expect(controlPlaneMachines.Items).To(BeEmpty())
  2126  
  2127  		controlPlane = &internal.ControlPlane{
  2128  			KCP:     kcp,
  2129  			Cluster: cluster,
  2130  		}
  2131  
  2132  		result, err = r.reconcileDelete(ctx, controlPlane)
  2133  		g.Expect(result).To(BeComparableTo(ctrl.Result{}))
  2134  		g.Expect(err).ToNot(HaveOccurred())
  2135  		g.Expect(kcp.Finalizers).To(BeEmpty())
  2136  	})
  2137  
  2138  	t.Run("does not remove any control plane Machines if other Machines exist", func(t *testing.T) {
  2139  		g := NewWithT(t)
  2140  
  2141  		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
  2142  		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
  2143  
  2144  		workerMachine := &clusterv1.Machine{
  2145  			ObjectMeta: metav1.ObjectMeta{
  2146  				Name:      "worker",
  2147  				Namespace: cluster.Namespace,
  2148  				Labels: map[string]string{
  2149  					clusterv1.ClusterNameLabel: cluster.Name,
  2150  				},
  2151  			},
  2152  		}
  2153  
  2154  		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachine.DeepCopy()}
  2155  
  2156  		machines := collections.New()
  2157  		for i := 0; i < 3; i++ {
  2158  			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
  2159  			initObjs = append(initObjs, m)
  2160  			machines.Insert(m)
  2161  		}
  2162  
  2163  		fakeClient := newFakeClient(initObjs...)
  2164  
  2165  		r := &KubeadmControlPlaneReconciler{
  2166  			Client:              fakeClient,
  2167  			SecretCachingClient: fakeClient,
  2168  			managementCluster: &fakeManagementCluster{
  2169  				Management: &internal.Management{Client: fakeClient},
  2170  				Workload:   fakeWorkloadCluster{},
  2171  			},
  2172  			recorder: record.NewFakeRecorder(32),
  2173  		}
  2174  
  2175  		controlPlane := &internal.ControlPlane{
  2176  			KCP:      kcp,
  2177  			Cluster:  cluster,
  2178  			Machines: machines,
  2179  		}
  2180  
  2181  		result, err := r.reconcileDelete(ctx, controlPlane)
  2182  		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
  2183  		g.Expect(err).ToNot(HaveOccurred())
  2184  
  2185  		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  2186  
  2187  		controlPlaneMachines := clusterv1.MachineList{}
  2188  		labels := map[string]string{
  2189  			clusterv1.MachineControlPlaneLabel: "",
  2190  		}
  2191  		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
  2192  		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
  2193  	})
  2194  
  2195  	t.Run("does not remove any control plane Machines if MachinePools exist", func(t *testing.T) {
  2196  		_ = feature.MutableGates.Set("MachinePool=true")
  2197  		g := NewWithT(t)
  2198  
  2199  		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
  2200  		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
  2201  
  2202  		workerMachinePool := &expv1.MachinePool{
  2203  			ObjectMeta: metav1.ObjectMeta{
  2204  				Name:      "worker",
  2205  				Namespace: cluster.Namespace,
  2206  				Labels: map[string]string{
  2207  					clusterv1.ClusterNameLabel: cluster.Name,
  2208  				},
  2209  			},
  2210  		}
  2211  
  2212  		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()}
  2213  
  2214  		machines := collections.New()
  2215  		for i := 0; i < 3; i++ {
  2216  			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
  2217  			initObjs = append(initObjs, m)
  2218  			machines.Insert(m)
  2219  		}
  2220  
  2221  		fakeClient := newFakeClient(initObjs...)
  2222  
  2223  		r := &KubeadmControlPlaneReconciler{
  2224  			Client:              fakeClient,
  2225  			SecretCachingClient: fakeClient,
  2226  			managementCluster: &fakeManagementCluster{
  2227  				Management: &internal.Management{Client: fakeClient},
  2228  				Workload:   fakeWorkloadCluster{},
  2229  			},
  2230  			recorder: record.NewFakeRecorder(32),
  2231  		}
  2232  
  2233  		controlPlane := &internal.ControlPlane{
  2234  			KCP:      kcp,
  2235  			Cluster:  cluster,
  2236  			Machines: machines,
  2237  		}
  2238  
  2239  		result, err := r.reconcileDelete(ctx, controlPlane)
  2240  		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
  2241  		g.Expect(err).ToNot(HaveOccurred())
  2242  
  2243  		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  2244  
  2245  		controlPlaneMachines := clusterv1.MachineList{}
  2246  		labels := map[string]string{
  2247  			clusterv1.MachineControlPlaneLabel: "",
  2248  		}
  2249  		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
  2250  		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
  2251  	})
  2252  
  2253  	t.Run("removes the finalizer if no control plane Machines exist", func(t *testing.T) {
  2254  		g := NewWithT(t)
  2255  
  2256  		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
  2257  		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
  2258  
  2259  		fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy())
  2260  
  2261  		r := &KubeadmControlPlaneReconciler{
  2262  			Client:              fakeClient,
  2263  			SecretCachingClient: fakeClient,
  2264  			managementCluster: &fakeManagementCluster{
  2265  				Management: &internal.Management{Client: fakeClient},
  2266  				Workload:   fakeWorkloadCluster{},
  2267  			},
  2268  			recorder: record.NewFakeRecorder(32),
  2269  		}
  2270  
  2271  		controlPlane := &internal.ControlPlane{
  2272  			KCP:     kcp,
  2273  			Cluster: cluster,
  2274  		}
  2275  
  2276  		result, err := r.reconcileDelete(ctx, controlPlane)
  2277  		g.Expect(result).To(BeComparableTo(ctrl.Result{}))
  2278  		g.Expect(err).ToNot(HaveOccurred())
  2279  		g.Expect(kcp.Finalizers).To(BeEmpty())
  2280  	})
  2281  }
  2282  
  2283  // test utils.
  2284  
  2285  func newFakeClient(initObjs ...client.Object) client.Client {
  2286  	return &fakeClient{
  2287  		startTime: time.Now(),
  2288  		Client:    fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(),
  2289  	}
  2290  }
  2291  
  2292  type fakeClient struct {
  2293  	startTime time.Time
  2294  	mux       sync.Mutex
  2295  	client.Client
  2296  }
  2297  
  2298  type fakeClientI interface {
  2299  	SetCreationTimestamp(timestamp metav1.Time)
  2300  }
  2301  
  2302  // controller-runtime's fake client doesn't set a CreationTimestamp
  2303  // this sets one that increments by a minute for each object created.
  2304  func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
  2305  	if f, ok := obj.(fakeClientI); ok {
  2306  		c.mux.Lock()
  2307  		c.startTime = c.startTime.Add(time.Minute)
  2308  		f.SetCreationTimestamp(metav1.NewTime(c.startTime))
  2309  		c.mux.Unlock()
  2310  	}
  2311  	return c.Client.Create(ctx, obj, opts...)
  2312  }
  2313  
  2314  func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, *unstructured.Unstructured) {
  2315  	kcpName := fmt.Sprintf("kcp-foo-%s", util.RandomString(6))
  2316  
  2317  	cluster := newCluster(&types.NamespacedName{Name: kcpName, Namespace: namespace})
  2318  	cluster.Spec = clusterv1.ClusterSpec{
  2319  		ControlPlaneRef: &corev1.ObjectReference{
  2320  			Kind:       "KubeadmControlPlane",
  2321  			Namespace:  namespace,
  2322  			Name:       kcpName,
  2323  			APIVersion: controlplanev1.GroupVersion.String(),
  2324  		},
  2325  	}
  2326  
  2327  	kcp := &controlplanev1.KubeadmControlPlane{
  2328  		TypeMeta: metav1.TypeMeta{
  2329  			APIVersion: controlplanev1.GroupVersion.String(),
  2330  			Kind:       "KubeadmControlPlane",
  2331  		},
  2332  		ObjectMeta: metav1.ObjectMeta{
  2333  			Name:      kcpName,
  2334  			Namespace: namespace,
  2335  			OwnerReferences: []metav1.OwnerReference{
  2336  				{
  2337  					Kind:       "Cluster",
  2338  					APIVersion: clusterv1.GroupVersion.String(),
  2339  					Name:       kcpName,
  2340  					UID:        "1",
  2341  				},
  2342  			},
  2343  		},
  2344  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  2345  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
  2346  				InfrastructureRef: corev1.ObjectReference{
  2347  					Kind:       "GenericInfrastructureMachineTemplate",
  2348  					Namespace:  namespace,
  2349  					Name:       "infra-foo",
  2350  					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2351  				},
  2352  			},
  2353  			Replicas: ptr.To[int32](int32(3)),
  2354  			Version:  "v1.16.6",
  2355  			RolloutStrategy: &controlplanev1.RolloutStrategy{
  2356  				Type: "RollingUpdate",
  2357  				RollingUpdate: &controlplanev1.RollingUpdate{
  2358  					MaxSurge: &intstr.IntOrString{
  2359  						IntVal: 1,
  2360  					},
  2361  				},
  2362  			},
  2363  		},
  2364  	}
  2365  
  2366  	genericMachineTemplate := &unstructured.Unstructured{
  2367  		Object: map[string]interface{}{
  2368  			"kind":       "GenericInfrastructureMachineTemplate",
  2369  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2370  			"metadata": map[string]interface{}{
  2371  				"name":      "infra-foo",
  2372  				"namespace": namespace,
  2373  			},
  2374  			"spec": map[string]interface{}{
  2375  				"template": map[string]interface{}{
  2376  					"spec": map[string]interface{}{
  2377  						"hello": "world",
  2378  					},
  2379  				},
  2380  			},
  2381  		},
  2382  	}
  2383  	return cluster, kcp, genericMachineTemplate
  2384  }
  2385  
  2386  func setKCPHealthy(kcp *controlplanev1.KubeadmControlPlane) {
  2387  	conditions.MarkTrue(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)
  2388  	conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthyCondition)
  2389  }
  2390  
  2391  func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ready bool) (*clusterv1.Machine, *corev1.Node) {
  2392  	machine := &clusterv1.Machine{
  2393  		TypeMeta: metav1.TypeMeta{
  2394  			Kind:       "Machine",
  2395  			APIVersion: clusterv1.GroupVersion.String(),
  2396  		},
  2397  		ObjectMeta: metav1.ObjectMeta{
  2398  			Namespace: cluster.Namespace,
  2399  			Name:      name,
  2400  			Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
  2401  			OwnerReferences: []metav1.OwnerReference{
  2402  				*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")),
  2403  			},
  2404  		},
  2405  		Spec: clusterv1.MachineSpec{
  2406  			ClusterName: cluster.Name,
  2407  			InfrastructureRef: corev1.ObjectReference{
  2408  				Kind:       builder.GenericInfrastructureMachineCRD.Kind,
  2409  				APIVersion: builder.GenericInfrastructureMachineCRD.APIVersion,
  2410  				Name:       builder.GenericInfrastructureMachineCRD.Name,
  2411  				Namespace:  builder.GenericInfrastructureMachineCRD.Namespace,
  2412  			},
  2413  		},
  2414  		Status: clusterv1.MachineStatus{
  2415  			NodeRef: &corev1.ObjectReference{
  2416  				Kind:       "Node",
  2417  				APIVersion: corev1.SchemeGroupVersion.String(),
  2418  				Name:       name,
  2419  			},
  2420  		},
  2421  	}
  2422  	webhook := webhooks.Machine{}
  2423  	if err := webhook.Default(ctx, machine); err != nil {
  2424  		panic(err)
  2425  	}
  2426  
  2427  	node := &corev1.Node{
  2428  		ObjectMeta: metav1.ObjectMeta{
  2429  			Name:   name,
  2430  			Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
  2431  		},
  2432  	}
  2433  
  2434  	if ready {
  2435  		node.Spec.ProviderID = fmt.Sprintf("test://%s", machine.GetName())
  2436  		node.Status.Conditions = []corev1.NodeCondition{
  2437  			{
  2438  				Type:   corev1.NodeReady,
  2439  				Status: corev1.ConditionTrue,
  2440  			},
  2441  		}
  2442  	}
  2443  	return machine, node
  2444  }
  2445  
  2446  func setMachineHealthy(m *clusterv1.Machine) {
  2447  	m.Status.NodeRef = &corev1.ObjectReference{
  2448  		Kind: "Node",
  2449  		Name: "node-1",
  2450  	}
  2451  	conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyCondition)
  2452  	conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyCondition)
  2453  	conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyCondition)
  2454  	conditions.MarkTrue(m, controlplanev1.MachineEtcdPodHealthyCondition)
  2455  	conditions.MarkTrue(m, controlplanev1.MachineEtcdMemberHealthyCondition)
  2456  }
  2457  
  2458  // newCluster return a CAPI cluster object.
  2459  func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster {
  2460  	return &clusterv1.Cluster{
  2461  		TypeMeta: metav1.TypeMeta{
  2462  			Kind:       "Cluster",
  2463  			APIVersion: clusterv1.GroupVersion.String(),
  2464  		},
  2465  		ObjectMeta: metav1.ObjectMeta{
  2466  			Namespace: namespacedName.Namespace,
  2467  			Name:      namespacedName.Name,
  2468  		},
  2469  	}
  2470  }
  2471  
  2472  func getTestCACert(key *rsa.PrivateKey) (*x509.Certificate, error) {
  2473  	cfg := certs.Config{
  2474  		CommonName: "kubernetes",
  2475  	}
  2476  
  2477  	now := time.Now().UTC()
  2478  
  2479  	tmpl := x509.Certificate{
  2480  		SerialNumber: new(big.Int).SetInt64(0),
  2481  		Subject: pkix.Name{
  2482  			CommonName:   cfg.CommonName,
  2483  			Organization: cfg.Organization,
  2484  		},
  2485  		NotBefore:             now.Add(time.Minute * -5),
  2486  		NotAfter:              now.Add(time.Hour * 24), // 1 day
  2487  		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
  2488  		MaxPathLenZero:        true,
  2489  		BasicConstraintsValid: true,
  2490  		MaxPathLen:            0,
  2491  		IsCA:                  true,
  2492  	}
  2493  
  2494  	b, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key)
  2495  	if err != nil {
  2496  		return nil, err
  2497  	}
  2498  
  2499  	c, err := x509.ParseCertificate(b)
  2500  	return c, err
  2501  }