sigs.k8s.io/cluster-api@v1.6.3/controlplane/kubeadm/internal/controllers/controller_test.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package controllers
    18  
    19  import (
    20  	"context"
    21  	"crypto/rand"
    22  	"crypto/rsa"
    23  	"crypto/x509"
    24  	"crypto/x509/pkix"
    25  	"fmt"
    26  	"math/big"
    27  	"sync"
    28  	"testing"
    29  	"time"
    30  
    31  	"github.com/blang/semver/v4"
    32  	. "github.com/onsi/gomega"
    33  	appsv1 "k8s.io/api/apps/v1"
    34  	corev1 "k8s.io/api/core/v1"
    35  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    37  	"k8s.io/apimachinery/pkg/types"
    38  	"k8s.io/apimachinery/pkg/util/intstr"
    39  	"k8s.io/client-go/tools/record"
    40  	"k8s.io/klog/v2/klogr"
    41  	"k8s.io/utils/pointer"
    42  	ctrl "sigs.k8s.io/controller-runtime"
    43  	"sigs.k8s.io/controller-runtime/pkg/client"
    44  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    45  	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    46  	"sigs.k8s.io/controller-runtime/pkg/log"
    47  
    48  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    49  	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    50  	"sigs.k8s.io/cluster-api/controllers/external"
    51  	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
    52  	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal"
    53  	controlplanev1webhooks "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/webhooks"
    54  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    55  	"sigs.k8s.io/cluster-api/feature"
    56  	"sigs.k8s.io/cluster-api/internal/contract"
    57  	"sigs.k8s.io/cluster-api/internal/test/builder"
    58  	"sigs.k8s.io/cluster-api/internal/util/ssa"
    59  	"sigs.k8s.io/cluster-api/internal/webhooks"
    60  	"sigs.k8s.io/cluster-api/util"
    61  	"sigs.k8s.io/cluster-api/util/certs"
    62  	"sigs.k8s.io/cluster-api/util/collections"
    63  	"sigs.k8s.io/cluster-api/util/conditions"
    64  	"sigs.k8s.io/cluster-api/util/kubeconfig"
    65  	"sigs.k8s.io/cluster-api/util/patch"
    66  	"sigs.k8s.io/cluster-api/util/secret"
    67  )
    68  
    69  func TestClusterToKubeadmControlPlane(t *testing.T) {
    70  	g := NewWithT(t)
    71  	fakeClient := newFakeClient()
    72  
    73  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
    74  	cluster.Spec = clusterv1.ClusterSpec{
    75  		ControlPlaneRef: &corev1.ObjectReference{
    76  			Kind:       "KubeadmControlPlane",
    77  			Namespace:  metav1.NamespaceDefault,
    78  			Name:       "kcp-foo",
    79  			APIVersion: controlplanev1.GroupVersion.String(),
    80  		},
    81  	}
    82  
    83  	expectedResult := []ctrl.Request{
    84  		{
    85  			NamespacedName: client.ObjectKey{
    86  				Namespace: cluster.Spec.ControlPlaneRef.Namespace,
    87  				Name:      cluster.Spec.ControlPlaneRef.Name},
    88  		},
    89  	}
    90  
    91  	r := &KubeadmControlPlaneReconciler{
    92  		Client:              fakeClient,
    93  		SecretCachingClient: fakeClient,
    94  		recorder:            record.NewFakeRecorder(32),
    95  	}
    96  
    97  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
    98  	g.Expect(got).To(BeComparableTo(expectedResult))
    99  }
   100  
   101  func TestClusterToKubeadmControlPlaneNoControlPlane(t *testing.T) {
   102  	g := NewWithT(t)
   103  	fakeClient := newFakeClient()
   104  
   105  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   106  
   107  	r := &KubeadmControlPlaneReconciler{
   108  		Client:              fakeClient,
   109  		SecretCachingClient: fakeClient,
   110  		recorder:            record.NewFakeRecorder(32),
   111  	}
   112  
   113  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
   114  	g.Expect(got).To(BeNil())
   115  }
   116  
   117  func TestClusterToKubeadmControlPlaneOtherControlPlane(t *testing.T) {
   118  	g := NewWithT(t)
   119  	fakeClient := newFakeClient()
   120  
   121  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   122  	cluster.Spec = clusterv1.ClusterSpec{
   123  		ControlPlaneRef: &corev1.ObjectReference{
   124  			Kind:       "OtherControlPlane",
   125  			Namespace:  metav1.NamespaceDefault,
   126  			Name:       "other-foo",
   127  			APIVersion: controlplanev1.GroupVersion.String(),
   128  		},
   129  	}
   130  
   131  	r := &KubeadmControlPlaneReconciler{
   132  		Client:              fakeClient,
   133  		SecretCachingClient: fakeClient,
   134  		recorder:            record.NewFakeRecorder(32),
   135  	}
   136  
   137  	got := r.ClusterToKubeadmControlPlane(ctx, cluster)
   138  	g.Expect(got).To(BeNil())
   139  }
   140  
   141  func TestReconcileReturnErrorWhenOwnerClusterIsMissing(t *testing.T) {
   142  	g := NewWithT(t)
   143  
   144  	ns, err := env.CreateNamespace(ctx, "test-reconcile-return-error")
   145  	g.Expect(err).ToNot(HaveOccurred())
   146  
   147  	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
   148  	g.Expect(env.Create(ctx, cluster)).To(Succeed())
   149  	g.Expect(env.Create(ctx, kcp)).To(Succeed())
   150  	defer func(do ...client.Object) {
   151  		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
   152  	}(kcp, ns)
   153  
   154  	r := &KubeadmControlPlaneReconciler{
   155  		Client:              env,
   156  		SecretCachingClient: secretCachingClient,
   157  		recorder:            record.NewFakeRecorder(32),
   158  	}
   159  
   160  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   161  	g.Expect(err).ToNot(HaveOccurred())
   162  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   163  
   164  	// calling reconcile should return error
   165  	g.Expect(env.CleanupAndWait(ctx, cluster)).To(Succeed())
   166  
   167  	g.Eventually(func() error {
   168  		_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   169  		return err
   170  	}, 10*time.Second).Should(HaveOccurred())
   171  }
   172  
// TestReconcileUpdateObservedGeneration verifies that status.observedGeneration
// is kept in sync with metadata.generation: once when the reconciler adds the
// finalizer on first reconcile, and again when status is written through the
// deferred patch after a spec change bumps the generation.
//
// NOTE(review): the test runs against the shared test env and is currently
// skipped (see the linked upstream issue).
func TestReconcileUpdateObservedGeneration(t *testing.T) {
	t.Skip("Disabling this test temporarily until we can get a fix for https://github.com/kubernetes/kubernetes/issues/80609 in controller runtime + switch to a live client in test env.")

	g := NewWithT(t)
	r := &KubeadmControlPlaneReconciler{
		Client:              env,
		SecretCachingClient: secretCachingClient,
		recorder:            record.NewFakeRecorder(32),
		managementCluster:   &internal.Management{Client: env.Client, Tracker: nil},
	}

	ns, err := env.CreateNamespace(ctx, "test-reconcile-upd-og")
	g.Expect(err).ToNot(HaveOccurred())

	cluster, kcp, _ := createClusterWithControlPlane(ns.Name)
	g.Expect(env.Create(ctx, cluster)).To(Succeed())
	g.Expect(env.Create(ctx, kcp)).To(Succeed())
	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(cluster, kcp, ns)

	// read kcp.Generation after create
	errGettingObject := env.Get(ctx, util.ObjectKey(kcp), kcp)
	g.Expect(errGettingObject).ToNot(HaveOccurred())
	generation := kcp.Generation

	// Set cluster.status.InfrastructureReady so we actually enter in the reconcile loop
	patch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"status\":{\"infrastructureReady\":%t}}", true)))
	g.Expect(env.Status().Patch(ctx, cluster, patch)).To(Succeed())

	// call reconcile the first time, so we can check if observedGeneration is set when adding a finalizer
	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
	g.Expect(err).ToNot(HaveOccurred())
	g.Expect(result).To(BeComparableTo(ctrl.Result{}))

	// Eventually because the status write is observed through the cached client.
	g.Eventually(func() int64 {
		errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
		g.Expect(errGettingObject).ToNot(HaveOccurred())
		return kcp.Status.ObservedGeneration
	}, 10*time.Second).Should(Equal(generation))

	// triggers a generation change by changing the spec
	kcp.Spec.Replicas = pointer.Int32(*kcp.Spec.Replicas + 2)
	g.Expect(env.Update(ctx, kcp)).To(Succeed())

	// read kcp.Generation after the update
	errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
	g.Expect(errGettingObject).ToNot(HaveOccurred())
	generation = kcp.Generation

	// call reconcile the second time, so we can check if observedGeneration is set when calling defer patch
	// NB. The call to reconcile fails because KCP is not properly setup (e.g. missing InfrastructureTemplate)
	// but this is not important because what we want is KCP to be patched
	_, _ = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})

	g.Eventually(func() int64 {
		errGettingObject = env.Get(ctx, util.ObjectKey(kcp), kcp)
		g.Expect(errGettingObject).ToNot(HaveOccurred())
		return kcp.Status.ObservedGeneration
	}, 10*time.Second).Should(Equal(generation))
}
   234  
   235  func TestReconcileNoClusterOwnerRef(t *testing.T) {
   236  	g := NewWithT(t)
   237  
   238  	kcp := &controlplanev1.KubeadmControlPlane{
   239  		ObjectMeta: metav1.ObjectMeta{
   240  			Namespace: metav1.NamespaceDefault,
   241  			Name:      "foo",
   242  		},
   243  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   244  			Version: "v1.16.6",
   245  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   246  				InfrastructureRef: corev1.ObjectReference{
   247  					Kind:       "UnknownInfraMachine",
   248  					APIVersion: "test/v1alpha1",
   249  					Name:       "foo",
   250  					Namespace:  metav1.NamespaceDefault,
   251  				},
   252  			},
   253  		},
   254  	}
   255  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   256  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   257  	_, err := webhook.ValidateCreate(ctx, kcp)
   258  	g.Expect(err).ToNot(HaveOccurred())
   259  
   260  	fakeClient := newFakeClient(kcp.DeepCopy())
   261  	r := &KubeadmControlPlaneReconciler{
   262  		Client:              fakeClient,
   263  		SecretCachingClient: fakeClient,
   264  		recorder:            record.NewFakeRecorder(32),
   265  	}
   266  
   267  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   268  	g.Expect(err).ToNot(HaveOccurred())
   269  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   270  
   271  	machineList := &clusterv1.MachineList{}
   272  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   273  	g.Expect(machineList.Items).To(BeEmpty())
   274  }
   275  
   276  func TestReconcileNoKCP(t *testing.T) {
   277  	g := NewWithT(t)
   278  
   279  	kcp := &controlplanev1.KubeadmControlPlane{
   280  		ObjectMeta: metav1.ObjectMeta{
   281  			Namespace: metav1.NamespaceDefault,
   282  			Name:      "foo",
   283  		},
   284  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   285  			Version: "v1.16.6",
   286  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   287  				InfrastructureRef: corev1.ObjectReference{
   288  					Kind:       "UnknownInfraMachine",
   289  					APIVersion: "test/v1alpha1",
   290  					Name:       "foo",
   291  					Namespace:  metav1.NamespaceDefault,
   292  				},
   293  			},
   294  		},
   295  	}
   296  
   297  	fakeClient := newFakeClient()
   298  	r := &KubeadmControlPlaneReconciler{
   299  		Client:              fakeClient,
   300  		SecretCachingClient: fakeClient,
   301  		recorder:            record.NewFakeRecorder(32),
   302  	}
   303  
   304  	_, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   305  	g.Expect(err).ToNot(HaveOccurred())
   306  }
   307  
   308  func TestReconcileNoCluster(t *testing.T) {
   309  	g := NewWithT(t)
   310  
   311  	kcp := &controlplanev1.KubeadmControlPlane{
   312  		ObjectMeta: metav1.ObjectMeta{
   313  			Namespace: metav1.NamespaceDefault,
   314  			Name:      "foo",
   315  			OwnerReferences: []metav1.OwnerReference{
   316  				{
   317  					Kind:       "Cluster",
   318  					APIVersion: clusterv1.GroupVersion.String(),
   319  					Name:       "foo",
   320  				},
   321  			},
   322  		},
   323  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   324  			Version: "v1.16.6",
   325  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   326  				InfrastructureRef: corev1.ObjectReference{
   327  					Kind:       "UnknownInfraMachine",
   328  					APIVersion: "test/v1alpha1",
   329  					Name:       "foo",
   330  					Namespace:  metav1.NamespaceDefault,
   331  				},
   332  			},
   333  		},
   334  	}
   335  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   336  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   337  	_, err := webhook.ValidateCreate(ctx, kcp)
   338  	g.Expect(err).ToNot(HaveOccurred())
   339  
   340  	fakeClient := newFakeClient(kcp.DeepCopy())
   341  	r := &KubeadmControlPlaneReconciler{
   342  		Client:              fakeClient,
   343  		SecretCachingClient: fakeClient,
   344  		recorder:            record.NewFakeRecorder(32),
   345  	}
   346  
   347  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   348  	g.Expect(err).To(HaveOccurred())
   349  
   350  	machineList := &clusterv1.MachineList{}
   351  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   352  	g.Expect(machineList.Items).To(BeEmpty())
   353  }
   354  
   355  func TestReconcilePaused(t *testing.T) {
   356  	g := NewWithT(t)
   357  
   358  	clusterName := "foo"
   359  
   360  	// Test: cluster is paused and kcp is not
   361  	cluster := newCluster(&types.NamespacedName{Namespace: metav1.NamespaceDefault, Name: clusterName})
   362  	cluster.Spec.Paused = true
   363  	kcp := &controlplanev1.KubeadmControlPlane{
   364  		ObjectMeta: metav1.ObjectMeta{
   365  			Namespace: metav1.NamespaceDefault,
   366  			Name:      clusterName,
   367  			OwnerReferences: []metav1.OwnerReference{
   368  				{
   369  					Kind:       "Cluster",
   370  					APIVersion: clusterv1.GroupVersion.String(),
   371  					Name:       clusterName,
   372  				},
   373  			},
   374  		},
   375  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   376  			Version: "v1.16.6",
   377  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   378  				InfrastructureRef: corev1.ObjectReference{
   379  					Kind:       "UnknownInfraMachine",
   380  					APIVersion: "test/v1alpha1",
   381  					Name:       "foo",
   382  					Namespace:  metav1.NamespaceDefault,
   383  				},
   384  			},
   385  		},
   386  	}
   387  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   388  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   389  	_, err := webhook.ValidateCreate(ctx, kcp)
   390  	g.Expect(err).ToNot(HaveOccurred())
   391  	fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
   392  	r := &KubeadmControlPlaneReconciler{
   393  		Client:              fakeClient,
   394  		SecretCachingClient: fakeClient,
   395  		recorder:            record.NewFakeRecorder(32),
   396  	}
   397  
   398  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   399  	g.Expect(err).ToNot(HaveOccurred())
   400  
   401  	machineList := &clusterv1.MachineList{}
   402  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   403  	g.Expect(machineList.Items).To(BeEmpty())
   404  
   405  	// Test: kcp is paused and cluster is not
   406  	cluster.Spec.Paused = false
   407  	kcp.ObjectMeta.Annotations = map[string]string{}
   408  	kcp.ObjectMeta.Annotations[clusterv1.PausedAnnotation] = "paused"
   409  	_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   410  	g.Expect(err).ToNot(HaveOccurred())
   411  }
   412  
   413  func TestReconcileClusterNoEndpoints(t *testing.T) {
   414  	g := NewWithT(t)
   415  
   416  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   417  	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
   418  
   419  	kcp := &controlplanev1.KubeadmControlPlane{
   420  		ObjectMeta: metav1.ObjectMeta{
   421  			Namespace: cluster.Namespace,
   422  			Name:      "foo",
   423  			OwnerReferences: []metav1.OwnerReference{
   424  				{
   425  					Kind:       "Cluster",
   426  					APIVersion: clusterv1.GroupVersion.String(),
   427  					Name:       cluster.Name,
   428  				},
   429  			},
   430  		},
   431  		Spec: controlplanev1.KubeadmControlPlaneSpec{
   432  			Version: "v1.16.6",
   433  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
   434  				InfrastructureRef: corev1.ObjectReference{
   435  					Kind:       "UnknownInfraMachine",
   436  					APIVersion: "test/v1alpha1",
   437  					Name:       "foo",
   438  					Namespace:  metav1.NamespaceDefault,
   439  				},
   440  			},
   441  		},
   442  	}
   443  	webhook := &controlplanev1webhooks.KubeadmControlPlane{}
   444  	g.Expect(webhook.Default(ctx, kcp)).To(Succeed())
   445  	_, err := webhook.ValidateCreate(ctx, kcp)
   446  	g.Expect(err).ToNot(HaveOccurred())
   447  
   448  	fakeClient := newFakeClient(kcp.DeepCopy(), cluster.DeepCopy())
   449  	r := &KubeadmControlPlaneReconciler{
   450  		Client:              fakeClient,
   451  		SecretCachingClient: fakeClient,
   452  		recorder:            record.NewFakeRecorder(32),
   453  		managementCluster: &fakeManagementCluster{
   454  			Management: &internal.Management{Client: fakeClient},
   455  			Workload:   fakeWorkloadCluster{},
   456  		},
   457  	}
   458  
   459  	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   460  	g.Expect(err).ToNot(HaveOccurred())
   461  	// this first requeue is to add finalizer
   462  	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
   463  	g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
   464  	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
   465  
   466  	result, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
   467  	g.Expect(err).ToNot(HaveOccurred())
   468  	// TODO: this should stop to re-queue as soon as we have a proper remote cluster cache in place.
   469  	g.Expect(result).To(BeComparableTo(ctrl.Result{Requeue: false, RequeueAfter: 20 * time.Second}))
   470  	g.Expect(r.Client.Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
   471  
   472  	// Always expect that the Finalizer is set on the passed in resource
   473  	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
   474  
   475  	g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
   476  
   477  	_, err = secret.GetFromNamespacedName(ctx, fakeClient, client.ObjectKey{Namespace: metav1.NamespaceDefault, Name: "foo"}, secret.ClusterCA)
   478  	g.Expect(err).ToNot(HaveOccurred())
   479  
   480  	machineList := &clusterv1.MachineList{}
   481  	g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(metav1.NamespaceDefault))).To(Succeed())
   482  	g.Expect(machineList.Items).To(BeEmpty())
   483  }
   484  
   485  func TestKubeadmControlPlaneReconciler_adoption(t *testing.T) {
   486  	version := "v2.0.0"
   487  	t.Run("adopts existing Machines", func(t *testing.T) {
   488  		g := NewWithT(t)
   489  
   490  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   491  		cluster.Spec.ControlPlaneEndpoint.Host = "bar"
   492  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   493  		cluster.Status.InfrastructureReady = true
   494  		kcp.Spec.Version = version
   495  
   496  		fmc := &fakeManagementCluster{
   497  			Machines: collections.Machines{},
   498  			Workload: fakeWorkloadCluster{},
   499  		}
   500  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   501  		for i := 0; i < 3; i++ {
   502  			name := fmt.Sprintf("test-%d", i)
   503  			m := &clusterv1.Machine{
   504  				ObjectMeta: metav1.ObjectMeta{
   505  					Namespace: cluster.Namespace,
   506  					Name:      name,
   507  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   508  				},
   509  				Spec: clusterv1.MachineSpec{
   510  					Bootstrap: clusterv1.Bootstrap{
   511  						ConfigRef: &corev1.ObjectReference{
   512  							APIVersion: bootstrapv1.GroupVersion.String(),
   513  							Kind:       "KubeadmConfig",
   514  							Name:       name,
   515  						},
   516  					},
   517  					Version: &version,
   518  				},
   519  			}
   520  			cfg := &bootstrapv1.KubeadmConfig{
   521  				ObjectMeta: metav1.ObjectMeta{
   522  					Namespace: cluster.Namespace,
   523  					Name:      name,
   524  				},
   525  			}
   526  			objs = append(objs, m, cfg)
   527  			fmc.Machines.Insert(m)
   528  		}
   529  
   530  		fakeClient := newFakeClient(objs...)
   531  		fmc.Reader = fakeClient
   532  		r := &KubeadmControlPlaneReconciler{
   533  			Client:                    fakeClient,
   534  			SecretCachingClient:       fakeClient,
   535  			managementCluster:         fmc,
   536  			managementClusterUncached: fmc,
   537  		}
   538  
   539  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   540  		g.Expect(err).ToNot(HaveOccurred())
   541  		g.Expect(adoptableMachineFound).To(BeTrue())
   542  
   543  		machineList := &clusterv1.MachineList{}
   544  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   545  		g.Expect(machineList.Items).To(HaveLen(3))
   546  		for _, machine := range machineList.Items {
   547  			g.Expect(machine.OwnerReferences).To(HaveLen(1))
   548  			g.Expect(machine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   549  			// Machines are adopted but since they are not originally created by KCP, infra template annotation will be missing.
   550  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromGroupKindAnnotation))
   551  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromNameAnnotation))
   552  		}
   553  	})
   554  
   555  	t.Run("adopts v1alpha2 cluster secrets", func(t *testing.T) {
   556  		g := NewWithT(t)
   557  
   558  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   559  		cluster.Spec.ControlPlaneEndpoint.Host = "validhost"
   560  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   561  		cluster.Status.InfrastructureReady = true
   562  		kcp.Spec.Version = version
   563  
   564  		fmc := &fakeManagementCluster{
   565  			Machines: collections.Machines{},
   566  			Workload: fakeWorkloadCluster{},
   567  		}
   568  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   569  		for i := 0; i < 3; i++ {
   570  			name := fmt.Sprintf("test-%d", i)
   571  			m := &clusterv1.Machine{
   572  				ObjectMeta: metav1.ObjectMeta{
   573  					Namespace: cluster.Namespace,
   574  					Name:      name,
   575  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   576  				},
   577  				Spec: clusterv1.MachineSpec{
   578  					Bootstrap: clusterv1.Bootstrap{
   579  						ConfigRef: &corev1.ObjectReference{
   580  							APIVersion: bootstrapv1.GroupVersion.String(),
   581  							Kind:       "KubeadmConfig",
   582  							Name:       name,
   583  						},
   584  					},
   585  					Version: &version,
   586  				},
   587  			}
   588  			cfg := &bootstrapv1.KubeadmConfig{
   589  				TypeMeta: metav1.TypeMeta{
   590  					APIVersion: bootstrapv1.GroupVersion.String(),
   591  					Kind:       "KubeadmConfig",
   592  				},
   593  				ObjectMeta: metav1.ObjectMeta{
   594  					Namespace: cluster.Namespace,
   595  					Name:      name,
   596  					UID:       types.UID(fmt.Sprintf("my-uid-%d", i)),
   597  				},
   598  			}
   599  
   600  			// A simulacrum of the various Certificate and kubeconfig secrets
   601  			// it's a little weird that this is one per KubeadmConfig rather than just whichever config was "first,"
   602  			// but the intent is to ensure that the owner is changed regardless of which Machine we start with
   603  			clusterSecret := &corev1.Secret{
   604  				ObjectMeta: metav1.ObjectMeta{
   605  					Namespace: cluster.Namespace,
   606  					Name:      fmt.Sprintf("important-cluster-secret-%d", i),
   607  					Labels: map[string]string{
   608  						"cluster.x-k8s.io/cluster-name": cluster.Name,
   609  						"previous-owner":                "kubeadmconfig",
   610  					},
   611  					// See: https://github.com/kubernetes-sigs/cluster-api-bootstrap-provider-kubeadm/blob/38af74d92056e64e571b9ea1d664311769779453/internal/cluster/certificates.go#L323-L330
   612  					OwnerReferences: []metav1.OwnerReference{
   613  						{
   614  							APIVersion: bootstrapv1.GroupVersion.String(),
   615  							Kind:       "KubeadmConfig",
   616  							Name:       cfg.Name,
   617  							UID:        cfg.UID,
   618  						},
   619  					},
   620  				},
   621  			}
   622  			objs = append(objs, m, cfg, clusterSecret)
   623  			fmc.Machines.Insert(m)
   624  		}
   625  
   626  		fakeClient := newFakeClient(objs...)
   627  		fmc.Reader = fakeClient
   628  		r := &KubeadmControlPlaneReconciler{
   629  			Client:                    fakeClient,
   630  			SecretCachingClient:       fakeClient,
   631  			managementCluster:         fmc,
   632  			managementClusterUncached: fmc,
   633  		}
   634  
   635  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   636  		g.Expect(err).ToNot(HaveOccurred())
   637  		g.Expect(adoptableMachineFound).To(BeTrue())
   638  
   639  		machineList := &clusterv1.MachineList{}
   640  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   641  		g.Expect(machineList.Items).To(HaveLen(3))
   642  		for _, machine := range machineList.Items {
   643  			g.Expect(machine.OwnerReferences).To(HaveLen(1))
   644  			g.Expect(machine.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   645  			// Machines are adopted but since they are not originally created by KCP, infra template annotation will be missing.
   646  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromGroupKindAnnotation))
   647  			g.Expect(machine.GetAnnotations()).NotTo(HaveKey(clusterv1.TemplateClonedFromNameAnnotation))
   648  		}
   649  
   650  		secrets := &corev1.SecretList{}
   651  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"previous-owner": "kubeadmconfig"})).To(Succeed())
   652  		g.Expect(secrets.Items).To(HaveLen(3))
   653  		for _, secret := range secrets.Items {
   654  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   655  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   656  		}
   657  	})
   658  
   659  	t.Run("Deleted KubeadmControlPlanes don't adopt machines", func(t *testing.T) {
   660  		// Usually we won't get into the inner reconcile with a deleted control plane, but it's possible when deleting with "orphanDependents":
   661  		// 1. The deletion timestamp is set in the API server, but our cache has not yet updated
   662  		// 2. The garbage collector removes our ownership reference from a Machine, triggering a re-reconcile (or we get unlucky with the periodic reconciliation)
   663  		// 3. We get into the inner reconcile function and re-adopt the Machine
   664  		// 4. The update to our cache for our deletion timestamp arrives
   665  		g := NewWithT(t)
   666  
   667  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   668  		cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com1"
   669  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   670  		cluster.Status.InfrastructureReady = true
   671  		kcp.Spec.Version = version
   672  
   673  		now := metav1.Now()
   674  		kcp.DeletionTimestamp = &now
   675  		// We also have to set a finalizer as fake client doesn't accept objects
   676  		// with a deletionTimestamp without a finalizer.
   677  		kcp.Finalizers = []string{"block-deletion"}
   678  
   679  		fmc := &fakeManagementCluster{
   680  			Machines: collections.Machines{},
   681  			Workload: fakeWorkloadCluster{},
   682  		}
   683  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   684  		for i := 0; i < 3; i++ {
   685  			name := fmt.Sprintf("test-%d", i)
   686  			m := &clusterv1.Machine{
   687  				ObjectMeta: metav1.ObjectMeta{
   688  					Namespace: cluster.Namespace,
   689  					Name:      name,
   690  					Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   691  				},
   692  				Spec: clusterv1.MachineSpec{
   693  					Bootstrap: clusterv1.Bootstrap{
   694  						ConfigRef: &corev1.ObjectReference{
   695  							APIVersion: bootstrapv1.GroupVersion.String(),
   696  							Kind:       "KubeadmConfig",
   697  							Name:       name,
   698  						},
   699  					},
   700  					Version: &version,
   701  				},
   702  			}
   703  			cfg := &bootstrapv1.KubeadmConfig{
   704  				ObjectMeta: metav1.ObjectMeta{
   705  					Namespace: cluster.Namespace,
   706  					Name:      name,
   707  				},
   708  			}
   709  			objs = append(objs, m, cfg)
   710  			fmc.Machines.Insert(m)
   711  		}
   712  		fakeClient := newFakeClient(objs...)
   713  		fmc.Reader = fakeClient
   714  		r := &KubeadmControlPlaneReconciler{
   715  			Client:                    fakeClient,
   716  			SecretCachingClient:       fakeClient,
   717  			managementCluster:         fmc,
   718  			managementClusterUncached: fmc,
   719  		}
   720  
   721  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   722  		g.Expect(err).ToNot(HaveOccurred())
   723  		g.Expect(adoptableMachineFound).To(BeFalse())
   724  
   725  		machineList := &clusterv1.MachineList{}
   726  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   727  		g.Expect(machineList.Items).To(HaveLen(3))
   728  		for _, machine := range machineList.Items {
   729  			g.Expect(machine.OwnerReferences).To(BeEmpty())
   730  		}
   731  	})
   732  
   733  	t.Run("Do not adopt Machines that are more than one version old", func(t *testing.T) {
   734  		g := NewWithT(t)
   735  
   736  		cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   737  		cluster.Spec.ControlPlaneEndpoint.Host = "nodomain.example.com2"
   738  		cluster.Spec.ControlPlaneEndpoint.Port = 6443
   739  		cluster.Status.InfrastructureReady = true
   740  		kcp.Spec.Version = "v1.17.0"
   741  
   742  		fmc := &fakeManagementCluster{
   743  			Machines: collections.Machines{
   744  				"test0": &clusterv1.Machine{
   745  					ObjectMeta: metav1.ObjectMeta{
   746  						Namespace: cluster.Namespace,
   747  						Name:      "test0",
   748  						Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
   749  					},
   750  					Spec: clusterv1.MachineSpec{
   751  						Bootstrap: clusterv1.Bootstrap{
   752  							ConfigRef: &corev1.ObjectReference{
   753  								APIVersion: bootstrapv1.GroupVersion.String(),
   754  								Kind:       "KubeadmConfig",
   755  							},
   756  						},
   757  						Version: pointer.String("v1.15.0"),
   758  					},
   759  				},
   760  			},
   761  			Workload: fakeWorkloadCluster{},
   762  		}
   763  
   764  		fakeClient := newFakeClient(builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy(), fmc.Machines["test0"].DeepCopy())
   765  		fmc.Reader = fakeClient
   766  		recorder := record.NewFakeRecorder(32)
   767  		r := &KubeadmControlPlaneReconciler{
   768  			Client:                    fakeClient,
   769  			SecretCachingClient:       fakeClient,
   770  			recorder:                  recorder,
   771  			managementCluster:         fmc,
   772  			managementClusterUncached: fmc,
   773  		}
   774  
   775  		_, adoptableMachineFound, err := r.initControlPlaneScope(ctx, cluster, kcp)
   776  		g.Expect(err).ToNot(HaveOccurred())
   777  		g.Expect(adoptableMachineFound).To(BeTrue())
   778  
   779  		// Message: Warning AdoptionFailed Could not adopt Machine test/test0: its version ("v1.15.0") is outside supported +/- one minor version skew from KCP's ("v1.17.0")
   780  		g.Expect(recorder.Events).To(Receive(ContainSubstring("minor version")))
   781  
   782  		machineList := &clusterv1.MachineList{}
   783  		g.Expect(fakeClient.List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
   784  		g.Expect(machineList.Items).To(HaveLen(1))
   785  		for _, machine := range machineList.Items {
   786  			g.Expect(machine.OwnerReferences).To(BeEmpty())
   787  		}
   788  	})
   789  }
   790  
   791  func TestKubeadmControlPlaneReconciler_ensureOwnerReferences(t *testing.T) {
   792  	g := NewWithT(t)
   793  
   794  	cluster, kcp, tmpl := createClusterWithControlPlane(metav1.NamespaceDefault)
   795  	cluster.Spec.ControlPlaneEndpoint.Host = "bar"
   796  	cluster.Spec.ControlPlaneEndpoint.Port = 6443
   797  	cluster.Status.InfrastructureReady = true
   798  	kcp.Spec.Version = "v1.21.0"
   799  	key, err := certs.NewPrivateKey()
   800  	g.Expect(err).ToNot(HaveOccurred())
   801  	crt, err := getTestCACert(key)
   802  	g.Expect(err).ToNot(HaveOccurred())
   803  
   804  	clusterSecret := &corev1.Secret{
   805  		// The Secret's Type is used by KCP to determine whether it is user-provided.
   806  		// clusterv1.ClusterSecretType signals that the Secret is CAPI-provided.
   807  		ObjectMeta: metav1.ObjectMeta{
   808  			Namespace: cluster.Namespace,
   809  			Name:      "",
   810  			Labels: map[string]string{
   811  				"cluster.x-k8s.io/cluster-name": cluster.Name,
   812  				"testing":                       "yes",
   813  			},
   814  		},
   815  		Data: map[string][]byte{
   816  			secret.TLSCrtDataName: certs.EncodeCertPEM(crt),
   817  			secret.TLSKeyDataName: certs.EncodePrivateKeyPEM(key),
   818  		},
   819  	}
   820  
   821  	kcpOwner := *metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))
   822  
   823  	t.Run("add KCP owner for secrets with no controller reference", func(t *testing.T) {
   824  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   825  		certificates := secret.Certificates{
   826  			{Purpose: secret.ClusterCA},
   827  			{Purpose: secret.FrontProxyCA},
   828  			{Purpose: secret.ServiceAccount},
   829  			{Purpose: secret.EtcdCA},
   830  		}
   831  		for _, c := range certificates {
   832  			s := clusterSecret.DeepCopy()
   833  			// Set the secret name to the purpose
   834  			s.Name = secret.Name(cluster.Name, c.Purpose)
   835  			// Set the Secret Type to clusterv1.ClusterSecretType which signals this Secret was generated by CAPI.
   836  			s.Type = clusterv1.ClusterSecretType
   837  
   838  			// Store the secret in the certificate.
   839  			c.Secret = s
   840  
   841  			objs = append(objs, s)
   842  		}
   843  
   844  		fakeClient := newFakeClient(objs...)
   845  
   846  		r := KubeadmControlPlaneReconciler{
   847  			Client:              fakeClient,
   848  			SecretCachingClient: fakeClient,
   849  		}
   850  		err = r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   851  		g.Expect(err).ToNot(HaveOccurred())
   852  
   853  		secrets := &corev1.SecretList{}
   854  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   855  		for _, secret := range secrets.Items {
   856  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   857  		}
   858  	})
   859  
   860  	t.Run("replace non-KCP controller with KCP controller reference", func(t *testing.T) {
   861  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   862  		certificates := secret.Certificates{
   863  			{Purpose: secret.ClusterCA},
   864  			{Purpose: secret.FrontProxyCA},
   865  			{Purpose: secret.ServiceAccount},
   866  			{Purpose: secret.EtcdCA},
   867  		}
   868  		for _, c := range certificates {
   869  			s := clusterSecret.DeepCopy()
   870  			// Set the secret name to the purpose
   871  			s.Name = secret.Name(cluster.Name, c.Purpose)
   872  			// Set the Secret Type to clusterv1.ClusterSecretType which signals this Secret was generated by CAPI.
   873  			s.Type = clusterv1.ClusterSecretType
   874  
   875  			// Set the a controller owner reference of an unknown type on the secret.
   876  			s.SetOwnerReferences([]metav1.OwnerReference{
   877  				{
   878  					APIVersion: bootstrapv1.GroupVersion.String(),
   879  					// KCP should take ownership of any Secret of the correct type linked to the Cluster.
   880  					Kind:       "OtherController",
   881  					Name:       "name",
   882  					UID:        "uid",
   883  					Controller: pointer.Bool(true),
   884  				},
   885  			})
   886  
   887  			// Store the secret in the certificate.
   888  			c.Secret = s
   889  
   890  			objs = append(objs, s)
   891  		}
   892  
   893  		fakeClient := newFakeClient(objs...)
   894  
   895  		r := KubeadmControlPlaneReconciler{
   896  			Client:              fakeClient,
   897  			SecretCachingClient: fakeClient,
   898  		}
   899  		err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   900  		g.Expect(err).ToNot(HaveOccurred())
   901  
   902  		secrets := &corev1.SecretList{}
   903  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   904  		for _, secret := range secrets.Items {
   905  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   906  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane"))))
   907  		}
   908  	})
   909  
   910  	t.Run("does not add owner reference to user-provided secrets", func(t *testing.T) {
   911  		g := NewWithT(t)
   912  		objs := []client.Object{builder.GenericInfrastructureMachineTemplateCRD, cluster.DeepCopy(), kcp.DeepCopy(), tmpl.DeepCopy()}
   913  		certificates := secret.Certificates{
   914  			{Purpose: secret.ClusterCA},
   915  			{Purpose: secret.FrontProxyCA},
   916  			{Purpose: secret.ServiceAccount},
   917  			{Purpose: secret.EtcdCA},
   918  		}
   919  		for _, c := range certificates {
   920  			s := clusterSecret.DeepCopy()
   921  			// Set the secret name to the purpose
   922  			s.Name = secret.Name(cluster.Name, c.Purpose)
   923  			// Set the Secret Type to any type which signals this Secret was is user-provided.
   924  			s.Type = corev1.SecretTypeOpaque
   925  			// Set the a controller owner reference of an unknown type on the secret.
   926  			s.SetOwnerReferences(util.EnsureOwnerRef(s.GetOwnerReferences(),
   927  				metav1.OwnerReference{
   928  					APIVersion: bootstrapv1.GroupVersion.String(),
   929  					// This owner reference to a different controller should be preserved.
   930  					Kind:               "OtherController",
   931  					Name:               kcp.Name,
   932  					UID:                kcp.UID,
   933  					Controller:         pointer.Bool(true),
   934  					BlockOwnerDeletion: pointer.Bool(true),
   935  				},
   936  			))
   937  
   938  			// Store the secret in the certificate.
   939  			c.Secret = s
   940  
   941  			objs = append(objs, s)
   942  		}
   943  
   944  		fakeClient := newFakeClient(objs...)
   945  
   946  		r := KubeadmControlPlaneReconciler{
   947  			Client:              fakeClient,
   948  			SecretCachingClient: fakeClient,
   949  		}
   950  		err := r.ensureCertificatesOwnerRef(ctx, certificates, kcpOwner)
   951  		g.Expect(err).ToNot(HaveOccurred())
   952  
   953  		secrets := &corev1.SecretList{}
   954  		g.Expect(fakeClient.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabels{"testing": "yes"})).To(Succeed())
   955  		for _, secret := range secrets.Items {
   956  			g.Expect(secret.OwnerReferences).To(HaveLen(1))
   957  			g.Expect(secret.OwnerReferences).To(ContainElement(*metav1.NewControllerRef(kcp, bootstrapv1.GroupVersion.WithKind("OtherController"))))
   958  		}
   959  	})
   960  }
   961  
   962  func TestReconcileCertificateExpiries(t *testing.T) {
   963  	g := NewWithT(t)
   964  
   965  	preExistingExpiry := time.Now().Add(5 * 24 * time.Hour)
   966  	detectedExpiry := time.Now().Add(25 * 24 * time.Hour)
   967  
   968  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
   969  	kcp := &controlplanev1.KubeadmControlPlane{
   970  		Status: controlplanev1.KubeadmControlPlaneStatus{Initialized: true},
   971  	}
   972  	machineWithoutExpiryAnnotation := &clusterv1.Machine{
   973  		ObjectMeta: metav1.ObjectMeta{
   974  			Name: "machineWithoutExpiryAnnotation",
   975  		},
   976  		Spec: clusterv1.MachineSpec{
   977  			InfrastructureRef: corev1.ObjectReference{
   978  				Kind:       "GenericMachine",
   979  				APIVersion: "generic.io/v1",
   980  				Namespace:  metav1.NamespaceDefault,
   981  				Name:       "machineWithoutExpiryAnnotation-infra",
   982  			},
   983  			Bootstrap: clusterv1.Bootstrap{
   984  				ConfigRef: &corev1.ObjectReference{
   985  					Kind:       "KubeadmConfig",
   986  					APIVersion: bootstrapv1.GroupVersion.String(),
   987  					Namespace:  metav1.NamespaceDefault,
   988  					Name:       "machineWithoutExpiryAnnotation-bootstrap",
   989  				},
   990  			},
   991  		},
   992  		Status: clusterv1.MachineStatus{
   993  			NodeRef: &corev1.ObjectReference{
   994  				Name: "machineWithoutExpiryAnnotation",
   995  			},
   996  		},
   997  	}
   998  	machineWithoutExpiryAnnotationKubeadmConfig := &bootstrapv1.KubeadmConfig{
   999  		ObjectMeta: metav1.ObjectMeta{
  1000  			Name: "machineWithoutExpiryAnnotation-bootstrap",
  1001  		},
  1002  	}
  1003  	machineWithExpiryAnnotation := &clusterv1.Machine{
  1004  		ObjectMeta: metav1.ObjectMeta{
  1005  			Name: "machineWithExpiryAnnotation",
  1006  		},
  1007  		Spec: clusterv1.MachineSpec{
  1008  			InfrastructureRef: corev1.ObjectReference{
  1009  				Kind:       "GenericMachine",
  1010  				APIVersion: "generic.io/v1",
  1011  				Namespace:  metav1.NamespaceDefault,
  1012  				Name:       "machineWithExpiryAnnotation-infra",
  1013  			},
  1014  			Bootstrap: clusterv1.Bootstrap{
  1015  				ConfigRef: &corev1.ObjectReference{
  1016  					Kind:       "KubeadmConfig",
  1017  					APIVersion: bootstrapv1.GroupVersion.String(),
  1018  					Namespace:  metav1.NamespaceDefault,
  1019  					Name:       "machineWithExpiryAnnotation-bootstrap",
  1020  				},
  1021  			},
  1022  		},
  1023  		Status: clusterv1.MachineStatus{
  1024  			NodeRef: &corev1.ObjectReference{
  1025  				Name: "machineWithExpiryAnnotation",
  1026  			},
  1027  		},
  1028  	}
  1029  	machineWithExpiryAnnotationKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1030  		ObjectMeta: metav1.ObjectMeta{
  1031  			Name: "machineWithExpiryAnnotation-bootstrap",
  1032  			Annotations: map[string]string{
  1033  				clusterv1.MachineCertificatesExpiryDateAnnotation: preExistingExpiry.Format(time.RFC3339),
  1034  			},
  1035  		},
  1036  	}
  1037  	machineWithDeletionTimestamp := &clusterv1.Machine{
  1038  		ObjectMeta: metav1.ObjectMeta{
  1039  			Name:              "machineWithDeletionTimestamp",
  1040  			DeletionTimestamp: &metav1.Time{Time: time.Now()},
  1041  		},
  1042  		Spec: clusterv1.MachineSpec{
  1043  			InfrastructureRef: corev1.ObjectReference{
  1044  				Kind:       "GenericMachine",
  1045  				APIVersion: "generic.io/v1",
  1046  				Namespace:  metav1.NamespaceDefault,
  1047  				Name:       "machineWithDeletionTimestamp-infra",
  1048  			},
  1049  			Bootstrap: clusterv1.Bootstrap{
  1050  				ConfigRef: &corev1.ObjectReference{
  1051  					Kind:       "KubeadmConfig",
  1052  					APIVersion: bootstrapv1.GroupVersion.String(),
  1053  					Namespace:  metav1.NamespaceDefault,
  1054  					Name:       "machineWithDeletionTimestamp-bootstrap",
  1055  				},
  1056  			},
  1057  		},
  1058  		Status: clusterv1.MachineStatus{
  1059  			NodeRef: &corev1.ObjectReference{
  1060  				Name: "machineWithDeletionTimestamp",
  1061  			},
  1062  		},
  1063  	}
  1064  	machineWithDeletionTimestampKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1065  		ObjectMeta: metav1.ObjectMeta{
  1066  			Name: "machineWithDeletionTimestamp-bootstrap",
  1067  		},
  1068  	}
  1069  	machineWithoutNodeRef := &clusterv1.Machine{
  1070  		ObjectMeta: metav1.ObjectMeta{
  1071  			Name: "machineWithoutNodeRef",
  1072  		},
  1073  		Spec: clusterv1.MachineSpec{
  1074  			InfrastructureRef: corev1.ObjectReference{
  1075  				Kind:       "GenericMachine",
  1076  				APIVersion: "generic.io/v1",
  1077  				Namespace:  metav1.NamespaceDefault,
  1078  				Name:       "machineWithoutNodeRef-infra",
  1079  			},
  1080  			Bootstrap: clusterv1.Bootstrap{
  1081  				ConfigRef: &corev1.ObjectReference{
  1082  					Kind:       "KubeadmConfig",
  1083  					APIVersion: bootstrapv1.GroupVersion.String(),
  1084  					Namespace:  metav1.NamespaceDefault,
  1085  					Name:       "machineWithoutNodeRef-bootstrap",
  1086  				},
  1087  			},
  1088  		},
  1089  	}
  1090  	machineWithoutNodeRefKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1091  		ObjectMeta: metav1.ObjectMeta{
  1092  			Name: "machineWithoutNodeRef-bootstrap",
  1093  		},
  1094  	}
  1095  	machineWithoutKubeadmConfig := &clusterv1.Machine{
  1096  		ObjectMeta: metav1.ObjectMeta{
  1097  			Name: "machineWithoutKubeadmConfig",
  1098  		},
  1099  		Spec: clusterv1.MachineSpec{
  1100  			InfrastructureRef: corev1.ObjectReference{
  1101  				Kind:       "GenericMachine",
  1102  				APIVersion: "generic.io/v1",
  1103  				Namespace:  metav1.NamespaceDefault,
  1104  				Name:       "machineWithoutKubeadmConfig-infra",
  1105  			},
  1106  			Bootstrap: clusterv1.Bootstrap{
  1107  				ConfigRef: &corev1.ObjectReference{
  1108  					Kind:       "KubeadmConfig",
  1109  					APIVersion: bootstrapv1.GroupVersion.String(),
  1110  					Namespace:  metav1.NamespaceDefault,
  1111  					Name:       "machineWithoutKubeadmConfig-bootstrap",
  1112  				},
  1113  			},
  1114  		},
  1115  		Status: clusterv1.MachineStatus{
  1116  			NodeRef: &corev1.ObjectReference{
  1117  				Name: "machineWithoutKubeadmConfig",
  1118  			},
  1119  		},
  1120  	}
  1121  
  1122  	ownedMachines := collections.FromMachines(
  1123  		machineWithoutExpiryAnnotation,
  1124  		machineWithExpiryAnnotation,
  1125  		machineWithDeletionTimestamp,
  1126  		machineWithoutNodeRef,
  1127  		machineWithoutKubeadmConfig,
  1128  	)
  1129  
  1130  	fakeClient := newFakeClient(
  1131  		machineWithoutExpiryAnnotationKubeadmConfig,
  1132  		machineWithExpiryAnnotationKubeadmConfig,
  1133  		machineWithDeletionTimestampKubeadmConfig,
  1134  		machineWithoutNodeRefKubeadmConfig,
  1135  	)
  1136  
  1137  	managementCluster := &fakeManagementCluster{
  1138  		Workload: fakeWorkloadCluster{
  1139  			APIServerCertificateExpiry: &detectedExpiry,
  1140  		},
  1141  	}
  1142  
  1143  	r := &KubeadmControlPlaneReconciler{
  1144  		Client:              fakeClient,
  1145  		SecretCachingClient: fakeClient,
  1146  		managementCluster:   managementCluster,
  1147  	}
  1148  
  1149  	controlPlane, err := internal.NewControlPlane(ctx, managementCluster, fakeClient, cluster, kcp, ownedMachines)
  1150  	g.Expect(err).ToNot(HaveOccurred())
  1151  
  1152  	err = r.reconcileCertificateExpiries(ctx, controlPlane)
  1153  	g.Expect(err).ToNot(HaveOccurred())
  1154  
  1155  	// Verify machineWithoutExpiryAnnotation has detectedExpiry.
  1156  	actualKubeadmConfig := bootstrapv1.KubeadmConfig{}
  1157  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutExpiryAnnotationKubeadmConfig), &actualKubeadmConfig)
  1158  	g.Expect(err).ToNot(HaveOccurred())
  1159  	actualExpiry := actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation]
  1160  	g.Expect(actualExpiry).To(Equal(detectedExpiry.Format(time.RFC3339)))
  1161  
  1162  	// Verify machineWithExpiryAnnotation has still preExistingExpiry.
  1163  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithExpiryAnnotationKubeadmConfig), &actualKubeadmConfig)
  1164  	g.Expect(err).ToNot(HaveOccurred())
  1165  	actualExpiry = actualKubeadmConfig.Annotations[clusterv1.MachineCertificatesExpiryDateAnnotation]
  1166  	g.Expect(actualExpiry).To(Equal(preExistingExpiry.Format(time.RFC3339)))
  1167  
  1168  	// Verify machineWithDeletionTimestamp has still no expiry annotation.
  1169  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithDeletionTimestampKubeadmConfig), &actualKubeadmConfig)
  1170  	g.Expect(err).ToNot(HaveOccurred())
  1171  	g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation))
  1172  
  1173  	// Verify machineWithoutNodeRef has still no expiry annotation.
  1174  	err = fakeClient.Get(ctx, client.ObjectKeyFromObject(machineWithoutNodeRefKubeadmConfig), &actualKubeadmConfig)
  1175  	g.Expect(err).ToNot(HaveOccurred())
  1176  	g.Expect(actualKubeadmConfig.Annotations).ToNot(ContainElement(clusterv1.MachineCertificatesExpiryDateAnnotation))
  1177  }
  1178  
// TestReconcileInitializeControlPlane runs the full Reconcile loop against the
// envtest environment for a fresh Cluster/KCP pair and verifies that KCP
// initializes the control plane: finalizer added, CA/kubeconfig Secrets
// generated, and a single Machine (with cloned infra object) created.
func TestReconcileInitializeControlPlane(t *testing.T) {
	setup := func(t *testing.T, g *WithT) *corev1.Namespace {
		t.Helper()

		t.Log("Creating the namespace")
		ns, err := env.CreateNamespace(ctx, "test-kcp-reconcile-initializecontrolplane")
		g.Expect(err).ToNot(HaveOccurred())

		return ns
	}

	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
		t.Helper()

		t.Log("Deleting the namespace")
		g.Expect(env.Delete(ctx, ns)).To(Succeed())
	}

	g := NewWithT(t)
	namespace := setup(t, g)
	defer teardown(t, g, namespace)

	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: namespace.Name})
	cluster.Spec = clusterv1.ClusterSpec{
		ControlPlaneEndpoint: clusterv1.APIEndpoint{
			Host: "test.local",
			Port: 9999,
		},
	}
	g.Expect(env.Create(ctx, cluster)).To(Succeed())
	// Status is a subresource in envtest, so it must be patched separately
	// after Create; InfrastructureReady gates the KCP reconcile.
	patchHelper, err := patch.NewHelper(cluster, env)
	g.Expect(err).ToNot(HaveOccurred())
	cluster.Status = clusterv1.ClusterStatus{InfrastructureReady: true}
	g.Expect(patchHelper.Patch(ctx, cluster)).To(Succeed())

	// Infrastructure machine template that KCP will clone for each Machine.
	genericInfrastructureMachineTemplate := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachineTemplate",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-foo",
				"namespace": cluster.Namespace,
			},
			"spec": map[string]interface{}{
				"template": map[string]interface{}{
					"spec": map[string]interface{}{
						"hello": "world",
					},
				},
			},
		},
	}
	g.Expect(env.Create(ctx, genericInfrastructureMachineTemplate)).To(Succeed())

	// KCP owned by the Cluster, with nil Replicas (defaulted) and no explicit
	// KubeadmConfigSpec.
	kcp := &controlplanev1.KubeadmControlPlane{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: cluster.Namespace,
			Name:      "foo",
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind:       "Cluster",
					APIVersion: clusterv1.GroupVersion.String(),
					Name:       cluster.Name,
					UID:        cluster.UID,
				},
			},
		},
		Spec: controlplanev1.KubeadmControlPlaneSpec{
			Replicas: nil,
			Version:  "v1.16.6",
			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
				InfrastructureRef: corev1.ObjectReference{
					Kind:       genericInfrastructureMachineTemplate.GetKind(),
					APIVersion: genericInfrastructureMachineTemplate.GetAPIVersion(),
					Name:       genericInfrastructureMachineTemplate.GetName(),
					Namespace:  cluster.Namespace,
				},
			},
			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
		},
	}
	g.Expect(env.Create(ctx, kcp)).To(Succeed())

	// Workload-cluster objects (coredns/kubeadm ConfigMaps, coredns
	// Deployment) that reconcile expects to find; the fake management cluster
	// below points the "workload" client at the same envtest environment.
	corednsCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coredns",
			Namespace: namespace.Name,
		},
		Data: map[string]string{
			"Corefile": "original-core-file",
		},
	}
	g.Expect(env.Create(ctx, corednsCM)).To(Succeed())

	// NOTE(review): "kubernetesVersion: metav1.16.1" looks like a mangled
	// "v1.16.1" — confirm whether the reconcile path parses this value.
	kubeadmCM := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "kubeadm-config",
			Namespace: namespace.Name,
		},
		Data: map[string]string{
			"ClusterConfiguration": `apiServer:
dns:
  type: CoreDNS
imageRepository: registry.k8s.io
kind: ClusterConfiguration
kubernetesVersion: metav1.16.1`,
		},
	}
	g.Expect(env.Create(ctx, kubeadmCM)).To(Succeed())

	corednsDepl := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "coredns",
			Namespace: namespace.Name,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"coredns": "",
				},
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Name: "coredns",
					Labels: map[string]string{
						"coredns": "",
					},
				},
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name:  "coredns",
						Image: "registry.k8s.io/coredns:1.6.2",
					}},
				},
			},
		},
	}
	g.Expect(env.Create(ctx, corednsDepl)).To(Succeed())

	expectedLabels := map[string]string{clusterv1.ClusterNameLabel: "foo"}
	// Reconciler wired to envtest; both management cluster accessors use fakes
	// whose Workload client is the envtest client as well.
	r := &KubeadmControlPlaneReconciler{
		Client:              env,
		SecretCachingClient: secretCachingClient,
		recorder:            record.NewFakeRecorder(32),
		managementCluster: &fakeManagementCluster{
			Management: &internal.Management{Client: env},
			Workload: fakeWorkloadCluster{
				Workload: &internal.Workload{
					Client: env,
				},
				Status: internal.ClusterStatus{},
			},
		},
		managementClusterUncached: &fakeManagementCluster{
			Management: &internal.Management{Client: env},
			Workload: fakeWorkloadCluster{
				Workload: &internal.Workload{
					Client: env,
				},
				Status: internal.ClusterStatus{},
			},
		},
		ssaCache: ssa.NewCache(),
	}

	result, err := r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
	g.Expect(err).ToNot(HaveOccurred())
	// this first requeue is to add finalizer
	g.Expect(result).To(BeComparableTo(ctrl.Result{}))
	g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(kcp), kcp)).To(Succeed())
	g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

	// Keep reconciling until the control plane reaches the expected state;
	// envtest has no other controllers running, so each Eventually iteration
	// drives progress by calling Reconcile again.
	g.Eventually(func(g Gomega) {
		_, err = r.Reconcile(ctx, ctrl.Request{NamespacedName: util.ObjectKey(kcp)})
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKey{Name: kcp.Name, Namespace: kcp.Namespace}, kcp)).To(Succeed())
		// Expect the referenced infrastructure template to have a Cluster Owner Reference.
		g.Expect(env.GetAPIReader().Get(ctx, util.ObjectKey(genericInfrastructureMachineTemplate), genericInfrastructureMachineTemplate)).To(Succeed())
		g.Expect(genericInfrastructureMachineTemplate.GetOwnerReferences()).To(ContainElement(metav1.OwnerReference{
			APIVersion: clusterv1.GroupVersion.String(),
			Kind:       "Cluster",
			Name:       cluster.Name,
			UID:        cluster.UID,
		}))

		// Always expect that the Finalizer is set on the passed in resource
		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

		g.Expect(kcp.Status.Selector).NotTo(BeEmpty())
		g.Expect(kcp.Status.Replicas).To(BeEquivalentTo(1))
		g.Expect(conditions.IsFalse(kcp, controlplanev1.AvailableCondition)).To(BeTrue())

		// CA Secret must exist, be labeled with the cluster name, and be non-empty.
		s, err := secret.GetFromNamespacedName(ctx, env, client.ObjectKey{Namespace: cluster.Namespace, Name: "foo"}, secret.ClusterCA)
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(s).NotTo(BeNil())
		g.Expect(s.Data).NotTo(BeEmpty())
		g.Expect(s.Labels).To(Equal(expectedLabels))

		// A kubeconfig Secret must have been generated for the Cluster.
		k, err := kubeconfig.FromSecret(ctx, env, util.ObjectKey(cluster))
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(k).NotTo(BeEmpty())

		// Exactly one control plane Machine must exist (nil Replicas defaults to 1).
		machineList := &clusterv1.MachineList{}
		g.Expect(env.GetAPIReader().List(ctx, machineList, client.InNamespace(cluster.Namespace))).To(Succeed())
		g.Expect(machineList.Items).To(HaveLen(1))

		machine := machineList.Items[0]
		g.Expect(machine.Name).To(HavePrefix(kcp.Name))
		// Newly cloned infra objects should have the infraref annotation.
		infraObj, err := external.Get(ctx, r.Client, &machine.Spec.InfrastructureRef, machine.Spec.InfrastructureRef.Namespace)
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromNameAnnotation, genericInfrastructureMachineTemplate.GetName()))
		g.Expect(infraObj.GetAnnotations()).To(HaveKeyWithValue(clusterv1.TemplateClonedFromGroupKindAnnotation, genericInfrastructureMachineTemplate.GroupVersionKind().GroupKind().String()))
	}, 30*time.Second).Should(Succeed())
}
  1394  
  1395  func TestKubeadmControlPlaneReconciler_syncMachines(t *testing.T) {
  1396  	setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) {
  1397  		t.Helper()
  1398  
  1399  		t.Log("Creating the namespace")
  1400  		ns, err := env.CreateNamespace(ctx, "test-kcp-reconciler-sync-machines")
  1401  		g.Expect(err).ToNot(HaveOccurred())
  1402  
  1403  		t.Log("Creating the Cluster")
  1404  		cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: ns.Name, Name: "test-cluster"}}
  1405  		g.Expect(env.Create(ctx, cluster)).To(Succeed())
  1406  
  1407  		t.Log("Creating the Cluster Kubeconfig Secret")
  1408  		g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())
  1409  
  1410  		return ns, cluster
  1411  	}
  1412  
  1413  	teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace, cluster *clusterv1.Cluster) {
  1414  		t.Helper()
  1415  
  1416  		t.Log("Deleting the Cluster")
  1417  		g.Expect(env.Delete(ctx, cluster)).To(Succeed())
  1418  		t.Log("Deleting the namespace")
  1419  		g.Expect(env.Delete(ctx, ns)).To(Succeed())
  1420  	}
  1421  
  1422  	g := NewWithT(t)
  1423  	namespace, testCluster := setup(t, g)
  1424  	defer teardown(t, g, namespace, testCluster)
  1425  
  1426  	classicManager := "manager"
  1427  	duration5s := &metav1.Duration{Duration: 5 * time.Second}
  1428  	duration10s := &metav1.Duration{Duration: 10 * time.Second}
  1429  
  1430  	// Existing InfraMachine
  1431  	infraMachineSpec := map[string]interface{}{
  1432  		"infra-field": "infra-value",
  1433  	}
  1434  	existingInfraMachine := &unstructured.Unstructured{
  1435  		Object: map[string]interface{}{
  1436  			"kind":       "GenericInfrastructureMachine",
  1437  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  1438  			"metadata": map[string]interface{}{
  1439  				"name":      "existing-inframachine",
  1440  				"namespace": testCluster.Namespace,
  1441  				"labels": map[string]string{
  1442  					"preserved-label": "preserved-value",
  1443  					"dropped-label":   "dropped-value",
  1444  					"modified-label":  "modified-value",
  1445  				},
  1446  				"annotations": map[string]string{
  1447  					"preserved-annotation": "preserved-value",
  1448  					"dropped-annotation":   "dropped-value",
  1449  					"modified-annotation":  "modified-value",
  1450  				},
  1451  			},
  1452  			"spec": infraMachineSpec,
  1453  		},
  1454  	}
  1455  	infraMachineRef := &corev1.ObjectReference{
  1456  		Kind:       "GenericInfrastructureMachine",
  1457  		Namespace:  namespace.Name,
  1458  		Name:       "existing-inframachine",
  1459  		APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  1460  	}
  1461  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1462  	g.Expect(env.Create(ctx, existingInfraMachine, client.FieldOwner("manager"))).To(Succeed())
  1463  
  1464  	// Existing KubeadmConfig
  1465  	bootstrapSpec := &bootstrapv1.KubeadmConfigSpec{
  1466  		Users:             []bootstrapv1.User{{Name: "test-user"}},
  1467  		JoinConfiguration: &bootstrapv1.JoinConfiguration{},
  1468  	}
  1469  	existingKubeadmConfig := &bootstrapv1.KubeadmConfig{
  1470  		TypeMeta: metav1.TypeMeta{
  1471  			Kind:       "KubeadmConfig",
  1472  			APIVersion: bootstrapv1.GroupVersion.String(),
  1473  		},
  1474  		ObjectMeta: metav1.ObjectMeta{
  1475  			Name:      "existing-kubeadmconfig",
  1476  			Namespace: namespace.Name,
  1477  			Labels: map[string]string{
  1478  				"preserved-label": "preserved-value",
  1479  				"dropped-label":   "dropped-value",
  1480  				"modified-label":  "modified-value",
  1481  			},
  1482  			Annotations: map[string]string{
  1483  				"preserved-annotation": "preserved-value",
  1484  				"dropped-annotation":   "dropped-value",
  1485  				"modified-annotation":  "modified-value",
  1486  			},
  1487  		},
  1488  		Spec: *bootstrapSpec,
  1489  	}
  1490  	bootstrapRef := &corev1.ObjectReference{
  1491  		Kind:       "KubeadmConfig",
  1492  		Namespace:  namespace.Name,
  1493  		Name:       "existing-kubeadmconfig",
  1494  		APIVersion: bootstrapv1.GroupVersion.String(),
  1495  	}
  1496  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1497  	g.Expect(env.Create(ctx, existingKubeadmConfig, client.FieldOwner("manager"))).To(Succeed())
  1498  
  1499  	// Existing Machine to validate in-place mutation
  1500  	fd := pointer.String("fd1")
  1501  	inPlaceMutatingMachine := &clusterv1.Machine{
  1502  		TypeMeta: metav1.TypeMeta{
  1503  			Kind:       "Machine",
  1504  			APIVersion: clusterv1.GroupVersion.String(),
  1505  		},
  1506  		ObjectMeta: metav1.ObjectMeta{
  1507  			Name:      "existing-machine",
  1508  			Namespace: namespace.Name,
  1509  			Labels: map[string]string{
  1510  				"preserved-label": "preserved-value",
  1511  				"dropped-label":   "dropped-value",
  1512  				"modified-label":  "modified-value",
  1513  			},
  1514  			Annotations: map[string]string{
  1515  				"preserved-annotation": "preserved-value",
  1516  				"dropped-annotation":   "dropped-value",
  1517  				"modified-annotation":  "modified-value",
  1518  			},
  1519  		},
  1520  		Spec: clusterv1.MachineSpec{
  1521  			ClusterName: testCluster.Name,
  1522  			Bootstrap: clusterv1.Bootstrap{
  1523  				ConfigRef: bootstrapRef,
  1524  			},
  1525  			InfrastructureRef:       *infraMachineRef,
  1526  			Version:                 pointer.String("v1.25.3"),
  1527  			FailureDomain:           fd,
  1528  			ProviderID:              pointer.String("provider-id"),
  1529  			NodeDrainTimeout:        duration5s,
  1530  			NodeVolumeDetachTimeout: duration5s,
  1531  			NodeDeletionTimeout:     duration5s,
  1532  		},
  1533  	}
  1534  	// Note: use "manager" as the field owner to mimic the manager used before ClusterAPI v1.4.0.
  1535  	g.Expect(env.Create(ctx, inPlaceMutatingMachine, client.FieldOwner("manager"))).To(Succeed())
  1536  
  1537  	// Existing machine that is in deleting state
  1538  	deletingMachine := &clusterv1.Machine{
  1539  		TypeMeta: metav1.TypeMeta{
  1540  			APIVersion: clusterv1.GroupVersion.String(),
  1541  			Kind:       "Machine",
  1542  		},
  1543  		ObjectMeta: metav1.ObjectMeta{
  1544  			Name:        "deleting-machine",
  1545  			Namespace:   namespace.Name,
  1546  			Labels:      map[string]string{},
  1547  			Annotations: map[string]string{},
  1548  			Finalizers:  []string{"testing-finalizer"},
  1549  		},
  1550  		Spec: clusterv1.MachineSpec{
  1551  			ClusterName: testCluster.Name,
  1552  			InfrastructureRef: corev1.ObjectReference{
  1553  				Namespace: namespace.Name,
  1554  			},
  1555  			Bootstrap: clusterv1.Bootstrap{
  1556  				DataSecretName: pointer.String("machine-bootstrap-secret"),
  1557  			},
  1558  		},
  1559  	}
  1560  	g.Expect(env.Create(ctx, deletingMachine, client.FieldOwner(classicManager))).To(Succeed())
  1561  	// Delete the machine to put it in the deleting state
  1562  	g.Expect(env.Delete(ctx, deletingMachine)).To(Succeed())
  1563  	// Wait till the machine is marked for deletion
  1564  	g.Eventually(func() bool {
  1565  		if err := env.Get(ctx, client.ObjectKeyFromObject(deletingMachine), deletingMachine); err != nil {
  1566  			return false
  1567  		}
  1568  		return !deletingMachine.DeletionTimestamp.IsZero()
  1569  	}, 30*time.Second).Should(BeTrue())
  1570  
  1571  	// Existing machine that has a InfrastructureRef which does not exist.
  1572  	nilInfraMachineMachine := &clusterv1.Machine{
  1573  		TypeMeta: metav1.TypeMeta{
  1574  			APIVersion: clusterv1.GroupVersion.String(),
  1575  			Kind:       "Machine",
  1576  		},
  1577  		ObjectMeta: metav1.ObjectMeta{
  1578  			Name:        "nil-infra-machine-machine",
  1579  			Namespace:   namespace.Name,
  1580  			Labels:      map[string]string{},
  1581  			Annotations: map[string]string{},
  1582  			Finalizers:  []string{"testing-finalizer"},
  1583  		},
  1584  		Spec: clusterv1.MachineSpec{
  1585  			ClusterName: testCluster.Name,
  1586  			InfrastructureRef: corev1.ObjectReference{
  1587  				Namespace: namespace.Name,
  1588  			},
  1589  			Bootstrap: clusterv1.Bootstrap{
  1590  				DataSecretName: pointer.String("machine-bootstrap-secret"),
  1591  			},
  1592  		},
  1593  	}
  1594  	g.Expect(env.Create(ctx, nilInfraMachineMachine, client.FieldOwner(classicManager))).To(Succeed())
  1595  	// Delete the machine to put it in the deleting state
  1596  
  1597  	kcp := &controlplanev1.KubeadmControlPlane{
  1598  		TypeMeta: metav1.TypeMeta{
  1599  			Kind:       "KubeadmControlPlane",
  1600  			APIVersion: controlplanev1.GroupVersion.String(),
  1601  		},
  1602  		ObjectMeta: metav1.ObjectMeta{
  1603  			UID:       types.UID("abc-123-control-plane"),
  1604  			Name:      "existing-kcp",
  1605  			Namespace: namespace.Name,
  1606  		},
  1607  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  1608  			Version:           "v1.26.1",
  1609  			KubeadmConfigSpec: *bootstrapSpec,
  1610  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
  1611  				ObjectMeta: clusterv1.ObjectMeta{
  1612  					Labels: map[string]string{
  1613  						"preserved-label": "preserved-value", // Label will be preserved while testing in-place mutation.
  1614  						"dropped-label":   "dropped-value",   // Label will be dropped while testing in-place mutation.
  1615  						"modified-label":  "modified-value",  // Label value will be modified while testing in-place mutation.
  1616  					},
  1617  					Annotations: map[string]string{
  1618  						"preserved-annotation": "preserved-value", // Annotation will be preserved while testing in-place mutation.
  1619  						"dropped-annotation":   "dropped-value",   // Annotation will be dropped while testing in-place mutation.
  1620  						"modified-annotation":  "modified-value",  // Annotation value will be modified while testing in-place mutation.
  1621  					},
  1622  				},
  1623  				InfrastructureRef: corev1.ObjectReference{
  1624  					Kind:       "GenericInfrastructureMachineTemplate",
  1625  					Namespace:  namespace.Name,
  1626  					Name:       "infra-foo",
  1627  					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  1628  				},
  1629  				NodeDrainTimeout:        duration5s,
  1630  				NodeVolumeDetachTimeout: duration5s,
  1631  				NodeDeletionTimeout:     duration5s,
  1632  			},
  1633  		},
  1634  	}
  1635  
  1636  	controlPlane := &internal.ControlPlane{
  1637  		KCP:     kcp,
  1638  		Cluster: testCluster,
  1639  		Machines: collections.Machines{
  1640  			inPlaceMutatingMachine.Name: inPlaceMutatingMachine,
  1641  			deletingMachine.Name:        deletingMachine,
  1642  			nilInfraMachineMachine.Name: nilInfraMachineMachine,
  1643  		},
  1644  		KubeadmConfigs: map[string]*bootstrapv1.KubeadmConfig{
  1645  			inPlaceMutatingMachine.Name: existingKubeadmConfig,
  1646  			deletingMachine.Name:        nil,
  1647  		},
  1648  		InfraResources: map[string]*unstructured.Unstructured{
  1649  			inPlaceMutatingMachine.Name: existingInfraMachine,
  1650  			deletingMachine.Name:        nil,
  1651  		},
  1652  	}
  1653  
  1654  	//
  1655  	// Verify Managed Fields
  1656  	//
  1657  
  1658  	// Run syncMachines to clean up managed fields and have proper field ownership
  1659  	// for Machines, InfrastructureMachines and KubeadmConfigs.
  1660  	reconciler := &KubeadmControlPlaneReconciler{
  1661  		Client:              env,
  1662  		SecretCachingClient: secretCachingClient,
  1663  		ssaCache:            ssa.NewCache(),
  1664  	}
  1665  	g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
  1666  
  1667  	// The inPlaceMutatingMachine should have cleaned up managed fields.
  1668  	updatedInplaceMutatingMachine := inPlaceMutatingMachine.DeepCopy()
  1669  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed())
  1670  	// Verify ManagedFields
  1671  	g.Expect(updatedInplaceMutatingMachine.ManagedFields).Should(
  1672  		ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
  1673  		"in-place mutable machine should contain an entry for SSA manager",
  1674  	)
  1675  	g.Expect(updatedInplaceMutatingMachine.ManagedFields).ShouldNot(
  1676  		ContainElement(ssa.MatchManagedFieldsEntry(classicManager, metav1.ManagedFieldsOperationUpdate)),
  1677  		"in-place mutable machine should not contain an entry for old manager",
  1678  	)
  1679  
  1680  	// The InfrastructureMachine should have ownership of "labels" and "annotations" transferred to
  1681  	// "capi-kubeadmcontrolplane" manager.
  1682  	updatedInfraMachine := existingInfraMachine.DeepCopy()
  1683  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed())
  1684  
  1685  	// Verify ManagedFields
  1686  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1687  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"}))
  1688  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1689  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"}))
  1690  	g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot(
  1691  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"}))
  1692  	g.Expect(updatedInfraMachine.GetManagedFields()).ShouldNot(
  1693  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"}))
  1694  	// Verify ownership of other fields is not changed.
  1695  	g.Expect(updatedInfraMachine.GetManagedFields()).Should(
  1696  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"}))
  1697  
  1698  	// The KubeadmConfig should have ownership of "labels" and "annotations" transferred to
  1699  	// "capi-kubeadmcontrolplane" manager.
  1700  	updatedKubeadmConfig := existingKubeadmConfig.DeepCopy()
  1701  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed())
  1702  
  1703  	// Verify ManagedFields
  1704  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1705  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:labels"}))
  1706  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1707  		ssa.MatchFieldOwnership(kcpManagerName, metav1.ManagedFieldsOperationApply, contract.Path{"f:metadata", "f:annotations"}))
  1708  	g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot(
  1709  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:labels"}))
  1710  	g.Expect(updatedKubeadmConfig.GetManagedFields()).ShouldNot(
  1711  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:metadata", "f:annotations"}))
  1712  	// Verify ownership of other fields is not changed.
  1713  	g.Expect(updatedKubeadmConfig.GetManagedFields()).Should(
  1714  		ssa.MatchFieldOwnership(classicManager, metav1.ManagedFieldsOperationUpdate, contract.Path{"f:spec"}))
  1715  
  1716  	//
  1717  	// Verify In-place mutating fields
  1718  	//
  1719  
  1720  	// Update KCP and verify the in-place mutating fields are propagated.
  1721  	kcp.Spec.MachineTemplate.ObjectMeta.Labels = map[string]string{
  1722  		"preserved-label": "preserved-value",  // Keep the label and value as is
  1723  		"modified-label":  "modified-value-2", // Modify the value of the label
  1724  		// Drop "dropped-label"
  1725  	}
  1726  	expectedLabels := map[string]string{
  1727  		"preserved-label":                      "preserved-value",
  1728  		"modified-label":                       "modified-value-2",
  1729  		clusterv1.ClusterNameLabel:             testCluster.Name,
  1730  		clusterv1.MachineControlPlaneLabel:     "",
  1731  		clusterv1.MachineControlPlaneNameLabel: kcp.Name,
  1732  	}
  1733  	kcp.Spec.MachineTemplate.ObjectMeta.Annotations = map[string]string{
  1734  		"preserved-annotation": "preserved-value",  // Keep the annotation and value as is
  1735  		"modified-annotation":  "modified-value-2", // Modify the value of the annotation
  1736  		// Drop "dropped-annotation"
  1737  	}
  1738  	kcp.Spec.MachineTemplate.NodeDrainTimeout = duration10s
  1739  	kcp.Spec.MachineTemplate.NodeDeletionTimeout = duration10s
  1740  	kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout = duration10s
  1741  
  1742  	// Use the updated KCP.
  1743  	controlPlane.KCP = kcp
  1744  	g.Expect(reconciler.syncMachines(ctx, controlPlane)).To(Succeed())
  1745  
  1746  	// Verify in-place mutable fields are updated on the Machine.
  1747  	updatedInplaceMutatingMachine = inPlaceMutatingMachine.DeepCopy()
  1748  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInplaceMutatingMachine), updatedInplaceMutatingMachine)).To(Succeed())
  1749  	// Verify Labels
  1750  	g.Expect(updatedInplaceMutatingMachine.Labels).Should(Equal(expectedLabels))
  1751  	// Verify Annotations
  1752  	g.Expect(updatedInplaceMutatingMachine.Annotations).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1753  	// Verify Node timeout values
  1754  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeDrainTimeout).Should(And(
  1755  		Not(BeNil()),
  1756  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDrainTimeout)),
  1757  	))
  1758  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeDeletionTimeout).Should(And(
  1759  		Not(BeNil()),
  1760  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeDeletionTimeout)),
  1761  	))
  1762  	g.Expect(updatedInplaceMutatingMachine.Spec.NodeVolumeDetachTimeout).Should(And(
  1763  		Not(BeNil()),
  1764  		HaveValue(BeComparableTo(*kcp.Spec.MachineTemplate.NodeVolumeDetachTimeout)),
  1765  	))
  1766  	// Verify that the non in-place mutating fields remain the same.
  1767  	g.Expect(updatedInplaceMutatingMachine.Spec.FailureDomain).Should(Equal(inPlaceMutatingMachine.Spec.FailureDomain))
  1768  	g.Expect(updatedInplaceMutatingMachine.Spec.ProviderID).Should(Equal(inPlaceMutatingMachine.Spec.ProviderID))
  1769  	g.Expect(updatedInplaceMutatingMachine.Spec.Version).Should(Equal(inPlaceMutatingMachine.Spec.Version))
  1770  	g.Expect(updatedInplaceMutatingMachine.Spec.InfrastructureRef).Should(BeComparableTo(inPlaceMutatingMachine.Spec.InfrastructureRef))
  1771  	g.Expect(updatedInplaceMutatingMachine.Spec.Bootstrap).Should(BeComparableTo(inPlaceMutatingMachine.Spec.Bootstrap))
  1772  
  1773  	// Verify in-place mutable fields are updated on InfrastructureMachine
  1774  	updatedInfraMachine = existingInfraMachine.DeepCopy()
  1775  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedInfraMachine), updatedInfraMachine)).To(Succeed())
  1776  	// Verify Labels
  1777  	g.Expect(updatedInfraMachine.GetLabels()).Should(Equal(expectedLabels))
  1778  	// Verify Annotations
  1779  	g.Expect(updatedInfraMachine.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1780  	// Verify spec remains the same
  1781  	g.Expect(updatedInfraMachine.Object).Should(HaveKeyWithValue("spec", infraMachineSpec))
  1782  
  1783  	// Verify in-place mutable fields are updated on the KubeadmConfig.
  1784  	updatedKubeadmConfig = existingKubeadmConfig.DeepCopy()
  1785  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedKubeadmConfig), updatedKubeadmConfig)).To(Succeed())
  1786  	// Verify Labels
  1787  	g.Expect(updatedKubeadmConfig.GetLabels()).Should(Equal(expectedLabels))
  1788  	// Verify Annotations
  1789  	g.Expect(updatedKubeadmConfig.GetAnnotations()).Should(Equal(kcp.Spec.MachineTemplate.ObjectMeta.Annotations))
  1790  	// Verify spec remains the same
  1791  	g.Expect(updatedKubeadmConfig.Spec).Should(BeComparableTo(existingKubeadmConfig.Spec))
  1792  
  1793  	// The deleting machine should not change.
  1794  	updatedDeletingMachine := deletingMachine.DeepCopy()
  1795  	g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(updatedDeletingMachine), updatedDeletingMachine)).To(Succeed())
  1796  
  1797  	// Verify ManagedFields
  1798  	g.Expect(updatedDeletingMachine.ManagedFields).ShouldNot(
  1799  		ContainElement(ssa.MatchManagedFieldsEntry(kcpManagerName, metav1.ManagedFieldsOperationApply)),
  1800  		"deleting machine should not contain an entry for SSA manager",
  1801  	)
  1802  	g.Expect(updatedDeletingMachine.ManagedFields).Should(
  1803  		ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate)),
  1804  		"in-place mutable machine should still contain an entry for old manager",
  1805  	)
  1806  
  1807  	// Verify the machine labels and annotations are unchanged.
  1808  	g.Expect(updatedDeletingMachine.Labels).Should(Equal(deletingMachine.Labels))
  1809  	g.Expect(updatedDeletingMachine.Annotations).Should(Equal(deletingMachine.Annotations))
  1810  	// Verify the machine spec is unchanged.
  1811  	g.Expect(updatedDeletingMachine.Spec).Should(BeComparableTo(deletingMachine.Spec))
  1812  }
  1813  
  1814  func TestKubeadmControlPlaneReconciler_updateCoreDNS(t *testing.T) {
  1815  	// TODO: (wfernandes) This test could use some refactor love.
  1816  
  1817  	cluster := newCluster(&types.NamespacedName{Name: "foo", Namespace: metav1.NamespaceDefault})
  1818  	kcp := &controlplanev1.KubeadmControlPlane{
  1819  		ObjectMeta: metav1.ObjectMeta{
  1820  			Namespace: cluster.Namespace,
  1821  			Name:      "foo",
  1822  		},
  1823  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  1824  			Replicas: nil,
  1825  			Version:  "v1.16.6",
  1826  			KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
  1827  				ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
  1828  					DNS: bootstrapv1.DNS{
  1829  						ImageMeta: bootstrapv1.ImageMeta{
  1830  							ImageRepository: "registry.k8s.io",
  1831  							ImageTag:        "1.7.2",
  1832  						},
  1833  					},
  1834  				},
  1835  			},
  1836  		},
  1837  	}
  1838  	depl := &appsv1.Deployment{
  1839  		ObjectMeta: metav1.ObjectMeta{
  1840  			Name:      "coredns",
  1841  			Namespace: metav1.NamespaceSystem,
  1842  		},
  1843  		Spec: appsv1.DeploymentSpec{
  1844  			Template: corev1.PodTemplateSpec{
  1845  				ObjectMeta: metav1.ObjectMeta{
  1846  					Name: "coredns",
  1847  				},
  1848  				Spec: corev1.PodSpec{
  1849  					Containers: []corev1.Container{{
  1850  						Name:  "coredns",
  1851  						Image: "registry.k8s.io/coredns:1.6.2",
  1852  					}},
  1853  					Volumes: []corev1.Volume{{
  1854  						Name: "config-volume",
  1855  						VolumeSource: corev1.VolumeSource{
  1856  							ConfigMap: &corev1.ConfigMapVolumeSource{
  1857  								LocalObjectReference: corev1.LocalObjectReference{
  1858  									Name: "coredns",
  1859  								},
  1860  								Items: []corev1.KeyToPath{{
  1861  									Key:  "Corefile",
  1862  									Path: "Corefile",
  1863  								}},
  1864  							},
  1865  						},
  1866  					}},
  1867  				},
  1868  			},
  1869  		},
  1870  	}
  1871  	originalCorefile := "original core file"
  1872  	corednsCM := &corev1.ConfigMap{
  1873  		ObjectMeta: metav1.ObjectMeta{
  1874  			Name:      "coredns",
  1875  			Namespace: metav1.NamespaceSystem,
  1876  		},
  1877  		Data: map[string]string{
  1878  			"Corefile": originalCorefile,
  1879  		},
  1880  	}
  1881  
  1882  	kubeadmCM := &corev1.ConfigMap{
  1883  		ObjectMeta: metav1.ObjectMeta{
  1884  			Name:      "kubeadm-config",
  1885  			Namespace: metav1.NamespaceSystem,
  1886  		},
  1887  		Data: map[string]string{
  1888  			"ClusterConfiguration": `apiServer:
  1889  dns:
  1890    type: CoreDNS
  1891  imageRepository: registry.k8s.io
  1892  kind: ClusterConfiguration
  1893  kubernetesVersion: metav1.16.1`,
  1894  		},
  1895  	}
  1896  
  1897  	t.Run("updates configmaps and deployments successfully", func(t *testing.T) {
  1898  		t.Skip("Updating the corefile, after updating controller runtime somehow makes this test fail in a conflict, needs investigation")
  1899  
  1900  		g := NewWithT(t)
  1901  		objs := []client.Object{
  1902  			cluster.DeepCopy(),
  1903  			kcp.DeepCopy(),
  1904  			depl.DeepCopy(),
  1905  			corednsCM.DeepCopy(),
  1906  			kubeadmCM.DeepCopy(),
  1907  		}
  1908  		fakeClient := newFakeClient(objs...)
  1909  		log.SetLogger(klogr.New())
  1910  
  1911  		workloadCluster := &fakeWorkloadCluster{
  1912  			Workload: &internal.Workload{
  1913  				Client: fakeClient,
  1914  				CoreDNSMigrator: &fakeMigrator{
  1915  					migratedCorefile: "new core file",
  1916  				},
  1917  			},
  1918  		}
  1919  
  1920  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  1921  
  1922  		var actualCoreDNSCM corev1.ConfigMap
  1923  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed())
  1924  		g.Expect(actualCoreDNSCM.Data).To(HaveLen(2))
  1925  		g.Expect(actualCoreDNSCM.Data).To(HaveKeyWithValue("Corefile", "new core file"))
  1926  		g.Expect(actualCoreDNSCM.Data).To(HaveKeyWithValue("Corefile-backup", originalCorefile))
  1927  
  1928  		var actualKubeadmConfig corev1.ConfigMap
  1929  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem}, &actualKubeadmConfig)).To(Succeed())
  1930  		g.Expect(actualKubeadmConfig.Data).To(HaveKey("ClusterConfiguration"))
  1931  		g.Expect(actualKubeadmConfig.Data["ClusterConfiguration"]).To(ContainSubstring("1.7.2"))
  1932  
  1933  		expectedVolume := corev1.Volume{
  1934  			Name: "config-volume",
  1935  			VolumeSource: corev1.VolumeSource{
  1936  				ConfigMap: &corev1.ConfigMapVolumeSource{
  1937  					LocalObjectReference: corev1.LocalObjectReference{
  1938  						Name: "coredns",
  1939  					},
  1940  					Items: []corev1.KeyToPath{{
  1941  						Key:  "Corefile",
  1942  						Path: "Corefile",
  1943  					}},
  1944  				},
  1945  			},
  1946  		}
  1947  		var actualCoreDNSDeployment appsv1.Deployment
  1948  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSDeployment)).To(Succeed())
  1949  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Containers[0].Image).To(Equal("registry.k8s.io/coredns:1.7.2"))
  1950  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Volumes).To(ConsistOf(expectedVolume))
  1951  	})
  1952  
  1953  	t.Run("returns no error when no ClusterConfiguration is specified", func(t *testing.T) {
  1954  		g := NewWithT(t)
  1955  		kcp := kcp.DeepCopy()
  1956  		kcp.Spec.KubeadmConfigSpec.ClusterConfiguration = nil
  1957  
  1958  		objs := []client.Object{
  1959  			cluster.DeepCopy(),
  1960  			kcp,
  1961  			depl.DeepCopy(),
  1962  			corednsCM.DeepCopy(),
  1963  			kubeadmCM.DeepCopy(),
  1964  		}
  1965  
  1966  		fakeClient := newFakeClient(objs...)
  1967  		log.SetLogger(klogr.New())
  1968  
  1969  		workloadCluster := fakeWorkloadCluster{
  1970  			Workload: &internal.Workload{
  1971  				Client: fakeClient,
  1972  				CoreDNSMigrator: &fakeMigrator{
  1973  					migratedCorefile: "new core file",
  1974  				},
  1975  			},
  1976  		}
  1977  
  1978  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  1979  	})
  1980  
  1981  	t.Run("should not return an error when there is no CoreDNS configmap", func(t *testing.T) {
  1982  		g := NewWithT(t)
  1983  		objs := []client.Object{
  1984  			cluster.DeepCopy(),
  1985  			kcp.DeepCopy(),
  1986  			depl.DeepCopy(),
  1987  			kubeadmCM.DeepCopy(),
  1988  		}
  1989  
  1990  		fakeClient := newFakeClient(objs...)
  1991  		workloadCluster := fakeWorkloadCluster{
  1992  			Workload: &internal.Workload{
  1993  				Client: fakeClient,
  1994  				CoreDNSMigrator: &fakeMigrator{
  1995  					migratedCorefile: "new core file",
  1996  				},
  1997  			},
  1998  		}
  1999  
  2000  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  2001  	})
  2002  
  2003  	t.Run("should not return an error when there is no CoreDNS deployment", func(t *testing.T) {
  2004  		g := NewWithT(t)
  2005  		objs := []client.Object{
  2006  			cluster.DeepCopy(),
  2007  			kcp.DeepCopy(),
  2008  			corednsCM.DeepCopy(),
  2009  			kubeadmCM.DeepCopy(),
  2010  		}
  2011  
  2012  		fakeClient := newFakeClient(objs...)
  2013  		log.SetLogger(klogr.New())
  2014  
  2015  		workloadCluster := fakeWorkloadCluster{
  2016  			Workload: &internal.Workload{
  2017  				Client: fakeClient,
  2018  				CoreDNSMigrator: &fakeMigrator{
  2019  					migratedCorefile: "new core file",
  2020  				},
  2021  			},
  2022  		}
  2023  
  2024  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  2025  	})
  2026  
  2027  	t.Run("should not return an error when no DNS upgrade is requested", func(t *testing.T) {
  2028  		g := NewWithT(t)
  2029  		objs := []client.Object{
  2030  			cluster.DeepCopy(),
  2031  			corednsCM.DeepCopy(),
  2032  			kubeadmCM.DeepCopy(),
  2033  		}
  2034  		kcp := kcp.DeepCopy()
  2035  		kcp.Annotations = map[string]string{controlplanev1.SkipCoreDNSAnnotation: ""}
  2036  
  2037  		depl := depl.DeepCopy()
  2038  
  2039  		depl.Spec.Template.Spec.Containers[0].Image = "my-cool-image!!!!" // something very unlikely for getCoreDNSInfo to parse
  2040  		objs = append(objs, depl)
  2041  
  2042  		fakeClient := newFakeClient(objs...)
  2043  		workloadCluster := fakeWorkloadCluster{
  2044  			Workload: &internal.Workload{
  2045  				Client: fakeClient,
  2046  			},
  2047  		}
  2048  
  2049  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).To(Succeed())
  2050  
  2051  		var actualCoreDNSCM corev1.ConfigMap
  2052  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSCM)).To(Succeed())
  2053  		g.Expect(actualCoreDNSCM.Data).To(Equal(corednsCM.Data))
  2054  
  2055  		var actualKubeadmConfig corev1.ConfigMap
  2056  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "kubeadm-config", Namespace: metav1.NamespaceSystem}, &actualKubeadmConfig)).To(Succeed())
  2057  		g.Expect(actualKubeadmConfig.Data).To(Equal(kubeadmCM.Data))
  2058  
  2059  		var actualCoreDNSDeployment appsv1.Deployment
  2060  		g.Expect(fakeClient.Get(ctx, client.ObjectKey{Name: "coredns", Namespace: metav1.NamespaceSystem}, &actualCoreDNSDeployment)).To(Succeed())
  2061  		g.Expect(actualCoreDNSDeployment.Spec.Template.Spec.Containers[0].Image).ToNot(ContainSubstring("coredns"))
  2062  	})
  2063  
  2064  	t.Run("returns error when unable to UpdateCoreDNS", func(t *testing.T) {
  2065  		g := NewWithT(t)
  2066  		objs := []client.Object{
  2067  			cluster.DeepCopy(),
  2068  			kcp.DeepCopy(),
  2069  			depl.DeepCopy(),
  2070  			corednsCM.DeepCopy(),
  2071  		}
  2072  
  2073  		fakeClient := newFakeClient(objs...)
  2074  		log.SetLogger(klogr.New())
  2075  
  2076  		workloadCluster := fakeWorkloadCluster{
  2077  			Workload: &internal.Workload{
  2078  				Client: fakeClient,
  2079  				CoreDNSMigrator: &fakeMigrator{
  2080  					migratedCorefile: "new core file",
  2081  				},
  2082  			},
  2083  		}
  2084  
  2085  		g.Expect(workloadCluster.UpdateCoreDNS(ctx, kcp, semver.MustParse("1.19.1"))).ToNot(Succeed())
  2086  	})
  2087  }
  2088  
  2089  func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
  2090  	t.Run("removes all control plane Machines", func(t *testing.T) {
  2091  		g := NewWithT(t)
  2092  
  2093  		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
  2094  		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
  2095  		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy()}
  2096  
  2097  		machines := collections.New()
  2098  		for i := 0; i < 3; i++ {
  2099  			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
  2100  			initObjs = append(initObjs, m)
  2101  			machines.Insert(m)
  2102  		}
  2103  
  2104  		fakeClient := newFakeClient(initObjs...)
  2105  
  2106  		r := &KubeadmControlPlaneReconciler{
  2107  			Client:              fakeClient,
  2108  			SecretCachingClient: fakeClient,
  2109  			managementCluster: &fakeManagementCluster{
  2110  				Management: &internal.Management{Client: fakeClient},
  2111  				Workload:   fakeWorkloadCluster{},
  2112  			},
  2113  
  2114  			recorder: record.NewFakeRecorder(32),
  2115  		}
  2116  
  2117  		controlPlane := &internal.ControlPlane{
  2118  			KCP:      kcp,
  2119  			Cluster:  cluster,
  2120  			Machines: machines,
  2121  		}
  2122  
  2123  		result, err := r.reconcileDelete(ctx, controlPlane)
  2124  		g.Expect(result).To(Equal(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
  2125  		g.Expect(err).ToNot(HaveOccurred())
  2126  		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))
  2127  
  2128  		controlPlaneMachines := clusterv1.MachineList{}
  2129  		g.Expect(fakeClient.List(ctx, &controlPlaneMachines)).To(Succeed())
  2130  		g.Expect(controlPlaneMachines.Items).To(BeEmpty())
  2131  
  2132  		controlPlane = &internal.ControlPlane{
  2133  			KCP:     kcp,
  2134  			Cluster: cluster,
  2135  		}
  2136  
  2137  		result, err = r.reconcileDelete(ctx, controlPlane)
  2138  		g.Expect(result).To(BeComparableTo(ctrl.Result{}))
  2139  		g.Expect(err).ToNot(HaveOccurred())
  2140  		g.Expect(kcp.Finalizers).To(BeEmpty())
  2141  	})
  2142  
	t.Run("does not remove any control plane Machines if other Machines exist", func(t *testing.T) {
		g := NewWithT(t)

		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)

		// A non-control-plane Machine that belongs to the same Cluster; its
		// presence should block deletion of the control plane Machines below.
		workerMachine := &clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "worker",
				Namespace: cluster.Namespace,
				Labels: map[string]string{
					clusterv1.ClusterNameLabel: cluster.Name,
				},
			},
		}

		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachine.DeepCopy()}

		// Seed three control plane Machines owned by the KCP.
		machines := collections.New()
		for i := 0; i < 3; i++ {
			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
			initObjs = append(initObjs, m)
			machines.Insert(m)
		}

		fakeClient := newFakeClient(initObjs...)

		r := &KubeadmControlPlaneReconciler{
			Client:              fakeClient,
			SecretCachingClient: fakeClient,
			managementCluster: &fakeManagementCluster{
				Management: &internal.Management{Client: fakeClient},
				Workload:   fakeWorkloadCluster{},
			},
			recorder: record.NewFakeRecorder(32),
		}

		controlPlane := &internal.ControlPlane{
			KCP:      kcp,
			Cluster:  cluster,
			Machines: machines,
		}

		// Deletion must requeue while worker Machines still exist ...
		result, err := r.reconcileDelete(ctx, controlPlane)
		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
		g.Expect(err).ToNot(HaveOccurred())

		// ... and the finalizer must be kept in the meantime.
		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

		// All three control plane Machines must still be present.
		controlPlaneMachines := clusterv1.MachineList{}
		labels := map[string]string{
			clusterv1.MachineControlPlaneLabel: "",
		}
		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
	})
  2199  
	t.Run("does not remove any control plane Machines if MachinePools exist", func(t *testing.T) {
		// MachinePool support is feature-gated; enable it so reconcileDelete
		// takes MachinePools into account.
		_ = feature.MutableGates.Set("MachinePool=true")
		g := NewWithT(t)

		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)

		// A worker MachinePool in the same Cluster; like worker Machines, it
		// should block deletion of the control plane Machines.
		workerMachinePool := &expv1.MachinePool{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "worker",
				Namespace: cluster.Namespace,
				Labels: map[string]string{
					clusterv1.ClusterNameLabel: cluster.Name,
				},
			},
		}

		initObjs := []client.Object{cluster.DeepCopy(), kcp.DeepCopy(), workerMachinePool.DeepCopy()}

		// Seed three control plane Machines owned by the KCP.
		machines := collections.New()
		for i := 0; i < 3; i++ {
			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
			initObjs = append(initObjs, m)
			machines.Insert(m)
		}

		fakeClient := newFakeClient(initObjs...)

		r := &KubeadmControlPlaneReconciler{
			Client:              fakeClient,
			SecretCachingClient: fakeClient,
			managementCluster: &fakeManagementCluster{
				Management: &internal.Management{Client: fakeClient},
				Workload:   fakeWorkloadCluster{},
			},
			recorder: record.NewFakeRecorder(32),
		}

		controlPlane := &internal.ControlPlane{
			KCP:      kcp,
			Cluster:  cluster,
			Machines: machines,
		}

		// Deletion must requeue while the MachinePool still exists ...
		result, err := r.reconcileDelete(ctx, controlPlane)
		g.Expect(result).To(BeComparableTo(ctrl.Result{RequeueAfter: deleteRequeueAfter}))
		g.Expect(err).ToNot(HaveOccurred())

		// ... and the finalizer must be kept in the meantime.
		g.Expect(kcp.Finalizers).To(ContainElement(controlplanev1.KubeadmControlPlaneFinalizer))

		// All three control plane Machines must still be present.
		controlPlaneMachines := clusterv1.MachineList{}
		labels := map[string]string{
			clusterv1.MachineControlPlaneLabel: "",
		}
		g.Expect(fakeClient.List(ctx, &controlPlaneMachines, client.MatchingLabels(labels))).To(Succeed())
		g.Expect(controlPlaneMachines.Items).To(HaveLen(3))
	})
  2257  
	t.Run("removes the finalizer if no control plane Machines exist", func(t *testing.T) {
		g := NewWithT(t)

		cluster, kcp, _ := createClusterWithControlPlane(metav1.NamespaceDefault)
		controllerutil.AddFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)

		// No Machines at all: only the Cluster and the KCP are seeded.
		fakeClient := newFakeClient(cluster.DeepCopy(), kcp.DeepCopy())

		r := &KubeadmControlPlaneReconciler{
			Client:              fakeClient,
			SecretCachingClient: fakeClient,
			managementCluster: &fakeManagementCluster{
				Management: &internal.Management{Client: fakeClient},
				Workload:   fakeWorkloadCluster{},
			},
			recorder: record.NewFakeRecorder(32),
		}

		controlPlane := &internal.ControlPlane{
			KCP:     kcp,
			Cluster: cluster,
		}

		// With nothing left to delete, reconcileDelete should complete
		// without requeueing and drop the finalizer.
		result, err := r.reconcileDelete(ctx, controlPlane)
		g.Expect(result).To(BeComparableTo(ctrl.Result{}))
		g.Expect(err).ToNot(HaveOccurred())
		g.Expect(kcp.Finalizers).To(BeEmpty())
	})
  2286  }
  2287  
  2288  // test utils.
  2289  
  2290  func newFakeClient(initObjs ...client.Object) client.Client {
  2291  	return &fakeClient{
  2292  		startTime: time.Now(),
  2293  		Client:    fake.NewClientBuilder().WithObjects(initObjs...).WithStatusSubresource(&controlplanev1.KubeadmControlPlane{}).Build(),
  2294  	}
  2295  }
  2296  
// fakeClient wraps the controller-runtime fake client so that Create can
// stamp a synthetic, monotonically increasing CreationTimestamp on each
// created object.
type fakeClient struct {
	// startTime is advanced by one minute per Create call; guarded by mux.
	startTime time.Time
	mux       sync.Mutex
	client.Client
}
  2302  
// fakeClientI is satisfied by any object whose CreationTimestamp can be set
// (e.g. anything embedding metav1.ObjectMeta); fakeClient.Create uses it to
// assign synthetic timestamps.
type fakeClientI interface {
	SetCreationTimestamp(timestamp metav1.Time)
}
  2306  
  2307  // controller-runtime's fake client doesn't set a CreationTimestamp
  2308  // this sets one that increments by a minute for each object created.
  2309  func (c *fakeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
  2310  	if f, ok := obj.(fakeClientI); ok {
  2311  		c.mux.Lock()
  2312  		c.startTime = c.startTime.Add(time.Minute)
  2313  		f.SetCreationTimestamp(metav1.NewTime(c.startTime))
  2314  		c.mux.Unlock()
  2315  	}
  2316  	return c.Client.Create(ctx, obj, opts...)
  2317  }
  2318  
  2319  func createClusterWithControlPlane(namespace string) (*clusterv1.Cluster, *controlplanev1.KubeadmControlPlane, *unstructured.Unstructured) {
  2320  	kcpName := fmt.Sprintf("kcp-foo-%s", util.RandomString(6))
  2321  
  2322  	cluster := newCluster(&types.NamespacedName{Name: kcpName, Namespace: namespace})
  2323  	cluster.Spec = clusterv1.ClusterSpec{
  2324  		ControlPlaneRef: &corev1.ObjectReference{
  2325  			Kind:       "KubeadmControlPlane",
  2326  			Namespace:  namespace,
  2327  			Name:       kcpName,
  2328  			APIVersion: controlplanev1.GroupVersion.String(),
  2329  		},
  2330  	}
  2331  
  2332  	kcp := &controlplanev1.KubeadmControlPlane{
  2333  		TypeMeta: metav1.TypeMeta{
  2334  			APIVersion: controlplanev1.GroupVersion.String(),
  2335  			Kind:       "KubeadmControlPlane",
  2336  		},
  2337  		ObjectMeta: metav1.ObjectMeta{
  2338  			Name:      kcpName,
  2339  			Namespace: namespace,
  2340  			OwnerReferences: []metav1.OwnerReference{
  2341  				{
  2342  					Kind:       "Cluster",
  2343  					APIVersion: clusterv1.GroupVersion.String(),
  2344  					Name:       kcpName,
  2345  					UID:        "1",
  2346  				},
  2347  			},
  2348  		},
  2349  		Spec: controlplanev1.KubeadmControlPlaneSpec{
  2350  			MachineTemplate: controlplanev1.KubeadmControlPlaneMachineTemplate{
  2351  				InfrastructureRef: corev1.ObjectReference{
  2352  					Kind:       "GenericInfrastructureMachineTemplate",
  2353  					Namespace:  namespace,
  2354  					Name:       "infra-foo",
  2355  					APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
  2356  				},
  2357  			},
  2358  			Replicas: pointer.Int32(int32(3)),
  2359  			Version:  "v1.16.6",
  2360  			RolloutStrategy: &controlplanev1.RolloutStrategy{
  2361  				Type: "RollingUpdate",
  2362  				RollingUpdate: &controlplanev1.RollingUpdate{
  2363  					MaxSurge: &intstr.IntOrString{
  2364  						IntVal: 1,
  2365  					},
  2366  				},
  2367  			},
  2368  		},
  2369  	}
  2370  
  2371  	genericMachineTemplate := &unstructured.Unstructured{
  2372  		Object: map[string]interface{}{
  2373  			"kind":       "GenericInfrastructureMachineTemplate",
  2374  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
  2375  			"metadata": map[string]interface{}{
  2376  				"name":      "infra-foo",
  2377  				"namespace": namespace,
  2378  			},
  2379  			"spec": map[string]interface{}{
  2380  				"template": map[string]interface{}{
  2381  					"spec": map[string]interface{}{
  2382  						"hello": "world",
  2383  					},
  2384  				},
  2385  			},
  2386  		},
  2387  	}
  2388  	return cluster, kcp, genericMachineTemplate
  2389  }
  2390  
  2391  func setKCPHealthy(kcp *controlplanev1.KubeadmControlPlane) {
  2392  	conditions.MarkTrue(kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)
  2393  	conditions.MarkTrue(kcp, controlplanev1.EtcdClusterHealthyCondition)
  2394  }
  2395  
  2396  func createMachineNodePair(name string, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, ready bool) (*clusterv1.Machine, *corev1.Node) {
  2397  	machine := &clusterv1.Machine{
  2398  		TypeMeta: metav1.TypeMeta{
  2399  			Kind:       "Machine",
  2400  			APIVersion: clusterv1.GroupVersion.String(),
  2401  		},
  2402  		ObjectMeta: metav1.ObjectMeta{
  2403  			Namespace: cluster.Namespace,
  2404  			Name:      name,
  2405  			Labels:    internal.ControlPlaneMachineLabelsForCluster(kcp, cluster.Name),
  2406  			OwnerReferences: []metav1.OwnerReference{
  2407  				*metav1.NewControllerRef(kcp, controlplanev1.GroupVersion.WithKind("KubeadmControlPlane")),
  2408  			},
  2409  		},
  2410  		Spec: clusterv1.MachineSpec{
  2411  			ClusterName: cluster.Name,
  2412  			InfrastructureRef: corev1.ObjectReference{
  2413  				Kind:       builder.GenericInfrastructureMachineCRD.Kind,
  2414  				APIVersion: builder.GenericInfrastructureMachineCRD.APIVersion,
  2415  				Name:       builder.GenericInfrastructureMachineCRD.Name,
  2416  				Namespace:  builder.GenericInfrastructureMachineCRD.Namespace,
  2417  			},
  2418  		},
  2419  		Status: clusterv1.MachineStatus{
  2420  			NodeRef: &corev1.ObjectReference{
  2421  				Kind:       "Node",
  2422  				APIVersion: corev1.SchemeGroupVersion.String(),
  2423  				Name:       name,
  2424  			},
  2425  		},
  2426  	}
  2427  	webhook := webhooks.Machine{}
  2428  	if err := webhook.Default(ctx, machine); err != nil {
  2429  		panic(err)
  2430  	}
  2431  
  2432  	node := &corev1.Node{
  2433  		ObjectMeta: metav1.ObjectMeta{
  2434  			Name:   name,
  2435  			Labels: map[string]string{"node-role.kubernetes.io/control-plane": ""},
  2436  		},
  2437  	}
  2438  
  2439  	if ready {
  2440  		node.Spec.ProviderID = fmt.Sprintf("test://%s", machine.GetName())
  2441  		node.Status.Conditions = []corev1.NodeCondition{
  2442  			{
  2443  				Type:   corev1.NodeReady,
  2444  				Status: corev1.ConditionTrue,
  2445  			},
  2446  		}
  2447  	}
  2448  	return machine, node
  2449  }
  2450  
  2451  func setMachineHealthy(m *clusterv1.Machine) {
  2452  	m.Status.NodeRef = &corev1.ObjectReference{
  2453  		Kind: "Node",
  2454  		Name: "node-1",
  2455  	}
  2456  	conditions.MarkTrue(m, controlplanev1.MachineAPIServerPodHealthyCondition)
  2457  	conditions.MarkTrue(m, controlplanev1.MachineControllerManagerPodHealthyCondition)
  2458  	conditions.MarkTrue(m, controlplanev1.MachineSchedulerPodHealthyCondition)
  2459  	conditions.MarkTrue(m, controlplanev1.MachineEtcdPodHealthyCondition)
  2460  	conditions.MarkTrue(m, controlplanev1.MachineEtcdMemberHealthyCondition)
  2461  }
  2462  
  2463  // newCluster return a CAPI cluster object.
  2464  func newCluster(namespacedName *types.NamespacedName) *clusterv1.Cluster {
  2465  	return &clusterv1.Cluster{
  2466  		TypeMeta: metav1.TypeMeta{
  2467  			Kind:       "Cluster",
  2468  			APIVersion: clusterv1.GroupVersion.String(),
  2469  		},
  2470  		ObjectMeta: metav1.ObjectMeta{
  2471  			Namespace: namespacedName.Namespace,
  2472  			Name:      namespacedName.Name,
  2473  		},
  2474  	}
  2475  }
  2476  
  2477  func getTestCACert(key *rsa.PrivateKey) (*x509.Certificate, error) {
  2478  	cfg := certs.Config{
  2479  		CommonName: "kubernetes",
  2480  	}
  2481  
  2482  	now := time.Now().UTC()
  2483  
  2484  	tmpl := x509.Certificate{
  2485  		SerialNumber: new(big.Int).SetInt64(0),
  2486  		Subject: pkix.Name{
  2487  			CommonName:   cfg.CommonName,
  2488  			Organization: cfg.Organization,
  2489  		},
  2490  		NotBefore:             now.Add(time.Minute * -5),
  2491  		NotAfter:              now.Add(time.Hour * 24), // 1 day
  2492  		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
  2493  		MaxPathLenZero:        true,
  2494  		BasicConstraintsValid: true,
  2495  		MaxPathLen:            0,
  2496  		IsCA:                  true,
  2497  	}
  2498  
  2499  	b, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, key.Public(), key)
  2500  	if err != nil {
  2501  		return nil, err
  2502  	}
  2503  
  2504  	c, err := x509.ParseCertificate(b)
  2505  	return c, err
  2506  }