sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/cluster/cluster_controller_test.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cluster
    18  
    19  import (
    20  	"testing"
    21  
    22  	. "github.com/onsi/gomega"
    23  	corev1 "k8s.io/api/core/v1"
    24  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    25  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    26  	utilfeature "k8s.io/component-base/featuregate/testing"
    27  	"k8s.io/utils/pointer"
    28  	ctrl "sigs.k8s.io/controller-runtime"
    29  	"sigs.k8s.io/controller-runtime/pkg/client"
    30  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    31  
    32  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    33  	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    34  	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
    35  	"sigs.k8s.io/cluster-api/feature"
    36  	"sigs.k8s.io/cluster-api/internal/test/builder"
    37  	"sigs.k8s.io/cluster-api/util"
    38  	"sigs.k8s.io/cluster-api/util/conditions"
    39  	"sigs.k8s.io/cluster-api/util/patch"
    40  )
    41  
    42  const (
    43  	clusterReconcileNamespace = "test-cluster-reconcile"
    44  )
    45  
// TestClusterReconciler exercises the cluster controller end to end against
// envtest: it verifies finalizer handling, spec/status patching via the patch
// helper, re-adoption of removed finalizers, and that the
// ControlPlaneInitialized condition is set once a control plane machine with a
// bootstrap data secret exists.
func TestClusterReconciler(t *testing.T) {
	// All objects created by the sub-tests live in this dedicated namespace,
	// which is deleted again when the test function returns.
	ns, err := env.CreateNamespace(ctx, clusterReconcileNamespace)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := env.Delete(ctx, ns); err != nil {
			t.Fatal(err)
		}
	}()

	t.Run("Should create a Cluster", func(t *testing.T) {
		g := NewWithT(t)

		instance := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test1-",
				Namespace:    ns.Name,
			},
			Spec: clusterv1.ClusterSpec{},
		}

		// Create the Cluster object and expect the Reconcile and Deployment to be created
		g.Expect(env.Create(ctx, instance)).To(Succeed())
		key := client.ObjectKey{Namespace: instance.Namespace, Name: instance.Name}
		defer func() {
			err := env.Delete(ctx, instance)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Make sure the Cluster exists.
		// A non-empty finalizer list is the signal that the controller has
		// reconciled the object at least once.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, instance); err != nil {
				return false
			}
			return len(instance.Finalizers) > 0
		}, timeout).Should(BeTrue())
	})

	t.Run("Should successfully patch a cluster object if the status diff is empty but the spec diff is not", func(t *testing.T) {
		g := NewWithT(t)

		// Setup
		cluster := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test2-",
				Namespace:    ns.Name,
			},
		}
		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		defer func() {
			err := env.Delete(ctx, cluster)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Wait for reconciliation to happen.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return len(cluster.Finalizers) > 0
		}, timeout).Should(BeTrue())

		// Patch
		// Wrapped in Eventually so a conflict with the controller's own
		// patches is retried rather than failing the test.
		g.Eventually(func() bool {
			ph, err := patch.NewHelper(cluster, env)
			g.Expect(err).ToNot(HaveOccurred())
			cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"}
			cluster.Spec.ControlPlaneRef = &corev1.ObjectReference{Name: "test-too"}
			g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed())
			return true
		}, timeout).Should(BeTrue())

		// Assertions
		g.Eventually(func() bool {
			instance := &clusterv1.Cluster{}
			if err := env.Get(ctx, key, instance); err != nil {
				return false
			}
			return instance.Spec.InfrastructureRef != nil &&
				instance.Spec.InfrastructureRef.Name == "test"
		}, timeout).Should(BeTrue())
	})

	t.Run("Should successfully patch a cluster object if the spec diff is empty but the status diff is not", func(t *testing.T) {
		g := NewWithT(t)

		// Setup
		cluster := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test3-",
				Namespace:    ns.Name,
			},
		}
		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		defer func() {
			err := env.Delete(ctx, cluster)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Wait for reconciliation to happen.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return len(cluster.Finalizers) > 0
		}, timeout).Should(BeTrue())

		// Patch only a status field; the patch helper must send a status patch.
		g.Eventually(func() bool {
			ph, err := patch.NewHelper(cluster, env)
			g.Expect(err).ToNot(HaveOccurred())
			cluster.Status.InfrastructureReady = true
			g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed())
			return true
		}, timeout).Should(BeTrue())

		// Assertions
		g.Eventually(func() bool {
			instance := &clusterv1.Cluster{}
			if err := env.Get(ctx, key, instance); err != nil {
				return false
			}
			return instance.Status.InfrastructureReady
		}, timeout).Should(BeTrue())
	})

	t.Run("Should successfully patch a cluster object if both the spec diff and status diff are non empty", func(t *testing.T) {
		g := NewWithT(t)

		// Setup
		cluster := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test4-",
				Namespace:    ns.Name,
			},
		}

		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		defer func() {
			err := env.Delete(ctx, cluster)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Wait for reconciliation to happen.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return len(cluster.Finalizers) > 0
		}, timeout).Should(BeTrue())

		// Patch spec and status in one go; the helper must persist both.
		g.Eventually(func() bool {
			ph, err := patch.NewHelper(cluster, env)
			g.Expect(err).ToNot(HaveOccurred())
			cluster.Status.InfrastructureReady = true
			cluster.Spec.InfrastructureRef = &corev1.ObjectReference{Name: "test"}
			g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed())
			return true
		}, timeout).Should(BeTrue())

		// Assertions
		g.Eventually(func() bool {
			instance := &clusterv1.Cluster{}
			if err := env.Get(ctx, key, instance); err != nil {
				return false
			}
			return instance.Status.InfrastructureReady &&
				instance.Spec.InfrastructureRef != nil &&
				instance.Spec.InfrastructureRef.Name == "test"
		}, timeout).Should(BeTrue())
	})

	t.Run("Should re-apply finalizers if removed", func(t *testing.T) {
		g := NewWithT(t)

		// Setup
		cluster := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test5-",
				Namespace:    ns.Name,
			},
		}
		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		defer func() {
			err := env.Delete(ctx, cluster)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Wait for reconciliation to happen.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return len(cluster.Finalizers) > 0
		}, timeout).Should(BeTrue())

		// Remove finalizers
		g.Eventually(func() bool {
			ph, err := patch.NewHelper(cluster, env)
			g.Expect(err).ToNot(HaveOccurred())
			cluster.SetFinalizers([]string{})
			g.Expect(ph.Patch(ctx, cluster, patch.WithStatusObservedGeneration{})).To(Succeed())
			return true
		}, timeout).Should(BeTrue())

		g.Expect(cluster.Finalizers).Should(BeEmpty())

		// Check finalizers are re-applied
		// Returning a sentinel non-empty slice on Get errors keeps Eventually
		// polling instead of passing spuriously.
		g.Eventually(func() []string {
			instance := &clusterv1.Cluster{}
			if err := env.Get(ctx, key, instance); err != nil {
				return []string{"not-empty"}
			}
			return instance.Finalizers
		}, timeout).ShouldNot(BeEmpty())
	})

	t.Run("Should successfully set ControlPlaneInitialized on the cluster object if controlplane is ready", func(t *testing.T) {
		g := NewWithT(t)

		cluster := &clusterv1.Cluster{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test6-",
				Namespace:    ns.Name,
			},
		}

		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		key := client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		defer func() {
			err := env.Delete(ctx, cluster)
			g.Expect(err).ToNot(HaveOccurred())
		}()
		// The kubeconfig secret is needed so the machine controller can reach
		// the (fake) workload cluster.
		g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed())

		// Wait for reconciliation to happen.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return len(cluster.Finalizers) > 0
		}, timeout).Should(BeTrue())

		// Create a node so we can speed up reconciliation. Otherwise, the machine reconciler will requeue the machine
		// after 10 seconds, potentially slowing down this test.
		node := &corev1.Node{
			ObjectMeta: metav1.ObjectMeta{
				Name: "id-node-1",
			},
			Spec: corev1.NodeSpec{
				ProviderID: "aws:///id-node-1",
			},
		}

		g.Expect(env.Create(ctx, node)).To(Succeed())

		// A control plane machine (MachineControlPlaneLabel) whose ProviderID
		// matches the node created above.
		machine := &clusterv1.Machine{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "test6-",
				Namespace:    ns.Name,
				Labels: map[string]string{
					clusterv1.MachineControlPlaneLabel: "",
				},
			},
			Spec: clusterv1.MachineSpec{
				ClusterName: cluster.Name,
				ProviderID:  pointer.String("aws:///id-node-1"),
				Bootstrap: clusterv1.Bootstrap{
					DataSecretName: pointer.String(""),
				},
			},
		}
		// NOTE(review): this overwrites the empty DataSecretName set in the
		// literal above; the first assignment looks redundant — confirm.
		machine.Spec.Bootstrap.DataSecretName = pointer.String("test6-bootstrapdata")
		g.Expect(env.Create(ctx, machine)).To(Succeed())
		key = client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}
		defer func() {
			err := env.Delete(ctx, machine)
			g.Expect(err).ToNot(HaveOccurred())
		}()

		// Wait for machine to be ready.
		//
		// [ncdc] Note, we're using an increased timeout because we've been seeing failures
		// in Prow for this particular block. It looks like it's sometimes taking more than 10 seconds (the value of
		// timeout) for the machine reconciler to add the finalizer and for the change to be persisted to etcd. If
		// we continue to see test timeouts here, that will likely point to something else being the problem, but
		// I've yet to determine any other possibility for the test flakes.
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, machine); err != nil {
				return false
			}
			return len(machine.Finalizers) > 0
		}, timeout*3).Should(BeTrue())

		// Assertion
		key = client.ObjectKey{Name: cluster.Name, Namespace: cluster.Namespace}
		g.Eventually(func() bool {
			if err := env.Get(ctx, key, cluster); err != nil {
				return false
			}
			return conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition)
		}, timeout).Should(BeTrue())
	})
}
   356  
   357  func TestClusterReconciler_reconcileDelete(t *testing.T) {
   358  	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
   359  	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.ClusterTopology, true)()
   360  
   361  	fakeInfraCluster := builder.InfrastructureCluster("test-ns", "test-cluster").Build()
   362  
   363  	tests := []struct {
   364  		name       string
   365  		cluster    *clusterv1.Cluster
   366  		wantDelete bool
   367  	}{
   368  		{
   369  			name: "should proceed with delete if the cluster has the ok-to-delete annotation",
   370  			cluster: func() *clusterv1.Cluster {
   371  				fakeCluster := builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build()
   372  				if fakeCluster.Annotations == nil {
   373  					fakeCluster.Annotations = map[string]string{}
   374  				}
   375  				fakeCluster.Annotations[runtimev1.OkToDeleteAnnotation] = ""
   376  				return fakeCluster
   377  			}(),
   378  			wantDelete: true,
   379  		},
   380  		{
   381  			name:       "should not proceed with delete if the cluster does not have the ok-to-delete annotation",
   382  			cluster:    builder.Cluster("test-ns", "test-cluster").WithTopology(&clusterv1.Topology{}).WithInfrastructureCluster(fakeInfraCluster).Build(),
   383  			wantDelete: false,
   384  		},
   385  	}
   386  
   387  	for _, tt := range tests {
   388  		t.Run(tt.name, func(t *testing.T) {
   389  			g := NewWithT(t)
   390  			fakeClient := fake.NewClientBuilder().WithObjects(fakeInfraCluster, tt.cluster).Build()
   391  			r := &Reconciler{
   392  				Client:                    fakeClient,
   393  				UnstructuredCachingClient: fakeClient,
   394  				APIReader:                 fakeClient,
   395  			}
   396  
   397  			_, _ = r.reconcileDelete(ctx, tt.cluster)
   398  			infraCluster := builder.InfrastructureCluster("", "").Build()
   399  			err := fakeClient.Get(ctx, client.ObjectKeyFromObject(fakeInfraCluster), infraCluster)
   400  			g.Expect(apierrors.IsNotFound(err)).To(Equal(tt.wantDelete))
   401  		})
   402  	}
   403  }
   404  
   405  func TestClusterReconcilerNodeRef(t *testing.T) {
   406  	t.Run("machine to cluster", func(t *testing.T) {
   407  		cluster := &clusterv1.Cluster{
   408  			TypeMeta: metav1.TypeMeta{
   409  				Kind: "Cluster",
   410  			},
   411  			ObjectMeta: metav1.ObjectMeta{
   412  				Name:      "test-cluster",
   413  				Namespace: "test",
   414  			},
   415  			Spec:   clusterv1.ClusterSpec{},
   416  			Status: clusterv1.ClusterStatus{},
   417  		}
   418  
   419  		controlPlaneWithNoderef := &clusterv1.Machine{
   420  			TypeMeta: metav1.TypeMeta{
   421  				Kind: "Machine",
   422  			},
   423  			ObjectMeta: metav1.ObjectMeta{
   424  				Name:      "controlPlaneWithNoderef",
   425  				Namespace: "test",
   426  				Labels: map[string]string{
   427  					clusterv1.ClusterNameLabel:         cluster.Name,
   428  					clusterv1.MachineControlPlaneLabel: "",
   429  				},
   430  			},
   431  			Spec: clusterv1.MachineSpec{
   432  				ClusterName: "test-cluster",
   433  			},
   434  			Status: clusterv1.MachineStatus{
   435  				NodeRef: &corev1.ObjectReference{
   436  					Kind:      "Node",
   437  					Namespace: "test-node",
   438  				},
   439  			},
   440  		}
   441  		controlPlaneWithoutNoderef := &clusterv1.Machine{
   442  			TypeMeta: metav1.TypeMeta{
   443  				Kind: "Machine",
   444  			},
   445  			ObjectMeta: metav1.ObjectMeta{
   446  				Name:      "controlPlaneWithoutNoderef",
   447  				Namespace: "test",
   448  				Labels: map[string]string{
   449  					clusterv1.ClusterNameLabel:         cluster.Name,
   450  					clusterv1.MachineControlPlaneLabel: "",
   451  				},
   452  			},
   453  			Spec: clusterv1.MachineSpec{
   454  				ClusterName: "test-cluster",
   455  			},
   456  		}
   457  		nonControlPlaneWithNoderef := &clusterv1.Machine{
   458  			TypeMeta: metav1.TypeMeta{
   459  				Kind: "Machine",
   460  			},
   461  			ObjectMeta: metav1.ObjectMeta{
   462  				Name:      "nonControlPlaneWitNoderef",
   463  				Namespace: "test",
   464  				Labels: map[string]string{
   465  					clusterv1.ClusterNameLabel: cluster.Name,
   466  				},
   467  			},
   468  			Spec: clusterv1.MachineSpec{
   469  				ClusterName: "test-cluster",
   470  			},
   471  			Status: clusterv1.MachineStatus{
   472  				NodeRef: &corev1.ObjectReference{
   473  					Kind:      "Node",
   474  					Namespace: "test-node",
   475  				},
   476  			},
   477  		}
   478  		nonControlPlaneWithoutNoderef := &clusterv1.Machine{
   479  			TypeMeta: metav1.TypeMeta{
   480  				Kind: "Machine",
   481  			},
   482  			ObjectMeta: metav1.ObjectMeta{
   483  				Name:      "nonControlPlaneWithoutNoderef",
   484  				Namespace: "test",
   485  				Labels: map[string]string{
   486  					clusterv1.ClusterNameLabel: cluster.Name,
   487  				},
   488  			},
   489  			Spec: clusterv1.MachineSpec{
   490  				ClusterName: "test-cluster",
   491  			},
   492  		}
   493  
   494  		tests := []struct {
   495  			name string
   496  			o    client.Object
   497  			want []ctrl.Request
   498  		}{
   499  			{
   500  				name: "controlplane machine, noderef is set, should return cluster",
   501  				o:    controlPlaneWithNoderef,
   502  				want: []ctrl.Request{
   503  					{
   504  						NamespacedName: util.ObjectKey(cluster),
   505  					},
   506  				},
   507  			},
   508  			{
   509  				name: "controlplane machine, noderef is not set",
   510  				o:    controlPlaneWithoutNoderef,
   511  				want: nil,
   512  			},
   513  			{
   514  				name: "not controlplane machine, noderef is set",
   515  				o:    nonControlPlaneWithNoderef,
   516  				want: nil,
   517  			},
   518  			{
   519  				name: "not controlplane machine, noderef is not set",
   520  				o:    nonControlPlaneWithoutNoderef,
   521  				want: nil,
   522  			},
   523  		}
   524  		for _, tt := range tests {
   525  			t.Run(tt.name, func(t *testing.T) {
   526  				g := NewWithT(t)
   527  
   528  				c := fake.NewClientBuilder().WithObjects(cluster, controlPlaneWithNoderef, controlPlaneWithoutNoderef, nonControlPlaneWithNoderef, nonControlPlaneWithoutNoderef).Build()
   529  				r := &Reconciler{
   530  					Client:                    c,
   531  					UnstructuredCachingClient: c,
   532  				}
   533  				requests := r.controlPlaneMachineToCluster(ctx, tt.o)
   534  				g.Expect(requests).To(BeComparableTo(tt.want))
   535  			})
   536  		}
   537  	})
   538  }
   539  
   540  type machineDeploymentBuilder struct {
   541  	md clusterv1.MachineDeployment
   542  }
   543  
   544  func newMachineDeploymentBuilder() *machineDeploymentBuilder {
   545  	return &machineDeploymentBuilder{}
   546  }
   547  
   548  func (b *machineDeploymentBuilder) named(name string) *machineDeploymentBuilder {
   549  	b.md.Name = name
   550  	return b
   551  }
   552  
   553  func (b *machineDeploymentBuilder) ownedBy(c *clusterv1.Cluster) *machineDeploymentBuilder {
   554  	b.md.OwnerReferences = append(b.md.OwnerReferences, metav1.OwnerReference{
   555  		APIVersion: clusterv1.GroupVersion.String(),
   556  		Kind:       "Cluster",
   557  		Name:       c.Name,
   558  	})
   559  	return b
   560  }
   561  
   562  func (b *machineDeploymentBuilder) build() clusterv1.MachineDeployment {
   563  	return b.md
   564  }
   565  
   566  type machineSetBuilder struct {
   567  	ms clusterv1.MachineSet
   568  }
   569  
   570  func newMachineSetBuilder() *machineSetBuilder {
   571  	return &machineSetBuilder{}
   572  }
   573  
   574  func (b *machineSetBuilder) named(name string) *machineSetBuilder {
   575  	b.ms.Name = name
   576  	return b
   577  }
   578  
   579  func (b *machineSetBuilder) ownedBy(c *clusterv1.Cluster) *machineSetBuilder {
   580  	b.ms.OwnerReferences = append(b.ms.OwnerReferences, metav1.OwnerReference{
   581  		APIVersion: clusterv1.GroupVersion.String(),
   582  		Kind:       "Cluster",
   583  		Name:       c.Name,
   584  	})
   585  	return b
   586  }
   587  
   588  func (b *machineSetBuilder) build() clusterv1.MachineSet {
   589  	return b.ms
   590  }
   591  
   592  type machineBuilder struct {
   593  	m clusterv1.Machine
   594  }
   595  
   596  func newMachineBuilder() *machineBuilder {
   597  	return &machineBuilder{}
   598  }
   599  
   600  func (b *machineBuilder) named(name string) *machineBuilder {
   601  	b.m.Name = name
   602  	return b
   603  }
   604  
   605  func (b *machineBuilder) ownedBy(c *clusterv1.Cluster) *machineBuilder {
   606  	b.m.OwnerReferences = append(b.m.OwnerReferences, metav1.OwnerReference{
   607  		APIVersion: clusterv1.GroupVersion.String(),
   608  		Kind:       "Cluster",
   609  		Name:       c.Name,
   610  	})
   611  	return b
   612  }
   613  
   614  func (b *machineBuilder) controlPlane() *machineBuilder {
   615  	b.m.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""}
   616  	return b
   617  }
   618  
   619  func (b *machineBuilder) build() clusterv1.Machine {
   620  	return b.m
   621  }
   622  
   623  type machinePoolBuilder struct {
   624  	mp expv1.MachinePool
   625  }
   626  
   627  func newMachinePoolBuilder() *machinePoolBuilder {
   628  	return &machinePoolBuilder{}
   629  }
   630  
   631  func (b *machinePoolBuilder) named(name string) *machinePoolBuilder {
   632  	b.mp.Name = name
   633  	return b
   634  }
   635  
   636  func (b *machinePoolBuilder) ownedBy(c *clusterv1.Cluster) *machinePoolBuilder {
   637  	b.mp.OwnerReferences = append(b.mp.OwnerReferences, metav1.OwnerReference{
   638  		APIVersion: clusterv1.GroupVersion.String(),
   639  		Kind:       "Cluster",
   640  		Name:       c.Name,
   641  	})
   642  	return b
   643  }
   644  
   645  func (b *machinePoolBuilder) build() expv1.MachinePool {
   646  	return b.mp
   647  }
   648  
// TestFilterOwnedDescendants verifies that filterOwnedDescendants returns
// only the descendants carrying an owner reference to the cluster, in the
// exact order the expected slice below encodes (machine pools, machine
// deployments, machine sets, worker machines, then control plane machines —
// the asserted order implies control plane machines are returned last).
func TestFilterOwnedDescendants(t *testing.T) {
	// Enable MachinePool support so machine pools are included. The error is
	// deliberately ignored; the gate name is known-valid here.
	_ = feature.MutableGates.Set("MachinePool=true")
	g := NewWithT(t)

	c := clusterv1.Cluster{
		TypeMeta: metav1.TypeMeta{
			APIVersion: clusterv1.GroupVersion.String(),
			Kind:       "Cluster",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "c",
		},
	}

	// Each descendant list mixes owned and not-owned objects so filtering is
	// actually exercised.
	md1NotOwnedByCluster := newMachineDeploymentBuilder().named("md1").build()
	md2OwnedByCluster := newMachineDeploymentBuilder().named("md2").ownedBy(&c).build()
	md3NotOwnedByCluster := newMachineDeploymentBuilder().named("md3").build()
	md4OwnedByCluster := newMachineDeploymentBuilder().named("md4").ownedBy(&c).build()

	ms1NotOwnedByCluster := newMachineSetBuilder().named("ms1").build()
	ms2OwnedByCluster := newMachineSetBuilder().named("ms2").ownedBy(&c).build()
	ms3NotOwnedByCluster := newMachineSetBuilder().named("ms3").build()
	ms4OwnedByCluster := newMachineSetBuilder().named("ms4").ownedBy(&c).build()

	m1NotOwnedByCluster := newMachineBuilder().named("m1").build()
	m2OwnedByCluster := newMachineBuilder().named("m2").ownedBy(&c).build()
	m3ControlPlaneOwnedByCluster := newMachineBuilder().named("m3").ownedBy(&c).controlPlane().build()
	m4NotOwnedByCluster := newMachineBuilder().named("m4").build()
	m5OwnedByCluster := newMachineBuilder().named("m5").ownedBy(&c).build()
	m6ControlPlaneOwnedByCluster := newMachineBuilder().named("m6").ownedBy(&c).controlPlane().build()

	mp1NotOwnedByCluster := newMachinePoolBuilder().named("mp1").build()
	mp2OwnedByCluster := newMachinePoolBuilder().named("mp2").ownedBy(&c).build()
	mp3NotOwnedByCluster := newMachinePoolBuilder().named("mp3").build()
	mp4OwnedByCluster := newMachinePoolBuilder().named("mp4").ownedBy(&c).build()

	d := clusterDescendants{
		machineDeployments: clusterv1.MachineDeploymentList{
			Items: []clusterv1.MachineDeployment{
				md1NotOwnedByCluster,
				md2OwnedByCluster,
				md3NotOwnedByCluster,
				md4OwnedByCluster,
			},
		},
		machineSets: clusterv1.MachineSetList{
			Items: []clusterv1.MachineSet{
				ms1NotOwnedByCluster,
				ms2OwnedByCluster,
				ms3NotOwnedByCluster,
				ms4OwnedByCluster,
			},
		},
		controlPlaneMachines: clusterv1.MachineList{
			Items: []clusterv1.Machine{
				m3ControlPlaneOwnedByCluster,
				m6ControlPlaneOwnedByCluster,
			},
		},
		workerMachines: clusterv1.MachineList{
			Items: []clusterv1.Machine{
				m1NotOwnedByCluster,
				m2OwnedByCluster,
				m4NotOwnedByCluster,
				m5OwnedByCluster,
			},
		},
		machinePools: expv1.MachinePoolList{
			Items: []expv1.MachinePool{
				mp1NotOwnedByCluster,
				mp2OwnedByCluster,
				mp3NotOwnedByCluster,
				mp4OwnedByCluster,
			},
		},
	}

	actual, err := d.filterOwnedDescendants(&c)
	g.Expect(err).ToNot(HaveOccurred())

	// Order matters: this pins the traversal order of filterOwnedDescendants.
	expected := []client.Object{
		&mp2OwnedByCluster,
		&mp4OwnedByCluster,
		&md2OwnedByCluster,
		&md4OwnedByCluster,
		&ms2OwnedByCluster,
		&ms4OwnedByCluster,
		&m2OwnedByCluster,
		&m5OwnedByCluster,
		&m3ControlPlaneOwnedByCluster,
		&m6ControlPlaneOwnedByCluster,
	}

	g.Expect(actual).To(BeComparableTo(expected))
}
   744  
   745  func TestDescendantsLength(t *testing.T) {
   746  	g := NewWithT(t)
   747  
   748  	d := clusterDescendants{
   749  		machineDeployments: clusterv1.MachineDeploymentList{
   750  			Items: []clusterv1.MachineDeployment{
   751  				newMachineDeploymentBuilder().named("md1").build(),
   752  			},
   753  		},
   754  		machineSets: clusterv1.MachineSetList{
   755  			Items: []clusterv1.MachineSet{
   756  				newMachineSetBuilder().named("ms1").build(),
   757  				newMachineSetBuilder().named("ms2").build(),
   758  			},
   759  		},
   760  		controlPlaneMachines: clusterv1.MachineList{
   761  			Items: []clusterv1.Machine{
   762  				newMachineBuilder().named("m1").build(),
   763  				newMachineBuilder().named("m2").build(),
   764  				newMachineBuilder().named("m3").build(),
   765  			},
   766  		},
   767  		workerMachines: clusterv1.MachineList{
   768  			Items: []clusterv1.Machine{
   769  				newMachineBuilder().named("m3").build(),
   770  				newMachineBuilder().named("m4").build(),
   771  				newMachineBuilder().named("m5").build(),
   772  				newMachineBuilder().named("m6").build(),
   773  			},
   774  		},
   775  		machinePools: expv1.MachinePoolList{
   776  			Items: []expv1.MachinePool{
   777  				newMachinePoolBuilder().named("mp1").build(),
   778  				newMachinePoolBuilder().named("mp2").build(),
   779  				newMachinePoolBuilder().named("mp3").build(),
   780  				newMachinePoolBuilder().named("mp4").build(),
   781  				newMachinePoolBuilder().named("mp5").build(),
   782  			},
   783  		},
   784  	}
   785  
   786  	g.Expect(d.length()).To(Equal(15))
   787  }
   788  
   789  func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) {
   790  	g := NewWithT(t)
   791  
   792  	c := &clusterv1.Cluster{
   793  		ObjectMeta: metav1.ObjectMeta{
   794  			Name: "c",
   795  		},
   796  		Spec: clusterv1.ClusterSpec{
   797  			ControlPlaneRef: &corev1.ObjectReference{
   798  				APIVersion: "test.io/v1",
   799  				Namespace:  "test",
   800  				Name:       "foo",
   801  			},
   802  		},
   803  	}
   804  
   805  	r := &Reconciler{}
   806  	res, err := r.reconcileControlPlaneInitialized(ctx, c)
   807  	g.Expect(res.IsZero()).To(BeTrue())
   808  	g.Expect(err).ToNot(HaveOccurred())
   809  	g.Expect(conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse())
   810  }