sigs.k8s.io/cluster-api@v1.6.3/controlplane/kubeadm/internal/workload_cluster_test.go (about)

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package internal
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"testing"
    23  
    24  	"github.com/blang/semver/v4"
    25  	"github.com/google/go-cmp/cmp"
    26  	. "github.com/onsi/gomega"
    27  	appsv1 "k8s.io/api/apps/v1"
    28  	corev1 "k8s.io/api/core/v1"
    29  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"sigs.k8s.io/controller-runtime/pkg/client"
    32  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    33  
    34  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    35  	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    36  	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
    37  	"sigs.k8s.io/cluster-api/util/version"
    38  	"sigs.k8s.io/cluster-api/util/yaml"
    39  )
    40  
    41  func TestGetControlPlaneNodes(t *testing.T) {
    42  	tests := []struct {
    43  		name          string
    44  		nodes         []corev1.Node
    45  		expectedNodes []string
    46  	}{
    47  		{
    48  			name: "Return control plane nodes",
    49  			nodes: []corev1.Node{
    50  				{
    51  					ObjectMeta: metav1.ObjectMeta{
    52  						Name: "control-plane-node-with-old-label",
    53  						Labels: map[string]string{
    54  							labelNodeRoleOldControlPlane: "",
    55  						},
    56  					},
    57  				},
    58  				{
    59  					ObjectMeta: metav1.ObjectMeta{
    60  						Name: "control-plane-node-with-both-labels",
    61  						Labels: map[string]string{
    62  							labelNodeRoleOldControlPlane: "",
    63  							labelNodeRoleControlPlane:    "",
    64  						},
    65  					},
    66  				},
    67  				{
    68  					ObjectMeta: metav1.ObjectMeta{
    69  						Name: "control-plane-node-with-new-label",
    70  						Labels: map[string]string{
    71  							labelNodeRoleControlPlane: "",
    72  						},
    73  					},
    74  				},
    75  				{
    76  					ObjectMeta: metav1.ObjectMeta{
    77  						Name:   "worker-node",
    78  						Labels: map[string]string{},
    79  					},
    80  				},
    81  			},
    82  			expectedNodes: []string{
    83  				"control-plane-node-with-both-labels",
    84  				"control-plane-node-with-old-label",
    85  				"control-plane-node-with-new-label",
    86  			},
    87  		},
    88  	}
    89  
    90  	for _, tt := range tests {
    91  		t.Run(tt.name, func(t *testing.T) {
    92  			g := NewWithT(t)
    93  			objs := []client.Object{}
    94  			for i := range tt.nodes {
    95  				objs = append(objs, &tt.nodes[i])
    96  			}
    97  			fakeClient := fake.NewClientBuilder().WithObjects(objs...).Build()
    98  
    99  			w := &Workload{
   100  				Client: fakeClient,
   101  			}
   102  			nodes, err := w.getControlPlaneNodes(ctx)
   103  			g.Expect(err).ToNot(HaveOccurred())
   104  			var actualNodes []string
   105  			for _, n := range nodes.Items {
   106  				actualNodes = append(actualNodes, n.Name)
   107  			}
   108  			g.Expect(actualNodes).To(Equal(tt.expectedNodes))
   109  		})
   110  	}
   111  }
   112  
// TestUpdateKubeProxyImageInfo verifies that UpdateKubeProxyImageInfo rewrites
// the kube-proxy DaemonSet container image to match the KubeadmControlPlane's
// Kubernetes version and optional custom image repository, that digest-pinned
// or malformed images produce errors, and that the update is skipped entirely
// when the KCP carries the SkipKubeProxyAnnotation.
func TestUpdateKubeProxyImageInfo(t *testing.T) {
	tests := []struct {
		name        string
		ds          appsv1.DaemonSet // kube-proxy DaemonSet seeded into the fake cluster
		expectErr   bool             // whether UpdateKubeProxyImageInfo should fail
		expectImage string           // expected container image after the call; "" skips the image assertion
		clientGet   map[string]interface{} // NOTE(review): never read by the test body — candidate for removal
		patchErr    error                  // NOTE(review): never read by the test body — candidate for removal
		KCP         *controlplanev1.KubeadmControlPlane
	}{
		{
			name:        "succeeds if patch correctly",
			ds:          newKubeProxyDS(),
			expectErr:   false,
			expectImage: "k8s.gcr.io/kube-proxy:v1.16.3",
			KCP:         &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
		},
		{
			name:        "returns error if image in kube-proxy ds was in digest format",
			ds:          newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy@sha256:47bfd"),
			expectErr:   true,
			expectImage: "k8s.gcr.io/kube-proxy@sha256:47bfd",
			KCP:         &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
		},
		{
			// Build metadata ("+build1") is not valid in an OCI tag, so it is
			// expected to be rewritten with an underscore.
			name:        "expects OCI compatible format of tag",
			ds:          newKubeProxyDS(),
			expectErr:   false,
			expectImage: "k8s.gcr.io/kube-proxy:v1.16.3_build1",
			KCP:         &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3+build1"}},
		},
		{
			name:      "returns error if image in kube-proxy ds was in wrong format",
			ds:        newKubeProxyDSWithImage(""),
			expectErr: true,
			KCP:       &controlplanev1.KubeadmControlPlane{Spec: controlplanev1.KubeadmControlPlaneSpec{Version: "v1.16.3"}},
		},
		{
			name:        "updates image repository if one has been set on the control plane",
			ds:          newKubeProxyDS(),
			expectErr:   false,
			expectImage: "foo.bar.example/baz/qux/kube-proxy:v1.16.3",
			KCP: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					Version: "v1.16.3",
					KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
						ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
							ImageRepository: "foo.bar.example/baz/qux",
						},
					},
				}},
		},
		{
			name:        "does not update image repository if it is blank",
			ds:          newKubeProxyDS(),
			expectErr:   false,
			expectImage: "k8s.gcr.io/kube-proxy:v1.16.3",
			KCP: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					Version: "v1.16.3",
					KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
						ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
							ImageRepository: "",
						},
					},
				}},
		},
		{
			name:        "does update image repository to new default registry for v1.25 updates",
			ds:          newKubeProxyDSWithImage("k8s.gcr.io/kube-proxy:v1.24.0"),
			expectErr:   false,
			expectImage: "registry.k8s.io/kube-proxy:v1.25.0-alpha.1",
			KCP: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					Version: "v1.25.0-alpha.1",
				}},
		},
		{
			name:      "returns error if image repository is invalid",
			ds:        newKubeProxyDS(),
			expectErr: true,
			KCP: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					Version: "v1.16.3",
					KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
						ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
							ImageRepository: "%%%",
						},
					},
				}},
		},
		{
			name:        "does not update image repository when no kube-proxy update is requested",
			ds:          newKubeProxyDSWithImage(""), // Using the same image name that would otherwise lead to an error
			expectErr:   false,
			expectImage: "",
			KCP: &controlplanev1.KubeadmControlPlane{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						controlplanev1.SkipKubeProxyAnnotation: "",
					},
				},
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					Version: "v1.16.3",
				}},
		},
	}

	for i := range tests {
		tt := tests[i] // capture the table entry (pre-Go 1.22 loop variable semantics)
		t.Run(tt.name, func(t *testing.T) {
			gs := NewWithT(t)

			objects := []client.Object{
				&tt.ds,
			}
			fakeClient := fake.NewClientBuilder().WithObjects(objects...).Build()
			w := &Workload{
				Client: fakeClient,
			}
			// Derive the target semver from the KCP spec; every fixture above
			// carries a parseable version, so this must not fail.
			kubernetesVersion, err := version.ParseMajorMinorPatchTolerant(tt.KCP.Spec.Version)
			gs.Expect(err).ToNot(HaveOccurred())
			err = w.UpdateKubeProxyImageInfo(ctx, tt.KCP, kubernetesVersion)
			if tt.expectErr {
				gs.Expect(err).To(HaveOccurred())
			} else {
				gs.Expect(err).ToNot(HaveOccurred())
			}

			// Read the DaemonSet image back even for error cases, so a failed
			// update can be asserted not to have mutated the image.
			proxyImage, err := getProxyImageInfo(ctx, w.Client)
			gs.Expect(err).ToNot(HaveOccurred())
			if tt.expectImage != "" {
				gs.Expect(proxyImage).To(Equal(tt.expectImage))
			}
		})
	}
}
   250  
// TestRemoveMachineFromKubeadmConfigMap verifies that a machine's API endpoint
// entry is removed from the ClusterStatus section of the kubeadm-config
// ConfigMap on Kubernetes < 1.22, that nil machines / nil node refs are
// tolerated, and that the call is a no-op for Kubernetes >= 1.22 (where
// kubeadm no longer maintains ClusterStatus).
func TestRemoveMachineFromKubeadmConfigMap(t *testing.T) {
	// Machine whose node ref matches the first apiEndpoints entry below.
	machine := &clusterv1.Machine{
		Status: clusterv1.MachineStatus{
			NodeRef: &corev1.ObjectReference{
				Name: "ip-10-0-0-1.ec2.internal",
			},
		},
	}
	// kubeadm-config ConfigMap with a two-endpoint ClusterStatus payload.
	kubeadmConfig := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      kubeadmConfigKey,
			Namespace: metav1.NamespaceSystem,
		},
		Data: map[string]string{
			clusterStatusKey: yaml.Raw(`
				apiEndpoints:
				  ip-10-0-0-1.ec2.internal:
				    advertiseAddress: 10.0.0.1
				    bindPort: 6443
				  ip-10-0-0-2.ec2.internal:
				    advertiseAddress: 10.0.0.2
				    bindPort: 6443
				apiVersion: kubeadm.k8s.io/v1beta2
				kind: ClusterStatus
				`),
		},
		// NOTE(review): purpose of this empty-key BinaryData entry is not
		// evident from this file — presumably it must survive the update
		// untouched; confirm before removing.
		BinaryData: map[string][]byte{
			"": nil,
		},
	}
	// Variant with the ClusterStatus entry stripped, used by the error cases.
	kubeadmConfigWithoutClusterStatus := kubeadmConfig.DeepCopy()
	delete(kubeadmConfigWithoutClusterStatus.Data, clusterStatusKey)

	tests := []struct {
		name              string
		kubernetesVersion semver.Version
		machine           *clusterv1.Machine
		objs              []client.Object
		expectErr         bool
		expectedEndpoints string // expected ClusterStatus payload after the call; "" skips the check
	}{
		{
			name:      "does not panic if machine is nil",
			expectErr: false,
		},
		{
			name: "does not panic if machine noderef is nil",
			machine: &clusterv1.Machine{
				Status: clusterv1.MachineStatus{
					NodeRef: nil,
				},
			},
			expectErr: false,
		},
		{
			name:      "returns error if unable to find kubeadm-config",
			machine:   machine,
			expectErr: true,
		},
		{
			name:              "returns error if unable to find kubeadm-config for Kubernetes version < 1.22.0",
			kubernetesVersion: semver.MustParse("1.19.1"),
			machine:           machine,
			objs:              []client.Object{kubeadmConfigWithoutClusterStatus},
			expectErr:         true,
		},
		{
			name:              "returns error if unable to remove api endpoint for Kubernetes version < 1.22.0",
			kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus
			machine:           machine,
			objs:              []client.Object{kubeadmConfigWithoutClusterStatus},
			expectErr:         true,
		},
		{
			name:              "removes the machine node ref from kubeadm config for Kubernetes version < 1.22.0",
			kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus
			machine:           machine,
			objs:              []client.Object{kubeadmConfig},
			expectErr:         false,
			expectedEndpoints: yaml.Raw(`
				apiEndpoints:
				  ip-10-0-0-2.ec2.internal:
				    advertiseAddress: 10.0.0.2
				    bindPort: 6443
				apiVersion: kubeadm.k8s.io/v1beta2
				kind: ClusterStatus
				`),
		},
		{
			name:              "no op for Kubernetes version 1.22.0 alpha",
			kubernetesVersion: semver.MustParse("1.22.0-alpha.0.734+ba502ee555924a"), // Kubernetes version >= 1.22.0 should not manage ClusterStatus
			machine:           machine,
			objs:              []client.Object{kubeadmConfigWithoutClusterStatus},
			expectErr:         false,
		},
		{
			name:              "no op for Kubernetes version >= 1.22.0",
			kubernetesVersion: minKubernetesVersionWithoutClusterStatus, // Kubernetes version >= 1.22.0 should not manage ClusterStatus
			machine:           machine,
			objs:              []client.Object{kubeadmConfigWithoutClusterStatus},
			expectErr:         false,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
			w := &Workload{
				Client: fakeClient,
			}
			err := w.RemoveMachineFromKubeadmConfigMap(ctx, tt.machine, tt.kubernetesVersion)
			if tt.expectErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())
			if tt.expectedEndpoints != "" {
				// Read the ConfigMap back and compare the whole ClusterStatus
				// payload; cmp.Diff is attached for a readable failure message.
				var actualConfig corev1.ConfigMap
				g.Expect(w.Client.Get(
					ctx,
					client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
					&actualConfig,
				)).To(Succeed())
				g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.expectedEndpoints), cmp.Diff(tt.expectedEndpoints, actualConfig.Data[clusterStatusKey]))
			}
		})
	}
}
   380  
// TestUpdateKubeletConfigMap verifies the per-minor-version kubelet ConfigMap
// handling during an upgrade: for target versions below 1.24 a new
// kubelet-config-1.x ConfigMap is created by copying the previous minor's map;
// the 1.20 -> 1.21 step defaults cgroupDriver to systemd without overriding an
// explicitly set value; from 1.24 on the unsuffixed "kubelet-config" name is
// reused as-is (no-op); and a missing source ConfigMap is an error.
func TestUpdateKubeletConfigMap(t *testing.T) {
	tests := []struct {
		name               string
		version            semver.Version // target Kubernetes version of the upgrade
		objs               []client.Object
		expectErr          bool
		expectCgroupDriver string // substring expected in the resulting kubelet config ("" matches trivially)
		expectNewConfigMap bool   // true if a freshly created ConfigMap (new resourceVersion) is expected
	}{
		{
			name:    "create new config map for 1.19 --> 1.20 (anything < 1.24); config map for previous version is copied",
			version: semver.Version{Major: 1, Minor: 20},
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "kubelet-config-1.19",
					Namespace:       metav1.NamespaceSystem,
					ResourceVersion: "some-resource-version",
				},
				Data: map[string]string{
					kubeletConfigKey: yaml.Raw(`
						apiVersion: kubelet.config.k8s.io/v1beta1
						kind: KubeletConfiguration
						foo: bar
						`),
				},
			}},
			expectNewConfigMap: true,
		},
		{
			name:    "create new config map 1.23 --> 1.24; config map for previous version is copied",
			version: semver.Version{Major: 1, Minor: 24},
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "kubelet-config-1.23",
					Namespace:       metav1.NamespaceSystem,
					ResourceVersion: "some-resource-version",
				},
				Data: map[string]string{
					kubeletConfigKey: yaml.Raw(`
						apiVersion: kubelet.config.k8s.io/v1beta1
						kind: KubeletConfiguration
						foo: bar
						`),
				},
			}},
			expectNewConfigMap: true,
		},
		{
			name:    "create new config map >=1.24 --> next; no op",
			version: semver.Version{Major: 1, Minor: 25},
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "kubelet-config",
					Namespace:       metav1.NamespaceSystem,
					ResourceVersion: "some-resource-version",
				},
				Data: map[string]string{
					kubeletConfigKey: yaml.Raw(`
						apiVersion: kubelet.config.k8s.io/v1beta1
						kind: KubeletConfiguration
						foo: bar
						`),
				},
			}},
		},
		{
			name:    "1.20 --> 1.21 sets the cgroupDriver if empty",
			version: semver.Version{Major: 1, Minor: 21},
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "kubelet-config-1.20",
					Namespace:       metav1.NamespaceSystem,
					ResourceVersion: "some-resource-version",
				},
				Data: map[string]string{
					kubeletConfigKey: yaml.Raw(`
						apiVersion: kubelet.config.k8s.io/v1beta1
						kind: KubeletConfiguration
						foo: bar
						`),
				},
			}},
			expectCgroupDriver: "systemd",
			expectNewConfigMap: true,
		},
		{
			name:    "1.20 --> 1.21 preserves cgroupDriver if already set",
			version: semver.Version{Major: 1, Minor: 21},
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:            "kubelet-config-1.20",
					Namespace:       metav1.NamespaceSystem,
					ResourceVersion: "some-resource-version",
				},
				Data: map[string]string{
					kubeletConfigKey: yaml.Raw(`
						apiVersion: kubelet.config.k8s.io/v1beta1
						kind: KubeletConfiguration
						cgroupDriver: cgroupfs
						foo: bar
					`),
				},
			}},
			expectCgroupDriver: "cgroupfs",
			expectNewConfigMap: true,
		},
		{
			name:      "returns error if cannot find previous config map",
			version:   semver.Version{Major: 1, Minor: 21},
			expectErr: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
			w := &Workload{
				Client: fakeClient,
			}
			err := w.UpdateKubeletConfigMap(ctx, tt.version)
			if tt.expectErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			// Check if the resulting ConfigMap exists
			var actualConfig corev1.ConfigMap
			g.Expect(w.Client.Get(
				ctx,
				client.ObjectKey{Name: generateKubeletConfigName(tt.version), Namespace: metav1.NamespaceSystem},
				&actualConfig,
			)).To(Succeed())
			// Check other values are carried over for previous config map
			g.Expect(actualConfig.Data[kubeletConfigKey]).To(ContainSubstring("foo"))
			// Check the cgroupvalue has the expected value
			g.Expect(actualConfig.Data[kubeletConfigKey]).To(ContainSubstring(tt.expectCgroupDriver))
			// check if the config map is new
			if tt.expectNewConfigMap {
				g.Expect(actualConfig.ResourceVersion).ToNot(Equal("some-resource-version"))
			} else {
				g.Expect(actualConfig.ResourceVersion).To(Equal("some-resource-version"))
			}
		})
	}
}
   528  
// TestUpdateUpdateClusterConfigurationInKubeadmConfigMap verifies
// updateClusterConfiguration: it fails when the kubeadm-config ConfigMap or
// its ClusterConfiguration entry is missing or unparseable, leaves the stored
// YAML byte-identical when the mutator changes nothing, persists mutator
// changes (re-serializing with defaulted sub-structs such as apiServer/etcd),
// and converts to a newer kubeadm API version when the target Kubernetes
// version requires it.
// NOTE(review): the doubled "UpdateUpdate" in the test name looks like a typo;
// it is kept here because renaming would change the test's -run identifier.
func TestUpdateUpdateClusterConfigurationInKubeadmConfigMap(t *testing.T) {
	tests := []struct {
		name          string
		version       semver.Version // target Kubernetes version, drives kubeadm API version selection
		objs          []client.Object
		mutator       func(*bootstrapv1.ClusterConfiguration)
		wantConfigMap *corev1.ConfigMap // expected ConfigMap content after the call (only Data is compared)
		wantErr       bool
	}{
		{
			name:    "fails if missing config map",
			version: semver.MustParse("1.17.2"),
			objs:    nil,
			wantErr: true,
		},
		{
			name:    "fail if config map without ClusterConfiguration data",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{},
			}},
			wantErr: true,
		},
		{
			name:    "fail if config map with invalid ClusterConfiguration data",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: "foo",
				},
			}},
			wantErr: true,
		},
		{
			name:    "no op if mutator does not apply changes",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterConfiguration
						kubernetesVersion: v1.16.1
						`),
				},
			}},
			mutator: func(c *bootstrapv1.ClusterConfiguration) {},
			wantConfigMap: &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterConfiguration
						kubernetesVersion: v1.16.1
						`),
				},
			},
		},
		{
			name:    "apply changes",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterConfiguration
						kubernetesVersion: v1.16.1
						`),
				},
			}},
			mutator: func(c *bootstrapv1.ClusterConfiguration) {
				c.KubernetesVersion = "v1.17.2"
			},
			// The rewritten payload includes empty defaulted sub-structs
			// because the ConfigMap is re-serialized from the full type.
			wantConfigMap: &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiServer: {}
						apiVersion: kubeadm.k8s.io/v1beta2
						controllerManager: {}
						dns: {}
						etcd: {}
						kind: ClusterConfiguration
						kubernetesVersion: v1.17.2
						networking: {}
						scheduler: {}
						`),
				},
			},
		},
		{
			name:    "converts kubeadm api version during mutation if required",
			version: semver.MustParse("1.28.0"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterConfiguration
						kubernetesVersion: v1.16.1
						`),
				},
			}},
			mutator: func(c *bootstrapv1.ClusterConfiguration) {
				c.KubernetesVersion = "v1.28.0"
			},
			// Kubernetes 1.28 requires kubeadm API v1beta3, so the stored
			// apiVersion is upgraded alongside the mutation.
			wantConfigMap: &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterConfigurationKey: yaml.Raw(`
						apiServer: {}
						apiVersion: kubeadm.k8s.io/v1beta3
						controllerManager: {}
						dns: {}
						etcd: {}
						kind: ClusterConfiguration
						kubernetesVersion: v1.28.0
						networking: {}
						scheduler: {}
						`),
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()

			w := &Workload{
				Client: fakeClient,
			}
			err := w.updateClusterConfiguration(ctx, tt.mutator, tt.version)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			// Compare the full serialized ClusterConfiguration; cmp.Diff is
			// attached for a readable failure message.
			var actualConfig corev1.ConfigMap
			g.Expect(w.Client.Get(
				ctx,
				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
				&actualConfig,
			)).To(Succeed())
			g.Expect(actualConfig.Data[clusterConfigurationKey]).To(Equal(tt.wantConfigMap.Data[clusterConfigurationKey]), cmp.Diff(tt.wantConfigMap.Data[clusterConfigurationKey], actualConfig.Data[clusterConfigurationKey]))
		})
	}
}
   706  
// TestUpdateUpdateClusterStatusInKubeadmConfigMap verifies updateClusterStatus:
// it fails when the kubeadm-config ConfigMap or its ClusterStatus entry is
// missing or unparseable, leaves the stored YAML untouched when the mutator
// changes nothing, and persists endpoint additions made by the mutator.
// NOTE(review): the doubled "UpdateUpdate" in the test name looks like a typo;
// it is kept here because renaming would change the test's -run identifier.
func TestUpdateUpdateClusterStatusInKubeadmConfigMap(t *testing.T) {
	tests := []struct {
		name          string
		version       semver.Version
		objs          []client.Object
		mutator       func(status *bootstrapv1.ClusterStatus)
		wantConfigMap *corev1.ConfigMap // expected ConfigMap content after the call (only Data is compared)
		wantErr       bool
	}{
		{
			name:    "fails if missing config map",
			version: semver.MustParse("1.17.2"),
			objs:    nil,
			wantErr: true,
		},
		{
			name:    "fail if config map without ClusterStatus data",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{},
			}},
			wantErr: true,
		},
		{
			name:    "fail if config map with invalid ClusterStatus data",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterStatusKey: "foo",
				},
			}},
			wantErr: true,
		},
		{
			name:    "no op if mutator does not apply changes",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterStatusKey: yaml.Raw(`
						apiEndpoints:
						  ip-10-0-0-1.ec2.internal:
						    advertiseAddress: 10.0.0.1
						    bindPort: 6443
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterStatus
						`),
				},
			}},
			mutator: func(status *bootstrapv1.ClusterStatus) {},
			wantConfigMap: &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterStatusKey: yaml.Raw(`
						apiEndpoints:
						  ip-10-0-0-1.ec2.internal:
						    advertiseAddress: 10.0.0.1
						    bindPort: 6443
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterStatus
						`),
				},
			},
		},
		{
			name:    "apply changes",
			version: semver.MustParse("1.17.2"),
			objs: []client.Object{&corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterStatusKey: yaml.Raw(`
						apiEndpoints:
						  ip-10-0-0-1.ec2.internal:
						    advertiseAddress: 10.0.0.1
						    bindPort: 6443
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterStatus
						`),
				},
			}},
			// Adds a second (empty) endpoint; it must be serialized as `{}`.
			mutator: func(status *bootstrapv1.ClusterStatus) {
				status.APIEndpoints["ip-10-0-0-2.ec2.internal"] = bootstrapv1.APIEndpoint{}
			},
			wantConfigMap: &corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      kubeadmConfigKey,
					Namespace: metav1.NamespaceSystem,
				},
				Data: map[string]string{
					clusterStatusKey: yaml.Raw(`
						apiEndpoints:
						  ip-10-0-0-1.ec2.internal:
						    advertiseAddress: 10.0.0.1
						    bindPort: 6443
						  ip-10-0-0-2.ec2.internal: {}
						apiVersion: kubeadm.k8s.io/v1beta2
						kind: ClusterStatus
						`),
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()

			w := &Workload{
				Client: fakeClient,
			}
			err := w.updateClusterStatus(ctx, tt.mutator, tt.version)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).ToNot(HaveOccurred())

			// Compare the full serialized ClusterStatus; cmp.Diff is attached
			// for a readable failure message.
			var actualConfig corev1.ConfigMap
			g.Expect(w.Client.Get(
				ctx,
				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
				&actualConfig,
			)).To(Succeed())
			g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(tt.wantConfigMap.Data[clusterStatusKey]), cmp.Diff(tt.wantConfigMap.Data[clusterStatusKey], actualConfig.Data[clusterStatusKey]))
		})
	}
}
   852  
   853  func TestUpdateKubernetesVersionInKubeadmConfigMap(t *testing.T) {
   854  	tests := []struct {
   855  		name                     string
   856  		version                  semver.Version
   857  		clusterConfigurationData string
   858  	}{
   859  		{
   860  			name:    "updates the config map and changes the kubeadm API version",
   861  			version: semver.MustParse("1.17.2"),
   862  			clusterConfigurationData: yaml.Raw(`
   863  				apiVersion: kubeadm.k8s.io/v1beta2
   864  				kind: ClusterConfiguration
   865  				kubernetesVersion: v1.16.1`),
   866  		},
   867  	}
   868  
   869  	for _, tt := range tests {
   870  		t.Run(tt.name, func(t *testing.T) {
   871  			g := NewWithT(t)
   872  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   873  				ObjectMeta: metav1.ObjectMeta{
   874  					Name:      kubeadmConfigKey,
   875  					Namespace: metav1.NamespaceSystem,
   876  				},
   877  				Data: map[string]string{
   878  					clusterConfigurationKey: tt.clusterConfigurationData,
   879  				},
   880  			}).Build()
   881  
   882  			w := &Workload{
   883  				Client: fakeClient,
   884  			}
   885  			err := w.UpdateKubernetesVersionInKubeadmConfigMap(ctx, tt.version)
   886  			g.Expect(err).ToNot(HaveOccurred())
   887  
   888  			var actualConfig corev1.ConfigMap
   889  			g.Expect(w.Client.Get(
   890  				ctx,
   891  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   892  				&actualConfig,
   893  			)).To(Succeed())
   894  			g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.version.String()))
   895  		})
   896  	}
   897  }
   898  
   899  func TestUpdateImageRepositoryInKubeadmConfigMap(t *testing.T) {
   900  	tests := []struct {
   901  		name                     string
   902  		clusterConfigurationData string
   903  		newImageRepository       string
   904  		wantImageRepository      string
   905  	}{
   906  		{
   907  			name: "it should set the image repository",
   908  			clusterConfigurationData: yaml.Raw(`
   909  				apiVersion: kubeadm.k8s.io/v1beta2
   910  				kind: ClusterConfiguration`),
   911  			newImageRepository:  "example.com/k8s",
   912  			wantImageRepository: "example.com/k8s",
   913  		},
   914  		{
   915  			name: "it should preserve the existing image repository if then new value is empty",
   916  			clusterConfigurationData: yaml.Raw(`
   917  				apiVersion: kubeadm.k8s.io/v1beta2
   918  				kind: ClusterConfiguration
   919  				imageRepository: foo.bar/baz.io`),
   920  			newImageRepository:  "",
   921  			wantImageRepository: "foo.bar/baz.io",
   922  		},
   923  	}
   924  
   925  	for _, tt := range tests {
   926  		t.Run(tt.name, func(t *testing.T) {
   927  			g := NewWithT(t)
   928  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   929  				ObjectMeta: metav1.ObjectMeta{
   930  					Name:      kubeadmConfigKey,
   931  					Namespace: metav1.NamespaceSystem,
   932  				},
   933  				Data: map[string]string{
   934  					clusterConfigurationKey: tt.clusterConfigurationData,
   935  				},
   936  			}).Build()
   937  
   938  			w := &Workload{
   939  				Client: fakeClient,
   940  			}
   941  			err := w.UpdateImageRepositoryInKubeadmConfigMap(ctx, tt.newImageRepository, semver.MustParse("1.19.1"))
   942  			g.Expect(err).ToNot(HaveOccurred())
   943  
   944  			var actualConfig corev1.ConfigMap
   945  			g.Expect(w.Client.Get(
   946  				ctx,
   947  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   948  				&actualConfig,
   949  			)).To(Succeed())
   950  			g.Expect(actualConfig.Data[clusterConfigurationKey]).To(ContainSubstring(tt.wantImageRepository))
   951  		})
   952  	}
   953  }
   954  
   955  func TestUpdateApiServerInKubeadmConfigMap(t *testing.T) {
   956  	tests := []struct {
   957  		name                     string
   958  		clusterConfigurationData string
   959  		newAPIServer             bootstrapv1.APIServer
   960  		wantClusterConfiguration string
   961  	}{
   962  		{
   963  			name: "it should set the api server config",
   964  			clusterConfigurationData: yaml.Raw(`
   965  				apiVersion: kubeadm.k8s.io/v1beta2
   966  				kind: ClusterConfiguration
   967  				`),
   968  			newAPIServer: bootstrapv1.APIServer{
   969  				ControlPlaneComponent: bootstrapv1.ControlPlaneComponent{
   970  					ExtraArgs: map[string]string{
   971  						"bar":     "baz",
   972  						"someKey": "someVal",
   973  					},
   974  					ExtraVolumes: []bootstrapv1.HostPathMount{
   975  						{
   976  							Name:      "mount2",
   977  							HostPath:  "/bar/baz",
   978  							MountPath: "/foo/bar",
   979  						},
   980  					},
   981  				},
   982  			},
   983  			wantClusterConfiguration: yaml.Raw(`
   984  				apiServer:
   985  				  extraArgs:
   986  				    bar: baz
   987  				    someKey: someVal
   988  				  extraVolumes:
   989  				  - hostPath: /bar/baz
   990  				    mountPath: /foo/bar
   991  				    name: mount2
   992  				apiVersion: kubeadm.k8s.io/v1beta2
   993  				controllerManager: {}
   994  				dns: {}
   995  				etcd: {}
   996  				kind: ClusterConfiguration
   997  				networking: {}
   998  				scheduler: {}
   999  				`),
  1000  		},
  1001  	}
  1002  
  1003  	for _, tt := range tests {
  1004  		t.Run(tt.name, func(t *testing.T) {
  1005  			g := NewWithT(t)
  1006  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
  1007  				ObjectMeta: metav1.ObjectMeta{
  1008  					Name:      kubeadmConfigKey,
  1009  					Namespace: metav1.NamespaceSystem,
  1010  				},
  1011  				Data: map[string]string{
  1012  					clusterConfigurationKey: tt.clusterConfigurationData,
  1013  				},
  1014  			}).Build()
  1015  
  1016  			w := &Workload{
  1017  				Client: fakeClient,
  1018  			}
  1019  			err := w.UpdateAPIServerInKubeadmConfigMap(ctx, tt.newAPIServer, semver.MustParse("1.19.1"))
  1020  			g.Expect(err).ToNot(HaveOccurred())
  1021  
  1022  			var actualConfig corev1.ConfigMap
  1023  			g.Expect(w.Client.Get(
  1024  				ctx,
  1025  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
  1026  				&actualConfig,
  1027  			)).To(Succeed())
  1028  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
  1029  		})
  1030  	}
  1031  }
  1032  
  1033  func TestUpdateControllerManagerInKubeadmConfigMap(t *testing.T) {
  1034  	tests := []struct {
  1035  		name                     string
  1036  		clusterConfigurationData string
  1037  		newControllerManager     bootstrapv1.ControlPlaneComponent
  1038  		wantClusterConfiguration string
  1039  	}{
  1040  		{
  1041  			name: "it should set the controller manager config",
  1042  			clusterConfigurationData: yaml.Raw(`
  1043  				apiVersion: kubeadm.k8s.io/v1beta2
  1044  				kind: ClusterConfiguration
  1045  				`),
  1046  			newControllerManager: bootstrapv1.ControlPlaneComponent{
  1047  				ExtraArgs: map[string]string{
  1048  					"bar":     "baz",
  1049  					"someKey": "someVal",
  1050  				},
  1051  				ExtraVolumes: []bootstrapv1.HostPathMount{
  1052  					{
  1053  						Name:      "mount2",
  1054  						HostPath:  "/bar/baz",
  1055  						MountPath: "/foo/bar",
  1056  					},
  1057  				},
  1058  			},
  1059  			wantClusterConfiguration: yaml.Raw(`
  1060  				apiServer: {}
  1061  				apiVersion: kubeadm.k8s.io/v1beta2
  1062  				controllerManager:
  1063  				  extraArgs:
  1064  				    bar: baz
  1065  				    someKey: someVal
  1066  				  extraVolumes:
  1067  				  - hostPath: /bar/baz
  1068  				    mountPath: /foo/bar
  1069  				    name: mount2
  1070  				dns: {}
  1071  				etcd: {}
  1072  				kind: ClusterConfiguration
  1073  				networking: {}
  1074  				scheduler: {}
  1075  				`),
  1076  		},
  1077  	}
  1078  
  1079  	for _, tt := range tests {
  1080  		t.Run(tt.name, func(t *testing.T) {
  1081  			g := NewWithT(t)
  1082  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
  1083  				ObjectMeta: metav1.ObjectMeta{
  1084  					Name:      kubeadmConfigKey,
  1085  					Namespace: metav1.NamespaceSystem,
  1086  				},
  1087  				Data: map[string]string{
  1088  					clusterConfigurationKey: tt.clusterConfigurationData,
  1089  				},
  1090  			}).Build()
  1091  
  1092  			w := &Workload{
  1093  				Client: fakeClient,
  1094  			}
  1095  			err := w.UpdateControllerManagerInKubeadmConfigMap(ctx, tt.newControllerManager, semver.MustParse("1.19.1"))
  1096  			g.Expect(err).ToNot(HaveOccurred())
  1097  
  1098  			var actualConfig corev1.ConfigMap
  1099  			g.Expect(w.Client.Get(
  1100  				ctx,
  1101  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
  1102  				&actualConfig,
  1103  			)).To(Succeed())
  1104  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
  1105  		})
  1106  	}
  1107  }
  1108  
  1109  func TestUpdateSchedulerInKubeadmConfigMap(t *testing.T) {
  1110  	tests := []struct {
  1111  		name                     string
  1112  		clusterConfigurationData string
  1113  		newScheduler             bootstrapv1.ControlPlaneComponent
  1114  		wantClusterConfiguration string
  1115  	}{
  1116  		{
  1117  			name: "it should set the scheduler config",
  1118  			clusterConfigurationData: yaml.Raw(`
  1119  				apiVersion: kubeadm.k8s.io/v1beta2
  1120  				kind: ClusterConfiguration
  1121  				`),
  1122  			newScheduler: bootstrapv1.ControlPlaneComponent{
  1123  				ExtraArgs: map[string]string{
  1124  					"bar":     "baz",
  1125  					"someKey": "someVal",
  1126  				},
  1127  				ExtraVolumes: []bootstrapv1.HostPathMount{
  1128  					{
  1129  						Name:      "mount2",
  1130  						HostPath:  "/bar/baz",
  1131  						MountPath: "/foo/bar",
  1132  					},
  1133  				},
  1134  			},
  1135  			wantClusterConfiguration: yaml.Raw(`
  1136  				apiServer: {}
  1137  				apiVersion: kubeadm.k8s.io/v1beta2
  1138  				controllerManager: {}
  1139  				dns: {}
  1140  				etcd: {}
  1141  				kind: ClusterConfiguration
  1142  				networking: {}
  1143  				scheduler:
  1144  				  extraArgs:
  1145  				    bar: baz
  1146  				    someKey: someVal
  1147  				  extraVolumes:
  1148  				  - hostPath: /bar/baz
  1149  				    mountPath: /foo/bar
  1150  				    name: mount2
  1151  				`),
  1152  		},
  1153  	}
  1154  	for _, tt := range tests {
  1155  		t.Run(tt.name, func(t *testing.T) {
  1156  			g := NewWithT(t)
  1157  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
  1158  				ObjectMeta: metav1.ObjectMeta{
  1159  					Name:      kubeadmConfigKey,
  1160  					Namespace: metav1.NamespaceSystem,
  1161  				},
  1162  				Data: map[string]string{
  1163  					clusterConfigurationKey: tt.clusterConfigurationData,
  1164  				},
  1165  			}).Build()
  1166  
  1167  			w := &Workload{
  1168  				Client: fakeClient,
  1169  			}
  1170  			err := w.UpdateSchedulerInKubeadmConfigMap(ctx, tt.newScheduler, semver.MustParse("1.19.1"))
  1171  			g.Expect(err).ToNot(HaveOccurred())
  1172  
  1173  			var actualConfig corev1.ConfigMap
  1174  			g.Expect(w.Client.Get(
  1175  				ctx,
  1176  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
  1177  				&actualConfig,
  1178  			)).To(Succeed())
  1179  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
  1180  		})
  1181  	}
  1182  }
  1183  
  1184  func TestClusterStatus(t *testing.T) {
  1185  	node1 := &corev1.Node{
  1186  		ObjectMeta: metav1.ObjectMeta{
  1187  			Name: "node1",
  1188  			Labels: map[string]string{
  1189  				labelNodeRoleControlPlane: "",
  1190  			},
  1191  		},
  1192  		Status: corev1.NodeStatus{
  1193  			Conditions: []corev1.NodeCondition{{
  1194  				Type:   corev1.NodeReady,
  1195  				Status: corev1.ConditionTrue,
  1196  			}},
  1197  		},
  1198  	}
  1199  	node2 := &corev1.Node{
  1200  		ObjectMeta: metav1.ObjectMeta{
  1201  			Name: "node2",
  1202  			Labels: map[string]string{
  1203  				labelNodeRoleControlPlane: "",
  1204  			},
  1205  		},
  1206  		Status: corev1.NodeStatus{
  1207  			Conditions: []corev1.NodeCondition{{
  1208  				Type:   corev1.NodeReady,
  1209  				Status: corev1.ConditionFalse,
  1210  			}},
  1211  		},
  1212  	}
  1213  	kconf := &corev1.ConfigMap{
  1214  		ObjectMeta: metav1.ObjectMeta{
  1215  			Name:      kubeadmConfigKey,
  1216  			Namespace: metav1.NamespaceSystem,
  1217  		},
  1218  	}
  1219  	tests := []struct {
  1220  		name          string
  1221  		objs          []client.Object
  1222  		expectErr     bool
  1223  		expectHasConf bool
  1224  	}{
  1225  		{
  1226  			name:          "returns cluster status",
  1227  			objs:          []client.Object{node1, node2},
  1228  			expectErr:     false,
  1229  			expectHasConf: false,
  1230  		},
  1231  		{
  1232  			name:          "returns cluster status with kubeadm config",
  1233  			objs:          []client.Object{node1, node2, kconf},
  1234  			expectErr:     false,
  1235  			expectHasConf: true,
  1236  		},
  1237  	}
  1238  
  1239  	for _, tt := range tests {
  1240  		t.Run(tt.name, func(t *testing.T) {
  1241  			g := NewWithT(t)
  1242  			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
  1243  			w := &Workload{
  1244  				Client: fakeClient,
  1245  			}
  1246  			status, err := w.ClusterStatus(ctx)
  1247  			if tt.expectErr {
  1248  				g.Expect(err).To(HaveOccurred())
  1249  				return
  1250  			}
  1251  			g.Expect(err).ToNot(HaveOccurred())
  1252  			g.Expect(status.Nodes).To(BeEquivalentTo(2))
  1253  			g.Expect(status.ReadyNodes).To(BeEquivalentTo(1))
  1254  			if tt.expectHasConf {
  1255  				g.Expect(status.HasKubeadmConfig).To(BeTrue())
  1256  				return
  1257  			}
  1258  			g.Expect(status.HasKubeadmConfig).To(BeFalse())
  1259  		})
  1260  	}
  1261  }
  1262  
  1263  func getProxyImageInfo(ctx context.Context, c client.Client) (string, error) {
  1264  	ds := &appsv1.DaemonSet{}
  1265  
  1266  	if err := c.Get(ctx, client.ObjectKey{Name: kubeProxyKey, Namespace: metav1.NamespaceSystem}, ds); err != nil {
  1267  		if apierrors.IsNotFound(err) {
  1268  			return "", errors.New("no image found")
  1269  		}
  1270  		return "", errors.New("failed to determine if daemonset already exists")
  1271  	}
  1272  	container := findKubeProxyContainer(ds)
  1273  	if container == nil {
  1274  		return "", errors.New("unable to find container")
  1275  	}
  1276  	return container.Image, nil
  1277  }
  1278  
  1279  func newKubeProxyDS() appsv1.DaemonSet {
  1280  	return appsv1.DaemonSet{
  1281  		ObjectMeta: metav1.ObjectMeta{
  1282  			Name:      kubeProxyKey,
  1283  			Namespace: metav1.NamespaceSystem,
  1284  		},
  1285  		Spec: appsv1.DaemonSetSpec{
  1286  			Template: corev1.PodTemplateSpec{
  1287  				Spec: corev1.PodSpec{
  1288  					Containers: []corev1.Container{
  1289  						{
  1290  							Image: "k8s.gcr.io/kube-proxy:v1.16.2",
  1291  							Name:  "kube-proxy",
  1292  						},
  1293  					},
  1294  				},
  1295  			},
  1296  		},
  1297  	}
  1298  }
  1299  
  1300  func newKubeProxyDSWithImage(image string) appsv1.DaemonSet {
  1301  	ds := newKubeProxyDS()
  1302  	ds.Spec.Template.Spec.Containers[0].Image = image
  1303  	return ds
  1304  }