sigs.k8s.io/cluster-api@v1.6.3/controlplane/kubeadm/internal/workload_cluster_etcd_test.go (about)

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package internal
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"testing"
    23  
    24  	"github.com/blang/semver/v4"
    25  	"github.com/google/go-cmp/cmp"
    26  	. "github.com/onsi/gomega"
    27  	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    28  	clientv3 "go.etcd.io/etcd/client/v3"
    29  	corev1 "k8s.io/api/core/v1"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"sigs.k8s.io/controller-runtime/pkg/client"
    32  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    33  
    34  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    35  	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
    36  	fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake"
    37  	"sigs.k8s.io/cluster-api/util/yaml"
    38  )
    39  
    40  func TestUpdateEtcdVersionInKubeadmConfigMap(t *testing.T) {
    41  	tests := []struct {
    42  		name                     string
    43  		clusterConfigurationData string
    44  		newImageRepository       string
    45  		newImageTag              string
    46  		wantClusterConfiguration string
    47  	}{
    48  		{
    49  			name: "it should set etcd version when local etcd",
    50  			clusterConfigurationData: yaml.Raw(`
    51  				apiVersion: kubeadm.k8s.io/v1beta2
    52  				kind: ClusterConfiguration
    53  				etcd:
    54  				  local: {}
    55  				`),
    56  			newImageRepository: "example.com/k8s",
    57  			newImageTag:        "v1.6.0",
    58  			wantClusterConfiguration: yaml.Raw(`
    59  				apiServer: {}
    60  				apiVersion: kubeadm.k8s.io/v1beta2
    61  				controllerManager: {}
    62  				dns: {}
    63  				etcd:
    64  				  local:
    65  				    imageRepository: example.com/k8s
    66  				    imageTag: v1.6.0
    67  				kind: ClusterConfiguration
    68  				networking: {}
    69  				scheduler: {}
    70  				`),
    71  		},
    72  		{
    73  			name: "no op when external etcd",
    74  			clusterConfigurationData: yaml.Raw(`
    75  				apiVersion: kubeadm.k8s.io/v1beta2
    76  				kind: ClusterConfiguration
    77  				etcd:
    78  				  external: {}
    79  				`),
    80  			newImageRepository: "example.com/k8s",
    81  			newImageTag:        "v1.6.0",
    82  			wantClusterConfiguration: yaml.Raw(`
    83  				apiVersion: kubeadm.k8s.io/v1beta2
    84  				kind: ClusterConfiguration
    85  				etcd:
    86  				  external: {}
    87  				`),
    88  		},
    89  	}
    90  
    91  	for _, tt := range tests {
    92  		t.Run(tt.name, func(t *testing.T) {
    93  			g := NewWithT(t)
    94  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
    95  				ObjectMeta: metav1.ObjectMeta{
    96  					Name:      kubeadmConfigKey,
    97  					Namespace: metav1.NamespaceSystem,
    98  				},
    99  				Data: map[string]string{
   100  					clusterConfigurationKey: tt.clusterConfigurationData,
   101  				},
   102  			}).Build()
   103  
   104  			w := &Workload{
   105  				Client: fakeClient,
   106  			}
   107  			err := w.UpdateEtcdVersionInKubeadmConfigMap(ctx, tt.newImageRepository, tt.newImageTag, semver.MustParse("1.19.1"))
   108  			g.Expect(err).ToNot(HaveOccurred())
   109  
   110  			var actualConfig corev1.ConfigMap
   111  			g.Expect(w.Client.Get(
   112  				ctx,
   113  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   114  				&actualConfig,
   115  			)).To(Succeed())
   116  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
   117  		})
   118  	}
   119  }
   120  
   121  func TestUpdateEtcdExtraArgsInKubeadmConfigMap(t *testing.T) {
   122  	tests := []struct {
   123  		name                     string
   124  		clusterConfigurationData string
   125  		newExtraArgs             map[string]string
   126  		wantClusterConfiguration string
   127  	}{
   128  		{
   129  			name: "it should set etcd extraArgs when local etcd",
   130  			clusterConfigurationData: yaml.Raw(`
   131  				apiVersion: kubeadm.k8s.io/v1beta2
   132  				kind: ClusterConfiguration
   133  				etcd:
   134  				  local: {}
   135  				`),
   136  			newExtraArgs: map[string]string{
   137  				"foo": "bar",
   138  			},
   139  			wantClusterConfiguration: yaml.Raw(`
   140  				apiServer: {}
   141  				apiVersion: kubeadm.k8s.io/v1beta2
   142  				controllerManager: {}
   143  				dns: {}
   144  				etcd:
   145  				  local:
   146  				    extraArgs:
   147  				      foo: bar
   148  				kind: ClusterConfiguration
   149  				networking: {}
   150  				scheduler: {}
   151  				`),
   152  		},
   153  		{
   154  			name: "no op when external etcd",
   155  			clusterConfigurationData: yaml.Raw(`
   156  				apiVersion: kubeadm.k8s.io/v1beta2
   157  				kind: ClusterConfiguration
   158  				etcd:
   159  				  external: {}
   160  				`),
   161  			newExtraArgs: map[string]string{
   162  				"foo": "bar",
   163  			},
   164  			wantClusterConfiguration: yaml.Raw(`
   165  				apiVersion: kubeadm.k8s.io/v1beta2
   166  				kind: ClusterConfiguration
   167  				etcd:
   168  				  external: {}
   169  				`),
   170  		},
   171  	}
   172  
   173  	for _, tt := range tests {
   174  		t.Run(tt.name, func(t *testing.T) {
   175  			g := NewWithT(t)
   176  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   177  				ObjectMeta: metav1.ObjectMeta{
   178  					Name:      kubeadmConfigKey,
   179  					Namespace: metav1.NamespaceSystem,
   180  				},
   181  				Data: map[string]string{
   182  					clusterConfigurationKey: tt.clusterConfigurationData,
   183  				},
   184  			}).Build()
   185  
   186  			w := &Workload{
   187  				Client: fakeClient,
   188  			}
   189  			err := w.UpdateEtcdExtraArgsInKubeadmConfigMap(ctx, tt.newExtraArgs, semver.MustParse("1.19.1"))
   190  			g.Expect(err).ToNot(HaveOccurred())
   191  
   192  			var actualConfig corev1.ConfigMap
   193  			g.Expect(w.Client.Get(
   194  				ctx,
   195  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   196  				&actualConfig,
   197  			)).To(Succeed())
   198  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
   199  		})
   200  	}
   201  }
   202  
   203  func TestRemoveEtcdMemberForMachine(t *testing.T) {
   204  	machine := &clusterv1.Machine{
   205  		Status: clusterv1.MachineStatus{
   206  			NodeRef: &corev1.ObjectReference{
   207  				Name: "cp1",
   208  			},
   209  		},
   210  	}
   211  	cp1 := &corev1.Node{
   212  		ObjectMeta: metav1.ObjectMeta{
   213  			Name:      "cp1",
   214  			Namespace: "cp1",
   215  			Labels: map[string]string{
   216  				labelNodeRoleControlPlane: "",
   217  			},
   218  		},
   219  	}
   220  	cp1DiffNS := cp1.DeepCopy()
   221  	cp1DiffNS.Namespace = "diff-ns"
   222  
   223  	cp2 := cp1.DeepCopy()
   224  	cp2.Name = "cp2"
   225  	cp2.Namespace = "cp2"
   226  
   227  	tests := []struct {
   228  		name                string
   229  		machine             *clusterv1.Machine
   230  		etcdClientGenerator etcdClientFor
   231  		objs                []client.Object
   232  		expectErr           bool
   233  	}{
   234  		{
   235  			name:      "does nothing if the machine is nil",
   236  			machine:   nil,
   237  			expectErr: false,
   238  		},
   239  		{
   240  			name: "does nothing if the machine has no node",
   241  			machine: &clusterv1.Machine{
   242  				Status: clusterv1.MachineStatus{
   243  					NodeRef: nil,
   244  				},
   245  			},
   246  			expectErr: false,
   247  		},
   248  		{
   249  			name:      "returns an error if there are less than 2 control plane nodes",
   250  			machine:   machine,
   251  			objs:      []client.Object{cp1},
   252  			expectErr: true,
   253  		},
   254  		{
   255  			name:                "returns an error if it fails to create the etcd client",
   256  			machine:             machine,
   257  			objs:                []client.Object{cp1, cp2},
   258  			etcdClientGenerator: &fakeEtcdClientGenerator{forNodesErr: errors.New("no client")},
   259  			expectErr:           true,
   260  		},
   261  		{
   262  			name:    "returns an error if the client errors getting etcd members",
   263  			machine: machine,
   264  			objs:    []client.Object{cp1, cp2},
   265  			etcdClientGenerator: &fakeEtcdClientGenerator{
   266  				forNodesClient: &etcd.Client{
   267  					EtcdClient: &fake2.FakeEtcdClient{
   268  						ErrorResponse: errors.New("cannot get etcd members"),
   269  					},
   270  				},
   271  			},
   272  			expectErr: true,
   273  		},
   274  		{
   275  			name:    "returns an error if the client errors removing the etcd member",
   276  			machine: machine,
   277  			objs:    []client.Object{cp1, cp2},
   278  			etcdClientGenerator: &fakeEtcdClientGenerator{
   279  				forNodesClient: &etcd.Client{
   280  					EtcdClient: &fake2.FakeEtcdClient{
   281  						ErrorResponse: errors.New("cannot remove etcd member"),
   282  						MemberListResponse: &clientv3.MemberListResponse{
   283  							Members: []*pb.Member{
   284  								{Name: "cp1", ID: uint64(1)},
   285  								{Name: "test-2", ID: uint64(2)},
   286  								{Name: "test-3", ID: uint64(3)},
   287  							},
   288  						},
   289  						AlarmResponse: &clientv3.AlarmResponse{
   290  							Alarms: []*pb.AlarmMember{},
   291  						},
   292  					},
   293  				},
   294  			},
   295  			expectErr: true,
   296  		},
   297  		{
   298  			name:    "removes the member from etcd",
   299  			machine: machine,
   300  			objs:    []client.Object{cp1, cp2},
   301  			etcdClientGenerator: &fakeEtcdClientGenerator{
   302  				forNodesClient: &etcd.Client{
   303  					EtcdClient: &fake2.FakeEtcdClient{
   304  						MemberListResponse: &clientv3.MemberListResponse{
   305  							Members: []*pb.Member{
   306  								{Name: "cp1", ID: uint64(1)},
   307  								{Name: "test-2", ID: uint64(2)},
   308  								{Name: "test-3", ID: uint64(3)},
   309  							},
   310  						},
   311  						AlarmResponse: &clientv3.AlarmResponse{
   312  							Alarms: []*pb.AlarmMember{},
   313  						},
   314  					},
   315  				},
   316  			},
   317  			expectErr: false,
   318  		},
   319  	}
   320  
   321  	for _, tt := range tests {
   322  		t.Run(tt.name, func(t *testing.T) {
   323  			g := NewWithT(t)
   324  			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
   325  			w := &Workload{
   326  				Client:              fakeClient,
   327  				etcdClientGenerator: tt.etcdClientGenerator,
   328  			}
   329  			err := w.RemoveEtcdMemberForMachine(ctx, tt.machine)
   330  			if tt.expectErr {
   331  				g.Expect(err).To(HaveOccurred())
   332  				return
   333  			}
   334  			g.Expect(err).ToNot(HaveOccurred())
   335  		})
   336  	}
   337  }
   338  
// TestForwardEtcdLeadership exercises Workload.ForwardEtcdLeadership across
// three groups: input-validation/error propagation, the no-op path when the
// connected member is not the current leader, and the actual leadership move.
func TestForwardEtcdLeadership(t *testing.T) {
	t.Run("handles errors correctly", func(t *testing.T) {
		tests := []struct {
			name                string
			machine             *clusterv1.Machine
			leaderCandidate     *clusterv1.Machine
			etcdClientGenerator etcdClientFor
			k8sClient           client.Client
			expectErr           bool
		}{
			{
				name:      "does nothing if the machine is nil",
				machine:   nil,
				expectErr: false,
			},
			{
				name: "does nothing if machine's NodeRef is nil",
				machine: defaultMachine(func(m *clusterv1.Machine) {
					m.Status.NodeRef = nil
				}),
				expectErr: false,
			},
			{
				name:            "returns an error if the leader candidate is nil",
				machine:         defaultMachine(),
				leaderCandidate: nil,
				expectErr:       true,
			},
			{
				name:    "returns an error if the leader candidate's noderef is nil",
				machine: defaultMachine(),
				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
					m.Status.NodeRef = nil
				}),
				expectErr: true,
			},
			{
				name:            "returns an error if it can't retrieve the list of control plane nodes",
				machine:         defaultMachine(),
				leaderCandidate: defaultMachine(),
				k8sClient:       &fakeClient{listErr: errors.New("failed to list nodes")},
				expectErr:       true,
			},
			{
				name:                "returns an error if it can't create an etcd client",
				machine:             defaultMachine(),
				leaderCandidate:     defaultMachine(),
				k8sClient:           &fakeClient{},
				etcdClientGenerator: &fakeEtcdClientGenerator{forLeaderErr: errors.New("no etcdClient")},
				expectErr:           true,
			},
			{
				name:            "returns error if it fails to get etcd members",
				machine:         defaultMachine(),
				leaderCandidate: defaultMachine(),
				k8sClient:       &fakeClient{},
				etcdClientGenerator: &fakeEtcdClientGenerator{
					forLeaderClient: &etcd.Client{
						EtcdClient: &fake2.FakeEtcdClient{
							ErrorResponse: errors.New("cannot get etcd members"),
						},
					},
				},
				expectErr: true,
			},
		}
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				g := NewWithT(t)
				w := &Workload{
					Client:              tt.k8sClient,
					etcdClientGenerator: tt.etcdClientGenerator,
				}
				err := w.ForwardEtcdLeadership(ctx, tt.machine, tt.leaderCandidate)
				if tt.expectErr {
					g.Expect(err).To(HaveOccurred())
					return
				}
				g.Expect(err).ToNot(HaveOccurred())
			})
		}
	})

	// The member list contains only "machine-node" (ID 101), while the
	// reported leader is ID 555, so the machine is not the leader and no
	// MoveLeader call should be issued.
	t.Run("does nothing if the machine is not the leader", func(t *testing.T) {
		g := NewWithT(t)
		fakeEtcdClient := &fake2.FakeEtcdClient{
			MemberListResponse: &clientv3.MemberListResponse{
				Members: []*pb.Member{
					{Name: "machine-node", ID: uint64(101)},
				},
			},
			AlarmResponse: &clientv3.AlarmResponse{
				Alarms: []*pb.AlarmMember{},
			},
		}
		etcdClientGenerator := &fakeEtcdClientGenerator{
			forLeaderClient: &etcd.Client{
				EtcdClient: fakeEtcdClient,
				LeaderID:   555,
			},
		}

		w := &Workload{
			Client: &fakeClient{list: &corev1.NodeList{
				Items: []corev1.Node{nodeNamed("leader-node")},
			}},
			etcdClientGenerator: etcdClientGenerator,
		}
		err := w.ForwardEtcdLeadership(ctx, defaultMachine(), defaultMachine())
		g.Expect(err).ToNot(HaveOccurred())
		// MovedLeader records the ID passed to MoveLeader; 0 means no move.
		g.Expect(fakeEtcdClient.MovedLeader).To(BeEquivalentTo(0))
	})

	t.Run("move etcd leader", func(t *testing.T) {
		tests := []struct {
			name               string
			leaderCandidate    *clusterv1.Machine
			etcdMoveErr        error
			expectedMoveLeader uint64
			expectErr          bool
		}{
			{
				// "candidate-node" is member ID 12345 in the fake member list below.
				name: "it moves the etcd leadership to the leader candidate",
				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
					m.Status.NodeRef.Name = "candidate-node"
				}),
				expectedMoveLeader: 12345,
			},
			{
				name: "returns error if failed to move to the leader candidate",
				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
					m.Status.NodeRef.Name = "candidate-node"
				}),
				etcdMoveErr: errors.New("move err"),
				expectErr:   true,
			},
			{
				name: "returns error if the leader candidate doesn't exist in etcd",
				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
					m.Status.NodeRef.Name = "some other node"
				}),
				expectErr: true,
			},
		}

		currentLeader := defaultMachine(func(m *clusterv1.Machine) {
			m.Status.NodeRef.Name = "current-leader"
		})
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				g := NewWithT(t)
				fakeEtcdClient := &fake2.FakeEtcdClient{
					ErrorResponse: tt.etcdMoveErr,
					MemberListResponse: &clientv3.MemberListResponse{
						Members: []*pb.Member{
							{Name: currentLeader.Status.NodeRef.Name, ID: uint64(101)},
							{Name: "other-node", ID: uint64(1034)},
							{Name: "candidate-node", ID: uint64(12345)},
						},
					},
					AlarmResponse: &clientv3.AlarmResponse{
						Alarms: []*pb.AlarmMember{},
					},
				}

				etcdClientGenerator := &fakeEtcdClientGenerator{
					forLeaderClient: &etcd.Client{
						EtcdClient: fakeEtcdClient,
						// this etcdClient belongs to the machine-node
						LeaderID: 101,
					},
				}

				w := &Workload{
					etcdClientGenerator: etcdClientGenerator,
					Client: &fakeClient{list: &corev1.NodeList{
						Items: []corev1.Node{nodeNamed("leader-node"), nodeNamed("other-node"), nodeNamed("candidate-node")},
					}},
				}
				err := w.ForwardEtcdLeadership(ctx, currentLeader, tt.leaderCandidate)
				if tt.expectErr {
					g.Expect(err).To(HaveOccurred())
					return
				}
				g.Expect(err).ToNot(HaveOccurred())
				// MovedLeader records the member ID MoveLeader was called with.
				g.Expect(fakeEtcdClient.MovedLeader).To(BeEquivalentTo(tt.expectedMoveLeader))
			})
		}
	})
}
   529  
   530  func TestReconcileEtcdMembers(t *testing.T) {
   531  	kubeadmConfig := &corev1.ConfigMap{
   532  		ObjectMeta: metav1.ObjectMeta{
   533  			Name:      kubeadmConfigKey,
   534  			Namespace: metav1.NamespaceSystem,
   535  		},
   536  		Data: map[string]string{
   537  			clusterStatusKey: yaml.Raw(`
   538  				apiEndpoints:
   539  				  ip-10-0-0-1.ec2.internal:
   540  				    advertiseAddress: 10.0.0.1
   541  				    bindPort: 6443
   542  				  ip-10-0-0-2.ec2.internal:
   543  				    advertiseAddress: 10.0.0.2
   544  				    bindPort: 6443
   545  				    someFieldThatIsAddedInTheFuture: bar
   546  				  ip-10-0-0-3.ec2.internal:
   547  				    advertiseAddress: 10.0.0.3
   548  				    bindPort: 6443
   549  				apiVersion: kubeadm.k8s.io/v1beta2
   550  				kind: ClusterStatus
   551  				`),
   552  		},
   553  	}
   554  	kubeadmConfigWithoutClusterStatus := kubeadmConfig.DeepCopy()
   555  	delete(kubeadmConfigWithoutClusterStatus.Data, clusterStatusKey)
   556  
   557  	node1 := &corev1.Node{
   558  		ObjectMeta: metav1.ObjectMeta{
   559  			Name:      "ip-10-0-0-1.ec2.internal",
   560  			Namespace: "ns1",
   561  			Labels: map[string]string{
   562  				labelNodeRoleControlPlane: "",
   563  			},
   564  		},
   565  	}
   566  	node2 := node1.DeepCopy()
   567  	node2.Name = "ip-10-0-0-2.ec2.internal"
   568  
   569  	fakeEtcdClient := &fake2.FakeEtcdClient{
   570  		MemberListResponse: &clientv3.MemberListResponse{
   571  			Members: []*pb.Member{
   572  				{Name: "ip-10-0-0-1.ec2.internal", ID: uint64(1)},
   573  				{Name: "ip-10-0-0-2.ec2.internal", ID: uint64(2)},
   574  				{Name: "ip-10-0-0-3.ec2.internal", ID: uint64(3)},
   575  			},
   576  		},
   577  		AlarmResponse: &clientv3.AlarmResponse{
   578  			Alarms: []*pb.AlarmMember{},
   579  		},
   580  	}
   581  
   582  	tests := []struct {
   583  		name                string
   584  		kubernetesVersion   semver.Version
   585  		objs                []client.Object
   586  		nodes               []string
   587  		etcdClientGenerator etcdClientFor
   588  		expectErr           bool
   589  		assert              func(*WithT, client.Client)
   590  	}{
   591  		{
   592  			// the node to be removed is ip-10-0-0-3.ec2.internal since the
   593  			// other two have nodes
   594  			name:              "successfully removes the etcd member without a node and removes the node from kubeadm config for Kubernetes version < 1.22.0",
   595  			kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus
   596  			objs:              []client.Object{node1.DeepCopy(), node2.DeepCopy(), kubeadmConfig.DeepCopy()},
   597  			nodes:             []string{node1.Name, node2.Name},
   598  			etcdClientGenerator: &fakeEtcdClientGenerator{
   599  				forNodesClient: &etcd.Client{
   600  					EtcdClient: fakeEtcdClient,
   601  				},
   602  			},
   603  			expectErr: false,
   604  			assert: func(g *WithT, c client.Client) {
   605  				g.Expect(fakeEtcdClient.RemovedMember).To(Equal(uint64(3)))
   606  
   607  				var actualConfig corev1.ConfigMap
   608  				g.Expect(c.Get(
   609  					ctx,
   610  					client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   611  					&actualConfig,
   612  				)).To(Succeed())
   613  				expectedOutput := yaml.Raw(`
   614  					apiEndpoints:
   615  					  ip-10-0-0-1.ec2.internal:
   616  					    advertiseAddress: 10.0.0.1
   617  					    bindPort: 6443
   618  					  ip-10-0-0-2.ec2.internal:
   619  					    advertiseAddress: 10.0.0.2
   620  					    bindPort: 6443
   621  					apiVersion: kubeadm.k8s.io/v1beta2
   622  					kind: ClusterStatus
   623  					`)
   624  				g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(expectedOutput))
   625  			},
   626  		},
   627  		{
   628  			// the node to be removed is ip-10-0-0-3.ec2.internal since the
   629  			// other two have nodes
   630  			name:              "successfully removes the etcd member without a node for Kubernetes version >= 1.22.0",
   631  			kubernetesVersion: minKubernetesVersionWithoutClusterStatus, // Kubernetes version >= 1.22.0 does not have ClusterStatus
   632  			objs:              []client.Object{node1.DeepCopy(), node2.DeepCopy(), kubeadmConfigWithoutClusterStatus.DeepCopy()},
   633  			nodes:             []string{node1.Name, node2.Name},
   634  			etcdClientGenerator: &fakeEtcdClientGenerator{
   635  				forNodesClient: &etcd.Client{
   636  					EtcdClient: fakeEtcdClient,
   637  				},
   638  			},
   639  			expectErr: false,
   640  			assert: func(g *WithT, c client.Client) {
   641  				g.Expect(fakeEtcdClient.RemovedMember).To(Equal(uint64(3)))
   642  
   643  				var actualConfig corev1.ConfigMap
   644  				g.Expect(c.Get(
   645  					ctx,
   646  					client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   647  					&actualConfig,
   648  				)).To(Succeed())
   649  				g.Expect(actualConfig.Data).ToNot(HaveKey(clusterStatusKey))
   650  			},
   651  		},
   652  		{
   653  			name:  "return error if there aren't enough control plane nodes",
   654  			objs:  []client.Object{node1.DeepCopy(), kubeadmConfig.DeepCopy()},
   655  			nodes: []string{node1.Name},
   656  			etcdClientGenerator: &fakeEtcdClientGenerator{
   657  				forNodesClient: &etcd.Client{
   658  					EtcdClient: fakeEtcdClient,
   659  				},
   660  			},
   661  			expectErr: true,
   662  		},
   663  	}
   664  
   665  	for _, tt := range tests {
   666  		t.Run(tt.name, func(t *testing.T) {
   667  			g := NewWithT(t)
   668  
   669  			for _, o := range tt.objs {
   670  				g.Expect(env.CreateAndWait(ctx, o)).To(Succeed())
   671  				defer func(do client.Object) {
   672  					g.Expect(env.CleanupAndWait(ctx, do)).To(Succeed())
   673  				}(o)
   674  			}
   675  
   676  			w := &Workload{
   677  				Client:              env.Client,
   678  				etcdClientGenerator: tt.etcdClientGenerator,
   679  			}
   680  			ctx := context.TODO()
   681  			_, err := w.ReconcileEtcdMembers(ctx, tt.nodes, tt.kubernetesVersion)
   682  			if tt.expectErr {
   683  				g.Expect(err).To(HaveOccurred())
   684  				return
   685  			}
   686  			g.Expect(err).ToNot(HaveOccurred())
   687  
   688  			if tt.assert != nil {
   689  				tt.assert(g, env.Client)
   690  			}
   691  		})
   692  	}
   693  }
   694  
   695  func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) {
   696  	tests := []struct {
   697  		name              string
   698  		apiEndpoint       string
   699  		clusterStatusData string
   700  		wantClusterStatus string
   701  	}{
   702  		{
   703  			name:        "removes the api endpoint",
   704  			apiEndpoint: "ip-10-0-0-2.ec2.internal",
   705  			clusterStatusData: yaml.Raw(`
   706  				apiEndpoints:
   707  				  ip-10-0-0-1.ec2.internal:
   708  				    advertiseAddress: 10.0.0.1
   709  				    bindPort: 6443
   710  				  ip-10-0-0-2.ec2.internal:
   711  				    advertiseAddress: 10.0.0.2
   712  				    bindPort: 6443
   713  				apiVersion: kubeadm.k8s.io/v1beta2
   714  				kind: ClusterStatus
   715  				`),
   716  			wantClusterStatus: yaml.Raw(`
   717  				apiEndpoints:
   718  				  ip-10-0-0-1.ec2.internal:
   719  				    advertiseAddress: 10.0.0.1
   720  				    bindPort: 6443
   721  				apiVersion: kubeadm.k8s.io/v1beta2
   722  				kind: ClusterStatus
   723  				`),
   724  		},
   725  		{
   726  			name:        "no op if the api endpoint does not exists",
   727  			apiEndpoint: "ip-10-0-0-2.ec2.internal",
   728  			clusterStatusData: yaml.Raw(`
   729  				apiEndpoints:
   730  				  ip-10-0-0-1.ec2.internal:
   731  				    advertiseAddress: 10.0.0.1
   732  				    bindPort: 6443
   733  				apiVersion: kubeadm.k8s.io/v1beta2
   734  				kind: ClusterStatus
   735  				`),
   736  			wantClusterStatus: yaml.Raw(`
   737  				apiEndpoints:
   738  				  ip-10-0-0-1.ec2.internal:
   739  				    advertiseAddress: 10.0.0.1
   740  				    bindPort: 6443
   741  				apiVersion: kubeadm.k8s.io/v1beta2
   742  				kind: ClusterStatus
   743  				`),
   744  		},
   745  	}
   746  	for _, tt := range tests {
   747  		t.Run(tt.name, func(t *testing.T) {
   748  			g := NewWithT(t)
   749  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   750  				ObjectMeta: metav1.ObjectMeta{
   751  					Name:      kubeadmConfigKey,
   752  					Namespace: metav1.NamespaceSystem,
   753  				},
   754  				Data: map[string]string{
   755  					clusterStatusKey: tt.clusterStatusData,
   756  				},
   757  			}).Build()
   758  
   759  			w := &Workload{
   760  				Client: fakeClient,
   761  			}
   762  			err := w.RemoveNodeFromKubeadmConfigMap(ctx, tt.apiEndpoint, semver.MustParse("1.19.1"))
   763  			g.Expect(err).ToNot(HaveOccurred())
   764  
   765  			var actualConfig corev1.ConfigMap
   766  			g.Expect(w.Client.Get(
   767  				ctx,
   768  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   769  				&actualConfig,
   770  			)).To(Succeed())
   771  			g.Expect(actualConfig.Data[clusterStatusKey]).Should(Equal(tt.wantClusterStatus), cmp.Diff(tt.wantClusterStatus, actualConfig.Data[clusterStatusKey]))
   772  		})
   773  	}
   774  }
   775  
// fakeEtcdClientGenerator implements etcdClientFor for tests, handing back
// canned clients/errors instead of dialing real etcd members.
type fakeEtcdClientGenerator struct {
	// forNodesClient and forNodesErr are returned by forFirstAvailableNode
	// when forNodesClientFunc is nil.
	forNodesClient     *etcd.Client
	// forNodesClientFunc, when set, computes the result of
	// forFirstAvailableNode from the node names it is called with.
	forNodesClientFunc func([]string) (*etcd.Client, error)
	// forLeaderClient and forLeaderErr are returned by forLeader.
	forLeaderClient    *etcd.Client
	forNodesErr        error
	forLeaderErr       error
}
   783  
   784  func (c *fakeEtcdClientGenerator) forFirstAvailableNode(_ context.Context, n []string) (*etcd.Client, error) {
   785  	if c.forNodesClientFunc != nil {
   786  		return c.forNodesClientFunc(n)
   787  	}
   788  	return c.forNodesClient, c.forNodesErr
   789  }
   790  
// forLeader returns the canned leader client and error configured on the
// generator; the context and node list are ignored.
func (c *fakeEtcdClientGenerator) forLeader(_ context.Context, _ []string) (*etcd.Client, error) {
	return c.forLeaderClient, c.forLeaderErr
}
   794  
   795  func defaultMachine(transforms ...func(m *clusterv1.Machine)) *clusterv1.Machine {
   796  	m := &clusterv1.Machine{
   797  		Status: clusterv1.MachineStatus{
   798  			NodeRef: &corev1.ObjectReference{
   799  				Name: "machine-node",
   800  			},
   801  		},
   802  	}
   803  	for _, t := range transforms {
   804  		t(m)
   805  	}
   806  	return m
   807  }
   808  
   809  func nodeNamed(name string) corev1.Node {
   810  	node := corev1.Node{
   811  		ObjectMeta: metav1.ObjectMeta{
   812  			Name: name,
   813  		},
   814  	}
   815  	return node
   816  }