sigs.k8s.io/cluster-api@v1.7.1/controlplane/kubeadm/internal/workload_cluster_etcd_test.go

     1  /*
     2  Copyright 2020 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package internal
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"testing"
    23  
    24  	"github.com/blang/semver/v4"
    25  	"github.com/google/go-cmp/cmp"
    26  	. "github.com/onsi/gomega"
    27  	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
    28  	clientv3 "go.etcd.io/etcd/client/v3"
    29  	corev1 "k8s.io/api/core/v1"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"sigs.k8s.io/controller-runtime/pkg/client"
    32  	"sigs.k8s.io/controller-runtime/pkg/client/fake"
    33  
    34  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    35  	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
    36  	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
    37  	fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake"
    38  	utilyaml "sigs.k8s.io/cluster-api/util/yaml"
    39  )
    40  
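        // TestUpdateEtcdExternalInKubeadmConfigMap verifies that the external etcd stanza in the
        // kubeadm-config ConfigMap is updated, and that an existing local etcd configuration is left untouched.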
    41  func TestUpdateEtcdExternalInKubeadmConfigMap(t *testing.T) {
    42  	tests := []struct {
    43  		name                     string
    44  		clusterConfigurationData string
    45  		externalEtcd             *bootstrapv1.ExternalEtcd
    46  		wantClusterConfiguration string
    47  	}{
    48  		{
    49  			name: "it should set external etcd configuration with external etcd",
    50  			clusterConfigurationData: utilyaml.Raw(`
    51  				apiVersion: kubeadm.k8s.io/v1beta2
    52  				kind: ClusterConfiguration
    53  				etcd:
    54  				  external: {}
    55  				`),
    56  			externalEtcd: &bootstrapv1.ExternalEtcd{
    57  				Endpoints: []string{"1.2.3.4"},
    58  				CAFile:    "/tmp/ca_file.pem",
    59  				CertFile:  "/tmp/cert_file.crt",
    60  				KeyFile:   "/tmp/key_file.key",
    61  			},
    62  			wantClusterConfiguration: utilyaml.Raw(`
    63  				apiServer: {}
    64  				apiVersion: kubeadm.k8s.io/v1beta2
    65  				controllerManager: {}
    66  				dns: {}
    67  				etcd:
    68  				  external:
    69  				    caFile: /tmp/ca_file.pem
    70  				    certFile: /tmp/cert_file.crt
    71  				    endpoints:
    72  				    - 1.2.3.4
    73  				    keyFile: /tmp/key_file.key
    74  				kind: ClusterConfiguration
    75  				networking: {}
    76  				scheduler: {}
    77  				`),
    78  		},
    79  		{
    80  			name: "no op when local etcd configuration already exists",
    81  			clusterConfigurationData: utilyaml.Raw(`
    82  				apiVersion: kubeadm.k8s.io/v1beta2
    83  				kind: ClusterConfiguration
    84  				etcd:
    85  				  local: {}
    86  				`),
    87  			externalEtcd: &bootstrapv1.ExternalEtcd{
    88  				Endpoints: []string{"1.2.3.4"},
    89  				CAFile:    "/tmp/ca_file.pem",
    90  				CertFile:  "/tmp/cert_file.crt",
    91  				KeyFile:   "/tmp/key_file.key",
    92  			},
    93  			wantClusterConfiguration: utilyaml.Raw(`
    94  				apiVersion: kubeadm.k8s.io/v1beta2
    95  				kind: ClusterConfiguration
    96  				etcd:
    97  				  local: {}
    98  				`),
    99  		},
   100  	}
   101  
   102  	for _, tt := range tests {
   103  		t.Run(tt.name, func(t *testing.T) {
   104  			g := NewWithT(t)
   105  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   106  				ObjectMeta: metav1.ObjectMeta{
   107  					Name:      kubeadmConfigKey,
   108  					Namespace: metav1.NamespaceSystem,
   109  				},
   110  				Data: map[string]string{
   111  					clusterConfigurationKey: tt.clusterConfigurationData,
   112  				},
   113  			}).Build()
   114  
   115  			w := &Workload{
   116  				Client: fakeClient,
   117  			}
   118  			err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateEtcdExternalInKubeadmConfigMap(tt.externalEtcd))
   119  			g.Expect(err).ToNot(HaveOccurred())
   120  
   121  			var actualConfig corev1.ConfigMap
   122  			g.Expect(w.Client.Get(
   123  				ctx,
   124  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   125  				&actualConfig,
   126  			)).To(Succeed())
   127  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
   128  		})
   129  	}
   130  }
   131  
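        // TestUpdateEtcdLocalInKubeadmConfigMap verifies that the local etcd image and extra args are updated in
        // the kubeadm-config ConfigMap, and that an existing external etcd configuration is left untouched.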
   132  func TestUpdateEtcdLocalInKubeadmConfigMap(t *testing.T) {
   133  	tests := []struct {
   134  		name                     string
   135  		clusterConfigurationData string
   136  		localEtcd                *bootstrapv1.LocalEtcd
   137  		wantClusterConfiguration string
   138  	}{
   139  		{
   140  			name: "it should set local etcd configuration with local etcd",
   141  			clusterConfigurationData: utilyaml.Raw(`
   142  				apiVersion: kubeadm.k8s.io/v1beta2
   143  				kind: ClusterConfiguration
   144  				etcd:
   145  				  local: {}
   146  				`),
   147  			localEtcd: &bootstrapv1.LocalEtcd{
   148  				ImageMeta: bootstrapv1.ImageMeta{
   149  					ImageRepository: "example.com/k8s",
   150  					ImageTag:        "v1.6.0",
   151  				},
   152  				ExtraArgs: map[string]string{
   153  					"foo": "bar",
   154  				},
   155  			},
   156  			wantClusterConfiguration: utilyaml.Raw(`
   157  				apiServer: {}
   158  				apiVersion: kubeadm.k8s.io/v1beta2
   159  				controllerManager: {}
   160  				dns: {}
   161  				etcd:
   162  				  local:
   163  				    extraArgs:
   164  				      foo: bar
   165  				    imageRepository: example.com/k8s
   166  				    imageTag: v1.6.0
   167  				kind: ClusterConfiguration
   168  				networking: {}
   169  				scheduler: {}
   170  				`),
   171  		},
   172  		{
   173  			name: "no op when external etcd configuration already exists",
   174  			clusterConfigurationData: utilyaml.Raw(`
   175  				apiVersion: kubeadm.k8s.io/v1beta2
   176  				kind: ClusterConfiguration
   177  				etcd:
   178  				  external: {}
   179  				`),
   180  			localEtcd: &bootstrapv1.LocalEtcd{
   181  				ImageMeta: bootstrapv1.ImageMeta{
   182  					ImageRepository: "example.com/k8s",
   183  					ImageTag:        "v1.6.0",
   184  				},
   185  				ExtraArgs: map[string]string{
   186  					"foo": "bar",
   187  				},
   188  			},
   189  			wantClusterConfiguration: utilyaml.Raw(`
   190  				apiVersion: kubeadm.k8s.io/v1beta2
   191  				kind: ClusterConfiguration
   192  				etcd:
   193  				  external: {}
   194  				`),
   195  		},
   196  	}
   197  
   198  	for _, tt := range tests {
   199  		t.Run(tt.name, func(t *testing.T) {
   200  			g := NewWithT(t)
   201  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   202  				ObjectMeta: metav1.ObjectMeta{
   203  					Name:      kubeadmConfigKey,
   204  					Namespace: metav1.NamespaceSystem,
   205  				},
   206  				Data: map[string]string{
   207  					clusterConfigurationKey: tt.clusterConfigurationData,
   208  				},
   209  			}).Build()
   210  
   211  			w := &Workload{
   212  				Client: fakeClient,
   213  			}
   214  			err := w.UpdateClusterConfiguration(ctx, semver.MustParse("1.19.1"), w.UpdateEtcdLocalInKubeadmConfigMap(tt.localEtcd))
   215  			g.Expect(err).ToNot(HaveOccurred())
   216  
   217  			var actualConfig corev1.ConfigMap
   218  			g.Expect(w.Client.Get(
   219  				ctx,
   220  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   221  				&actualConfig,
   222  			)).To(Succeed())
   223  			g.Expect(actualConfig.Data[clusterConfigurationKey]).Should(Equal(tt.wantClusterConfiguration), cmp.Diff(tt.wantClusterConfiguration, actualConfig.Data[clusterConfigurationKey]))
   224  		})
   225  	}
   226  }
   227  
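        // TestRemoveEtcdMemberForMachine verifies that RemoveEtcdMemberForMachine is a no-op for a nil Machine or a
        // Machine without a NodeRef, returns an error when fewer than two control plane nodes exist or the etcd
        // client fails, and otherwise removes the member corresponding to the Machine's node.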
   228  func TestRemoveEtcdMemberForMachine(t *testing.T) {
   229  	machine := &clusterv1.Machine{
   230  		Status: clusterv1.MachineStatus{
   231  			NodeRef: &corev1.ObjectReference{
   232  				Name: "cp1",
   233  			},
   234  		},
   235  	}
   236  	cp1 := &corev1.Node{
   237  		ObjectMeta: metav1.ObjectMeta{
   238  			Name:      "cp1",
   239  			Namespace: "cp1",
   240  			Labels: map[string]string{
   241  				labelNodeRoleControlPlane: "",
   242  			},
   243  		},
   244  	}
   245  	cp1DiffNS := cp1.DeepCopy()
   246  	cp1DiffNS.Namespace = "diff-ns"
   247  
   248  	cp2 := cp1.DeepCopy()
   249  	cp2.Name = "cp2"
   250  	cp2.Namespace = "cp2"
   251  
   252  	tests := []struct {
   253  		name                string
   254  		machine             *clusterv1.Machine
   255  		etcdClientGenerator etcdClientFor
   256  		objs                []client.Object
   257  		expectErr           bool
   258  	}{
   259  		{
   260  			name:      "does nothing if the machine is nil",
   261  			machine:   nil,
   262  			expectErr: false,
   263  		},
   264  		{
   265  			name: "does nothing if the machine has no node",
   266  			machine: &clusterv1.Machine{
   267  				Status: clusterv1.MachineStatus{
   268  					NodeRef: nil,
   269  				},
   270  			},
   271  			expectErr: false,
   272  		},
   273  		{
    274  			name:      "returns an error if there are fewer than 2 control plane nodes",
   275  			machine:   machine,
   276  			objs:      []client.Object{cp1},
   277  			expectErr: true,
   278  		},
   279  		{
   280  			name:                "returns an error if it fails to create the etcd client",
   281  			machine:             machine,
   282  			objs:                []client.Object{cp1, cp2},
   283  			etcdClientGenerator: &fakeEtcdClientGenerator{forNodesErr: errors.New("no client")},
   284  			expectErr:           true,
   285  		},
   286  		{
   287  			name:    "returns an error if the client errors getting etcd members",
   288  			machine: machine,
   289  			objs:    []client.Object{cp1, cp2},
   290  			etcdClientGenerator: &fakeEtcdClientGenerator{
   291  				forNodesClient: &etcd.Client{
   292  					EtcdClient: &fake2.FakeEtcdClient{
   293  						ErrorResponse: errors.New("cannot get etcd members"),
   294  					},
   295  				},
   296  			},
   297  			expectErr: true,
   298  		},
   299  		{
   300  			name:    "returns an error if the client errors removing the etcd member",
   301  			machine: machine,
   302  			objs:    []client.Object{cp1, cp2},
   303  			etcdClientGenerator: &fakeEtcdClientGenerator{
   304  				forNodesClient: &etcd.Client{
   305  					EtcdClient: &fake2.FakeEtcdClient{
   306  						ErrorResponse: errors.New("cannot remove etcd member"),
   307  						MemberListResponse: &clientv3.MemberListResponse{
   308  							Members: []*pb.Member{
   309  								{Name: "cp1", ID: uint64(1)},
   310  								{Name: "test-2", ID: uint64(2)},
   311  								{Name: "test-3", ID: uint64(3)},
   312  							},
   313  						},
   314  						AlarmResponse: &clientv3.AlarmResponse{
   315  							Alarms: []*pb.AlarmMember{},
   316  						},
   317  					},
   318  				},
   319  			},
   320  			expectErr: true,
   321  		},
   322  		{
   323  			name:    "removes the member from etcd",
   324  			machine: machine,
   325  			objs:    []client.Object{cp1, cp2},
   326  			etcdClientGenerator: &fakeEtcdClientGenerator{
   327  				forNodesClient: &etcd.Client{
   328  					EtcdClient: &fake2.FakeEtcdClient{
   329  						MemberListResponse: &clientv3.MemberListResponse{
   330  							Members: []*pb.Member{
   331  								{Name: "cp1", ID: uint64(1)},
   332  								{Name: "test-2", ID: uint64(2)},
   333  								{Name: "test-3", ID: uint64(3)},
   334  							},
   335  						},
   336  						AlarmResponse: &clientv3.AlarmResponse{
   337  							Alarms: []*pb.AlarmMember{},
   338  						},
   339  					},
   340  				},
   341  			},
   342  			expectErr: false,
   343  		},
   344  	}
   345  
   346  	for _, tt := range tests {
   347  		t.Run(tt.name, func(t *testing.T) {
   348  			g := NewWithT(t)
   349  			fakeClient := fake.NewClientBuilder().WithObjects(tt.objs...).Build()
   350  			w := &Workload{
   351  				Client:              fakeClient,
   352  				etcdClientGenerator: tt.etcdClientGenerator,
   353  			}
   354  			err := w.RemoveEtcdMemberForMachine(ctx, tt.machine)
   355  			if tt.expectErr {
   356  				g.Expect(err).To(HaveOccurred())
   357  				return
   358  			}
   359  			g.Expect(err).ToNot(HaveOccurred())
   360  		})
   361  	}
   362  }
   363  
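        // TestForwardEtcdLeadership covers error handling in ForwardEtcdLeadership, the no-op case when the given
        // Machine does not hold etcd leadership, and moving leadership to the leader candidate's member.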
   364  func TestForwardEtcdLeadership(t *testing.T) {
   365  	t.Run("handles errors correctly", func(t *testing.T) {
   366  		tests := []struct {
   367  			name                string
   368  			machine             *clusterv1.Machine
   369  			leaderCandidate     *clusterv1.Machine
   370  			etcdClientGenerator etcdClientFor
   371  			k8sClient           client.Client
   372  			expectErr           bool
   373  		}{
   374  			{
   375  				name:      "does nothing if the machine is nil",
   376  				machine:   nil,
   377  				expectErr: false,
   378  			},
   379  			{
   380  				name: "does nothing if machine's NodeRef is nil",
   381  				machine: defaultMachine(func(m *clusterv1.Machine) {
   382  					m.Status.NodeRef = nil
   383  				}),
   384  				expectErr: false,
   385  			},
   386  			{
   387  				name:            "returns an error if the leader candidate is nil",
   388  				machine:         defaultMachine(),
   389  				leaderCandidate: nil,
   390  				expectErr:       true,
   391  			},
   392  			{
   393  				name:    "returns an error if the leader candidate's noderef is nil",
   394  				machine: defaultMachine(),
   395  				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
   396  					m.Status.NodeRef = nil
   397  				}),
   398  				expectErr: true,
   399  			},
   400  			{
   401  				name:            "returns an error if it can't retrieve the list of control plane nodes",
   402  				machine:         defaultMachine(),
   403  				leaderCandidate: defaultMachine(),
   404  				k8sClient:       &fakeClient{listErr: errors.New("failed to list nodes")},
   405  				expectErr:       true,
   406  			},
   407  			{
   408  				name:                "returns an error if it can't create an etcd client",
   409  				machine:             defaultMachine(),
   410  				leaderCandidate:     defaultMachine(),
   411  				k8sClient:           &fakeClient{},
   412  				etcdClientGenerator: &fakeEtcdClientGenerator{forLeaderErr: errors.New("no etcdClient")},
   413  				expectErr:           true,
   414  			},
   415  			{
   416  				name:            "returns error if it fails to get etcd members",
   417  				machine:         defaultMachine(),
   418  				leaderCandidate: defaultMachine(),
   419  				k8sClient:       &fakeClient{},
   420  				etcdClientGenerator: &fakeEtcdClientGenerator{
   421  					forLeaderClient: &etcd.Client{
   422  						EtcdClient: &fake2.FakeEtcdClient{
   423  							ErrorResponse: errors.New("cannot get etcd members"),
   424  						},
   425  					},
   426  				},
   427  				expectErr: true,
   428  			},
   429  		}
   430  		for _, tt := range tests {
   431  			t.Run(tt.name, func(t *testing.T) {
   432  				g := NewWithT(t)
   433  				w := &Workload{
   434  					Client:              tt.k8sClient,
   435  					etcdClientGenerator: tt.etcdClientGenerator,
   436  				}
   437  				err := w.ForwardEtcdLeadership(ctx, tt.machine, tt.leaderCandidate)
   438  				if tt.expectErr {
   439  					g.Expect(err).To(HaveOccurred())
   440  					return
   441  				}
   442  				g.Expect(err).ToNot(HaveOccurred())
   443  			})
   444  		}
   445  	})
   446  
   447  	t.Run("does nothing if the machine is not the leader", func(t *testing.T) {
   448  		g := NewWithT(t)
   449  		fakeEtcdClient := &fake2.FakeEtcdClient{
   450  			MemberListResponse: &clientv3.MemberListResponse{
   451  				Members: []*pb.Member{
   452  					{Name: "machine-node", ID: uint64(101)},
   453  				},
   454  			},
   455  			AlarmResponse: &clientv3.AlarmResponse{
   456  				Alarms: []*pb.AlarmMember{},
   457  			},
   458  		}
   459  		etcdClientGenerator := &fakeEtcdClientGenerator{
   460  			forLeaderClient: &etcd.Client{
   461  				EtcdClient: fakeEtcdClient,
   462  				LeaderID:   555,
   463  			},
   464  		}
   465  
   466  		w := &Workload{
   467  			Client: &fakeClient{list: &corev1.NodeList{
   468  				Items: []corev1.Node{nodeNamed("leader-node")},
   469  			}},
   470  			etcdClientGenerator: etcdClientGenerator,
   471  		}
   472  		err := w.ForwardEtcdLeadership(ctx, defaultMachine(), defaultMachine())
   473  		g.Expect(err).ToNot(HaveOccurred())
   474  		g.Expect(fakeEtcdClient.MovedLeader).To(BeEquivalentTo(0))
   475  	})
   476  
   477  	t.Run("move etcd leader", func(t *testing.T) {
   478  		tests := []struct {
   479  			name               string
   480  			leaderCandidate    *clusterv1.Machine
   481  			etcdMoveErr        error
   482  			expectedMoveLeader uint64
   483  			expectErr          bool
   484  		}{
   485  			{
   486  				name: "it moves the etcd leadership to the leader candidate",
   487  				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
   488  					m.Status.NodeRef.Name = "candidate-node"
   489  				}),
   490  				expectedMoveLeader: 12345,
   491  			},
   492  			{
   493  				name: "returns error if failed to move to the leader candidate",
   494  				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
   495  					m.Status.NodeRef.Name = "candidate-node"
   496  				}),
   497  				etcdMoveErr: errors.New("move err"),
   498  				expectErr:   true,
   499  			},
   500  			{
   501  				name: "returns error if the leader candidate doesn't exist in etcd",
   502  				leaderCandidate: defaultMachine(func(m *clusterv1.Machine) {
   503  					m.Status.NodeRef.Name = "some other node"
   504  				}),
   505  				expectErr: true,
   506  			},
   507  		}
   508  
   509  		currentLeader := defaultMachine(func(m *clusterv1.Machine) {
   510  			m.Status.NodeRef.Name = "current-leader"
   511  		})
   512  		for _, tt := range tests {
   513  			t.Run(tt.name, func(t *testing.T) {
   514  				g := NewWithT(t)
   515  				fakeEtcdClient := &fake2.FakeEtcdClient{
   516  					ErrorResponse: tt.etcdMoveErr,
   517  					MemberListResponse: &clientv3.MemberListResponse{
   518  						Members: []*pb.Member{
   519  							{Name: currentLeader.Status.NodeRef.Name, ID: uint64(101)},
   520  							{Name: "other-node", ID: uint64(1034)},
   521  							{Name: "candidate-node", ID: uint64(12345)},
   522  						},
   523  					},
   524  					AlarmResponse: &clientv3.AlarmResponse{
   525  						Alarms: []*pb.AlarmMember{},
   526  					},
   527  				}
   528  
   529  				etcdClientGenerator := &fakeEtcdClientGenerator{
   530  					forLeaderClient: &etcd.Client{
   531  						EtcdClient: fakeEtcdClient,
    532  					// the current-leader member (ID 101) is reported as the etcd leader
   533  						LeaderID: 101,
   534  					},
   535  				}
   536  
   537  				w := &Workload{
   538  					etcdClientGenerator: etcdClientGenerator,
   539  					Client: &fakeClient{list: &corev1.NodeList{
   540  						Items: []corev1.Node{nodeNamed("leader-node"), nodeNamed("other-node"), nodeNamed("candidate-node")},
   541  					}},
   542  				}
   543  				err := w.ForwardEtcdLeadership(ctx, currentLeader, tt.leaderCandidate)
   544  				if tt.expectErr {
   545  					g.Expect(err).To(HaveOccurred())
   546  					return
   547  				}
   548  				g.Expect(err).ToNot(HaveOccurred())
   549  				g.Expect(fakeEtcdClient.MovedLeader).To(BeEquivalentTo(tt.expectedMoveLeader))
   550  			})
   551  		}
   552  	})
   553  }
   554  
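        // TestReconcileEtcdMembers verifies that ReconcileEtcdMembers removes etcd members that have no
        // corresponding Node and, for Kubernetes versions below 1.22, also removes the member's apiEndpoint
        // from the kubeadm ClusterStatus.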
   555  func TestReconcileEtcdMembers(t *testing.T) {
   556  	kubeadmConfig := &corev1.ConfigMap{
   557  		ObjectMeta: metav1.ObjectMeta{
   558  			Name:      kubeadmConfigKey,
   559  			Namespace: metav1.NamespaceSystem,
   560  		},
   561  		Data: map[string]string{
   562  			clusterStatusKey: utilyaml.Raw(`
   563  				apiEndpoints:
   564  				  ip-10-0-0-1.ec2.internal:
   565  				    advertiseAddress: 10.0.0.1
   566  				    bindPort: 6443
   567  				  ip-10-0-0-2.ec2.internal:
   568  				    advertiseAddress: 10.0.0.2
   569  				    bindPort: 6443
   570  				    someFieldThatIsAddedInTheFuture: bar
   571  				  ip-10-0-0-3.ec2.internal:
   572  				    advertiseAddress: 10.0.0.3
   573  				    bindPort: 6443
   574  				apiVersion: kubeadm.k8s.io/v1beta2
   575  				kind: ClusterStatus
   576  				`),
   577  		},
   578  	}
   579  	kubeadmConfigWithoutClusterStatus := kubeadmConfig.DeepCopy()
   580  	delete(kubeadmConfigWithoutClusterStatus.Data, clusterStatusKey)
   581  
   582  	node1 := &corev1.Node{
   583  		ObjectMeta: metav1.ObjectMeta{
   584  			Name:      "ip-10-0-0-1.ec2.internal",
   585  			Namespace: "ns1",
   586  			Labels: map[string]string{
   587  				labelNodeRoleControlPlane: "",
   588  			},
   589  		},
   590  	}
   591  	node2 := node1.DeepCopy()
   592  	node2.Name = "ip-10-0-0-2.ec2.internal"
   593  
   594  	fakeEtcdClient := &fake2.FakeEtcdClient{
   595  		MemberListResponse: &clientv3.MemberListResponse{
   596  			Members: []*pb.Member{
   597  				{Name: "ip-10-0-0-1.ec2.internal", ID: uint64(1)},
   598  				{Name: "ip-10-0-0-2.ec2.internal", ID: uint64(2)},
   599  				{Name: "ip-10-0-0-3.ec2.internal", ID: uint64(3)},
   600  			},
   601  		},
   602  		AlarmResponse: &clientv3.AlarmResponse{
   603  			Alarms: []*pb.AlarmMember{},
   604  		},
   605  	}
   606  
   607  	tests := []struct {
   608  		name                string
   609  		kubernetesVersion   semver.Version
   610  		objs                []client.Object
   611  		nodes               []string
   612  		etcdClientGenerator etcdClientFor
   613  		expectErr           bool
   614  		assert              func(*WithT, client.Client)
   615  	}{
   616  		{
    617  			// the etcd member to be removed is ip-10-0-0-3.ec2.internal since the
    618  			// other two members have corresponding Nodes
   619  			name:              "successfully removes the etcd member without a node and removes the node from kubeadm config for Kubernetes version < 1.22.0",
   620  			kubernetesVersion: semver.MustParse("1.19.1"), // Kubernetes version < 1.22.0 has ClusterStatus
   621  			objs:              []client.Object{node1.DeepCopy(), node2.DeepCopy(), kubeadmConfig.DeepCopy()},
   622  			nodes:             []string{node1.Name, node2.Name},
   623  			etcdClientGenerator: &fakeEtcdClientGenerator{
   624  				forNodesClient: &etcd.Client{
   625  					EtcdClient: fakeEtcdClient,
   626  				},
   627  			},
   628  			expectErr: false,
   629  			assert: func(g *WithT, c client.Client) {
   630  				g.Expect(fakeEtcdClient.RemovedMember).To(Equal(uint64(3)))
   631  
   632  				var actualConfig corev1.ConfigMap
   633  				g.Expect(c.Get(
   634  					ctx,
   635  					client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   636  					&actualConfig,
   637  				)).To(Succeed())
   638  				expectedOutput := utilyaml.Raw(`
   639  					apiEndpoints:
   640  					  ip-10-0-0-1.ec2.internal:
   641  					    advertiseAddress: 10.0.0.1
   642  					    bindPort: 6443
   643  					  ip-10-0-0-2.ec2.internal:
   644  					    advertiseAddress: 10.0.0.2
   645  					    bindPort: 6443
   646  					apiVersion: kubeadm.k8s.io/v1beta2
   647  					kind: ClusterStatus
   648  					`)
   649  				g.Expect(actualConfig.Data[clusterStatusKey]).To(Equal(expectedOutput))
   650  			},
   651  		},
   652  		{
    653  			// the etcd member to be removed is ip-10-0-0-3.ec2.internal since the
    654  			// other two members have corresponding Nodes
   655  			name:              "successfully removes the etcd member without a node for Kubernetes version >= 1.22.0",
   656  			kubernetesVersion: minKubernetesVersionWithoutClusterStatus, // Kubernetes version >= 1.22.0 does not have ClusterStatus
   657  			objs:              []client.Object{node1.DeepCopy(), node2.DeepCopy(), kubeadmConfigWithoutClusterStatus.DeepCopy()},
   658  			nodes:             []string{node1.Name, node2.Name},
   659  			etcdClientGenerator: &fakeEtcdClientGenerator{
   660  				forNodesClient: &etcd.Client{
   661  					EtcdClient: fakeEtcdClient,
   662  				},
   663  			},
   664  			expectErr: false,
   665  			assert: func(g *WithT, c client.Client) {
   666  				g.Expect(fakeEtcdClient.RemovedMember).To(Equal(uint64(3)))
   667  
   668  				var actualConfig corev1.ConfigMap
   669  				g.Expect(c.Get(
   670  					ctx,
   671  					client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   672  					&actualConfig,
   673  				)).To(Succeed())
   674  				g.Expect(actualConfig.Data).ToNot(HaveKey(clusterStatusKey))
   675  			},
   676  		},
   677  		{
    678  			name:  "returns an error if there aren't enough control plane nodes",
   679  			objs:  []client.Object{node1.DeepCopy(), kubeadmConfig.DeepCopy()},
   680  			nodes: []string{node1.Name},
   681  			etcdClientGenerator: &fakeEtcdClientGenerator{
   682  				forNodesClient: &etcd.Client{
   683  					EtcdClient: fakeEtcdClient,
   684  				},
   685  			},
   686  			expectErr: true,
   687  		},
   688  	}
   689  
   690  	for _, tt := range tests {
   691  		t.Run(tt.name, func(t *testing.T) {
   692  			g := NewWithT(t)
   693  
   694  			for _, o := range tt.objs {
   695  				g.Expect(env.CreateAndWait(ctx, o)).To(Succeed())
   696  				defer func(do client.Object) {
   697  					g.Expect(env.CleanupAndWait(ctx, do)).To(Succeed())
   698  				}(o)
   699  			}
   700  
   701  			w := &Workload{
   702  				Client:              env.Client,
   703  				etcdClientGenerator: tt.etcdClientGenerator,
   704  			}
   705  			ctx := context.TODO()
   706  			_, err := w.ReconcileEtcdMembers(ctx, tt.nodes, tt.kubernetesVersion)
   707  			if tt.expectErr {
   708  				g.Expect(err).To(HaveOccurred())
   709  				return
   710  			}
   711  			g.Expect(err).ToNot(HaveOccurred())
   712  
   713  			if tt.assert != nil {
   714  				tt.assert(g, env.Client)
   715  			}
   716  		})
   717  	}
   718  }
   719  
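        // TestRemoveNodeFromKubeadmConfigMap verifies that the given apiEndpoint is removed from the ClusterStatus
        // in the kubeadm-config ConfigMap, and that a missing endpoint is a no-op.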
   720  func TestRemoveNodeFromKubeadmConfigMap(t *testing.T) {
   721  	tests := []struct {
   722  		name              string
   723  		apiEndpoint       string
   724  		clusterStatusData string
   725  		wantClusterStatus string
   726  	}{
   727  		{
   728  			name:        "removes the api endpoint",
   729  			apiEndpoint: "ip-10-0-0-2.ec2.internal",
   730  			clusterStatusData: utilyaml.Raw(`
   731  				apiEndpoints:
   732  				  ip-10-0-0-1.ec2.internal:
   733  				    advertiseAddress: 10.0.0.1
   734  				    bindPort: 6443
   735  				  ip-10-0-0-2.ec2.internal:
   736  				    advertiseAddress: 10.0.0.2
   737  				    bindPort: 6443
   738  				apiVersion: kubeadm.k8s.io/v1beta2
   739  				kind: ClusterStatus
   740  				`),
   741  			wantClusterStatus: utilyaml.Raw(`
   742  				apiEndpoints:
   743  				  ip-10-0-0-1.ec2.internal:
   744  				    advertiseAddress: 10.0.0.1
   745  				    bindPort: 6443
   746  				apiVersion: kubeadm.k8s.io/v1beta2
   747  				kind: ClusterStatus
   748  				`),
   749  		},
   750  		{
    751  			name:        "no op if the api endpoint does not exist",
   752  			apiEndpoint: "ip-10-0-0-2.ec2.internal",
   753  			clusterStatusData: utilyaml.Raw(`
   754  				apiEndpoints:
   755  				  ip-10-0-0-1.ec2.internal:
   756  				    advertiseAddress: 10.0.0.1
   757  				    bindPort: 6443
   758  				apiVersion: kubeadm.k8s.io/v1beta2
   759  				kind: ClusterStatus
   760  				`),
   761  			wantClusterStatus: utilyaml.Raw(`
   762  				apiEndpoints:
   763  				  ip-10-0-0-1.ec2.internal:
   764  				    advertiseAddress: 10.0.0.1
   765  				    bindPort: 6443
   766  				apiVersion: kubeadm.k8s.io/v1beta2
   767  				kind: ClusterStatus
   768  				`),
   769  		},
   770  	}
   771  	for _, tt := range tests {
   772  		t.Run(tt.name, func(t *testing.T) {
   773  			g := NewWithT(t)
   774  			fakeClient := fake.NewClientBuilder().WithObjects(&corev1.ConfigMap{
   775  				ObjectMeta: metav1.ObjectMeta{
   776  					Name:      kubeadmConfigKey,
   777  					Namespace: metav1.NamespaceSystem,
   778  				},
   779  				Data: map[string]string{
   780  					clusterStatusKey: tt.clusterStatusData,
   781  				},
   782  			}).Build()
   783  
   784  			w := &Workload{
   785  				Client: fakeClient,
   786  			}
   787  			err := w.RemoveNodeFromKubeadmConfigMap(ctx, tt.apiEndpoint, semver.MustParse("1.19.1"))
   788  			g.Expect(err).ToNot(HaveOccurred())
   789  
   790  			var actualConfig corev1.ConfigMap
   791  			g.Expect(w.Client.Get(
   792  				ctx,
   793  				client.ObjectKey{Name: kubeadmConfigKey, Namespace: metav1.NamespaceSystem},
   794  				&actualConfig,
   795  			)).To(Succeed())
   796  			g.Expect(actualConfig.Data[clusterStatusKey]).Should(Equal(tt.wantClusterStatus), cmp.Diff(tt.wantClusterStatus, actualConfig.Data[clusterStatusKey]))
   797  		})
   798  	}
   799  }
   800  
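        // fakeEtcdClientGenerator implements etcdClientFor for tests, returning canned etcd clients or errors
        // from forFirstAvailableNode and forLeader.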
   801  type fakeEtcdClientGenerator struct {
   802  	forNodesClient     *etcd.Client
   803  	forNodesClientFunc func([]string) (*etcd.Client, error)
   804  	forLeaderClient    *etcd.Client
   805  	forNodesErr        error
   806  	forLeaderErr       error
   807  }
   808  
   809  func (c *fakeEtcdClientGenerator) forFirstAvailableNode(_ context.Context, n []string) (*etcd.Client, error) {
   810  	if c.forNodesClientFunc != nil {
   811  		return c.forNodesClientFunc(n)
   812  	}
   813  	return c.forNodesClient, c.forNodesErr
   814  }
   815  
   816  func (c *fakeEtcdClientGenerator) forLeader(_ context.Context, _ []string) (*etcd.Client, error) {
   817  	return c.forLeaderClient, c.forLeaderErr
   818  }
   819  
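        // defaultMachine returns a Machine whose NodeRef points at "machine-node", optionally modified by the given transforms.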
   820  func defaultMachine(transforms ...func(m *clusterv1.Machine)) *clusterv1.Machine {
   821  	m := &clusterv1.Machine{
   822  		Status: clusterv1.MachineStatus{
   823  			NodeRef: &corev1.ObjectReference{
   824  				Name: "machine-node",
   825  			},
   826  		},
   827  	}
   828  	for _, t := range transforms {
   829  		t(m)
   830  	}
   831  	return m
   832  }
   833  
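        // nodeNamed returns a Node with the given name.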
   834  func nodeNamed(name string) corev1.Node {
   835  	node := corev1.Node{
   836  		ObjectMeta: metav1.ObjectMeta{
   837  			Name: name,
   838  		},
   839  	}
   840  	return node
   841  }