sigs.k8s.io/cluster-api@v1.7.1/controlplane/kubeadm/internal/workload_cluster_conditions_test.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package internal

import (
	"fmt"
	"testing"

	. "github.com/onsi/gomega"
	"github.com/pkg/errors"
	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	clientv3 "go.etcd.io/etcd/client/v3"
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
	"sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd"
	fake2 "sigs.k8s.io/cluster-api/controlplane/kubeadm/internal/etcd/fake"
	"sigs.k8s.io/cluster-api/util/collections"
	"sigs.k8s.io/cluster-api/util/conditions"
)

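// TestUpdateEtcdConditions verifies that UpdateEtcdConditions aggregates the
// health of the etcd members hosted on control plane machines into the
// EtcdClusterHealthy condition on the KubeadmControlPlane and into the
// EtcdMemberHealthy condition on each machine; fakes are injected so every
// case can return nodes and etcd clients with a controlled status or fail
// with a specific error.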
func TestUpdateEtcdConditions(t *testing.T) {
	tests := []struct {
		name                      string
		kcp                       *controlplanev1.KubeadmControlPlane
		machines                  []*clusterv1.Machine
		injectClient              client.Client // This test injects a fake client because it needs to return nodes with a controlled Status or to fail with a specific error.
		injectEtcdClientGenerator etcdClientFor // This test injects a fake etcdClientGenerator because it needs to return etcd clients with a controlled Status or to fail with a specific error.
		expectedKCPCondition      *clusterv1.Condition
		expectedMachineConditions map[string]clusterv1.Conditions
	}{
		{
			name: "if listing nodes returns an error, it should report all the conditions as Unknown",
			machines: []*clusterv1.Machine{
				fakeMachine("m1"),
			},
			injectClient: &fakeClient{
				listErr: errors.New("failed to list nodes"),
			},
			expectedKCPCondition: conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterInspectionFailedReason, "Failed to list nodes which are hosting the etcd members"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to get the node which is hosting the etcd member"),
				},
			},
		},
		{
			name: "a node without a machine should be ignored if there are provisioning machines",
			machines: []*clusterv1.Machine{
				fakeMachine("m1"), // without NodeRef (provisioning)
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			expectedKCPCondition: nil,
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {},
			},
		},
		{
			name:     "a node without a machine should report a problem at KCP level if there are no provisioning machines",
			machines: []*clusterv1.Machine{},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane node %s does not have a corresponding machine", "n1"),
		},
		{
			name: "failure creating the etcd client should report an unknown condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesErr: errors.New("failed to get client for node"),
			},
			expectedKCPCondition: conditions.UnknownCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnknownReason, "Following machines are reporting unknown etcd member status: m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.UnknownCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberInspectionFailedReason, "Failed to connect to the etcd pod on the %s node: failed to get client for node", "n1"),
				},
			},
		},
		{
			name: "etcd client reporting status errors should be reflected in a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClient: &etcd.Client{
					EtcdClient: &fake2.FakeEtcdClient{
						EtcdEndpoints: []string{},
					},
					Errors: []string{"some errors"},
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member status reports errors: %s", "some errors"),
				},
			},
		},
		{
			name: "failure listing members should report a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClient: &etcd.Client{
					EtcdClient: &fake2.FakeEtcdClient{
						EtcdEndpoints: []string{},
						ErrorResponse: errors.New("failed to list members"),
					},
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Failed get answer from the etcd member on the %s node", "n1"),
				},
			},
		},
		{
			name: "an etcd member with alarms should report a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClient: &etcd.Client{
					EtcdClient: &fake2.FakeEtcdClient{
						EtcdEndpoints: []string{},
						MemberListResponse: &clientv3.MemberListResponse{
							Members: []*pb.Member{
								{Name: "n1", ID: uint64(1)},
							},
						},
						AlarmResponse: &clientv3.AlarmResponse{
							Alarms: []*pb.AlarmMember{
								{MemberID: uint64(1), Alarm: 1}, // NOSPACE
							},
						},
					},
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Etcd member reports alarms: %s", "NOSPACE"),
				},
			},
		},
		{
			name: "etcd members with different Cluster IDs should report a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
				fakeMachine("m2", withNodeRef("n2")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{
						*fakeNode("n1"),
						*fakeNode("n2"),
					},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClientFunc: func(n []string) (*etcd.Client, error) {
					switch n[0] {
					case "n1":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										{Name: "n2", ID: uint64(2)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					case "n2":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(2), // different Cluster ID
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										{Name: "n2", ID: uint64(2)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					default:
						return nil, errors.New("no client for this node")
					}
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m2"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition),
				},
				"m2": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "etcd member has cluster ID %d, but all previously seen etcd members have cluster ID %d", uint64(2), uint64(1)),
				},
			},
		},
		{
			name: "etcd members with different member lists should report a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
				fakeMachine("m2", withNodeRef("n2")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{
						*fakeNode("n1"),
						*fakeNode("n2"),
					},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClientFunc: func(n []string) (*etcd.Client, error) {
					switch n[0] {
					case "n1":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										{Name: "n2", ID: uint64(2)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					case "n2":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{ // different member list
										{Name: "n2", ID: uint64(2)},
										{Name: "n3", ID: uint64(3)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					default:
						return nil, errors.New("no client for this node")
					}
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m2"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition),
				},
				"m2": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "etcd member reports the cluster is composed by members [n2 n3], but all previously seen etcd members are reporting [n1 n2]"),
				},
			},
		},
		{
			name: "a machine without a member should report a false condition",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
				fakeMachine("m2", withNodeRef("n2")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{
						*fakeNode("n1"),
						*fakeNode("n2"),
					},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClientFunc: func(n []string) (*etcd.Client, error) {
					switch n[0] {
					case "n1":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										// member n2 is missing
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					default:
						return nil, errors.New("no client for this node")
					}
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.EtcdClusterHealthyCondition, controlplanev1.EtcdClusterUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting etcd member errors: %s", "m2"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition),
				},
				"m2": {
					*conditions.FalseCondition(controlplanev1.MachineEtcdMemberHealthyCondition, controlplanev1.EtcdMemberUnhealthyReason, clusterv1.ConditionSeverityError, "Missing etcd member"),
				},
			},
		},
		{
			name: "healthy etcd members should report true",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
				fakeMachine("m2", withNodeRef("n2")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{
						*fakeNode("n1"),
						*fakeNode("n2"),
					},
				},
			},
			injectEtcdClientGenerator: &fakeEtcdClientGenerator{
				forNodesClientFunc: func(n []string) (*etcd.Client, error) {
					switch n[0] {
					case "n1":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										{Name: "n2", ID: uint64(2)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					case "n2":
						return &etcd.Client{
							EtcdClient: &fake2.FakeEtcdClient{
								EtcdEndpoints: []string{},
								MemberListResponse: &clientv3.MemberListResponse{
									Header: &pb.ResponseHeader{
										ClusterId: uint64(1),
									},
									Members: []*pb.Member{
										{Name: "n1", ID: uint64(1)},
										{Name: "n2", ID: uint64(2)},
									},
								},
								AlarmResponse: &clientv3.AlarmResponse{
									Alarms: []*pb.AlarmMember{},
								},
							},
						}, nil
					default:
						return nil, errors.New("no client for this node")
					}
				},
			},
			expectedKCPCondition: conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition),
				},
				"m2": {
					*conditions.TrueCondition(controlplanev1.MachineEtcdMemberHealthyCondition),
				},
			},
		},
		{
			name: "External etcd should set a condition at KCP level",
			kcp: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
						ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
							Etcd: bootstrapv1.Etcd{
								External: &bootstrapv1.ExternalEtcd{},
							},
						},
					},
				},
			},
			expectedKCPCondition: conditions.TrueCondition(controlplanev1.EtcdClusterHealthyCondition),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			if tt.kcp == nil {
				tt.kcp = &controlplanev1.KubeadmControlPlane{}
			}
			w := &Workload{
				Client:              tt.injectClient,
				etcdClientGenerator: tt.injectEtcdClientGenerator,
			}
			controlPlane := &ControlPlane{
				KCP:      tt.kcp,
				Machines: collections.FromMachines(tt.machines...),
			}
			w.UpdateEtcdConditions(ctx, controlPlane)

			if tt.expectedKCPCondition != nil {
				g.Expect(*conditions.Get(tt.kcp, controlplanev1.EtcdClusterHealthyCondition)).To(conditions.MatchCondition(*tt.expectedKCPCondition))
			}
			for _, m := range tt.machines {
				g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name))
				g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name]), "unexpected conditions for machine %s", m.Name)
			}
		})
	}
}

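// TestUpdateStaticPodConditions verifies that UpdateStaticPodConditions
// aggregates the health of the static pods (kube-apiserver,
// kube-controller-manager, kube-scheduler and etcd) hosted on control plane
// machines into the ControlPlaneComponentsHealthy condition on the
// KubeadmControlPlane and into the per-component pod conditions on each
// machine.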
func TestUpdateStaticPodConditions(t *testing.T) {
	n1APIServerPodName := staticPodName("kube-apiserver", "n1")
	n1APIServerPodKey := client.ObjectKey{
		Namespace: metav1.NamespaceSystem,
		Name:      n1APIServerPodName,
	}.String()
	n1ControllerManagerPodName := staticPodName("kube-controller-manager", "n1")
	n1ControllerManagerPodKey := client.ObjectKey{
		Namespace: metav1.NamespaceSystem,
		Name:      n1ControllerManagerPodName,
	}.String()
	n1SchedulerPodName := staticPodName("kube-scheduler", "n1")
	n1SchedulerPodKey := client.ObjectKey{
		Namespace: metav1.NamespaceSystem,
		Name:      n1SchedulerPodName,
	}.String()
	n1EtcdPodName := staticPodName("etcd", "n1")
	n1EtcdPodKey := client.ObjectKey{
		Namespace: metav1.NamespaceSystem,
		Name:      n1EtcdPodName,
	}.String()
	tests := []struct {
		name                      string
		kcp                       *controlplanev1.KubeadmControlPlane
		machines                  []*clusterv1.Machine
		injectClient              client.Client // This test injects a fake client because it needs to return nodes and pods with a controlled Status or to fail with a specific error.
		expectedKCPCondition      *clusterv1.Condition
		expectedMachineConditions map[string]clusterv1.Conditions
	}{
		{
			name: "if listing nodes returns an error, it should report all the conditions as Unknown",
			machines: []*clusterv1.Machine{
				fakeMachine("m1"),
			},
			injectClient: &fakeClient{
				listErr: errors.New("failed to list nodes"),
			},
			expectedKCPCondition: conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsInspectionFailedReason, "Failed to list nodes which are hosting control plane components: failed to list nodes"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the node which is hosting this component: failed to list nodes"),
					*conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the node which is hosting this component: failed to list nodes"),
					*conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the node which is hosting this component: failed to list nodes"),
					*conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Failed to get the node which is hosting this component: failed to list nodes"),
				},
			},
		},
		{
			name: "If there are provisioning machines, a node without a machine should be ignored",
			machines: []*clusterv1.Machine{
				fakeMachine("m1"), // without NodeRef (provisioning)
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			expectedKCPCondition: nil,
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {},
			},
		},
		{
			name:     "If there are no provisioning machines, a node without a machine should be reported as a False condition at KCP level",
			machines: []*clusterv1.Machine{},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Control plane node %s does not have a corresponding machine", "n1"),
		},
		{
			name: "A node with an unreachable taint should report all the conditions Unknown",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1", withUnreachableTaint())},
				},
			},
			expectedKCPCondition: conditions.UnknownCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnknownReason, "Following machines are reporting unknown control plane status: m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.UnknownCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"),
					*conditions.UnknownCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"),
					*conditions.UnknownCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"),
					*conditions.UnknownCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodInspectionFailedReason, "Node is unreachable"),
				},
			},
		},
		{
			name: "A provisioning machine without a node should be ignored",
			machines: []*clusterv1.Machine{
				fakeMachine("m1"), // without NodeRef (provisioning)
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{},
			},
			expectedKCPCondition: nil,
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {},
			},
		},
		{
			name: "A provisioned machine without a node should report all the conditions as false",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting control plane errors: %s", "m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.FalseCondition(controlplanev1.MachineAPIServerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing node"),
					*conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing node"),
					*conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing node"),
					*conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Missing node"),
				},
			},
		},
		{
			name: "Should surface control plane components errors",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
				get: map[string]interface{}{
					n1APIServerPodKey: fakePod(n1APIServerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1ControllerManagerPodKey: fakePod(n1ControllerManagerPodName,
						withPhase(corev1.PodPending),
						withCondition(corev1.PodScheduled, corev1.ConditionFalse),
					),
					n1SchedulerPodKey: fakePod(n1SchedulerPodName,
						withPhase(corev1.PodFailed),
					),
					n1EtcdPodKey: fakePod(n1EtcdPodName,
						withPhase(corev1.PodSucceeded),
					),
				},
			},
			expectedKCPCondition: conditions.FalseCondition(controlplanev1.ControlPlaneComponentsHealthyCondition, controlplanev1.ControlPlaneComponentsUnhealthyReason, clusterv1.ConditionSeverityError, "Following machines are reporting control plane errors: %s", "m1"),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition),
					*conditions.FalseCondition(controlplanev1.MachineControllerManagerPodHealthyCondition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"),
					*conditions.FalseCondition(controlplanev1.MachineSchedulerPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"),
					*conditions.FalseCondition(controlplanev1.MachineEtcdPodHealthyCondition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"),
				},
			},
		},
		{
			name: "Should surface control plane components health",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
				get: map[string]interface{}{
					n1APIServerPodKey: fakePod(n1APIServerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1ControllerManagerPodKey: fakePod(n1ControllerManagerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1SchedulerPodKey: fakePod(n1SchedulerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1EtcdPodKey: fakePod(n1EtcdPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
				},
			},
			expectedKCPCondition: conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition),
					*conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition),
					*conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition),
					*conditions.TrueCondition(controlplanev1.MachineEtcdPodHealthyCondition),
				},
			},
		},
		{
			name: "Should surface control plane components health with external etcd",
			kcp: &controlplanev1.KubeadmControlPlane{
				Spec: controlplanev1.KubeadmControlPlaneSpec{
					KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
						ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
							Etcd: bootstrapv1.Etcd{
								External: &bootstrapv1.ExternalEtcd{},
							},
						},
					},
				},
			},
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withNodeRef("n1")),
			},
			injectClient: &fakeClient{
				list: &corev1.NodeList{
					Items: []corev1.Node{*fakeNode("n1")},
				},
				get: map[string]interface{}{
					n1APIServerPodKey: fakePod(n1APIServerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1ControllerManagerPodKey: fakePod(n1ControllerManagerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					n1SchedulerPodKey: fakePod(n1SchedulerPodName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
					// no static pod for etcd
				},
			},
			expectedKCPCondition: conditions.TrueCondition(controlplanev1.ControlPlaneComponentsHealthyCondition),
			expectedMachineConditions: map[string]clusterv1.Conditions{
				"m1": {
					*conditions.TrueCondition(controlplanev1.MachineAPIServerPodHealthyCondition),
					*conditions.TrueCondition(controlplanev1.MachineControllerManagerPodHealthyCondition),
					*conditions.TrueCondition(controlplanev1.MachineSchedulerPodHealthyCondition),
					// no condition for etcd Pod
				},
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			if tt.kcp == nil {
				tt.kcp = &controlplanev1.KubeadmControlPlane{}
			}
			w := &Workload{
				Client: tt.injectClient,
			}
			controlPlane := &ControlPlane{
				KCP:      tt.kcp,
				Machines: collections.FromMachines(tt.machines...),
			}
			w.UpdateStaticPodConditions(ctx, controlPlane)

			if tt.expectedKCPCondition != nil {
				g.Expect(*conditions.Get(tt.kcp, controlplanev1.ControlPlaneComponentsHealthyCondition)).To(conditions.MatchCondition(*tt.expectedKCPCondition))
			}
			for _, m := range tt.machines {
				g.Expect(tt.expectedMachineConditions).To(HaveKey(m.Name))
				g.Expect(m.GetConditions()).To(conditions.MatchConditions(tt.expectedMachineConditions[m.Name]))
			}
		})
	}
}

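// TestUpdateStaticPodCondition verifies that updateStaticPodCondition derives
// the condition for a single static pod from the pod phase, the pod
// conditions, the container statuses, and the readiness of the node hosting
// it.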
func TestUpdateStaticPodCondition(t *testing.T) {
	machine := &clusterv1.Machine{}
	nodeName := "node"
	component := "kube-component"
	condition := clusterv1.ConditionType("kubeComponentHealthy")
	podName := staticPodName(component, nodeName)
	podkey := client.ObjectKey{
		Namespace: metav1.NamespaceSystem,
		Name:      podName,
	}.String()

	tests := []struct {
		name              string
		injectClient      client.Client // This test injects a fake client because it needs to return pods with a controlled Status or to fail with a specific error.
		node              *corev1.Node
		expectedCondition clusterv1.Condition
	}{
		{
			name:              "if node Ready is unknown, assume pod status is stale",
			node:              fakeNode(nodeName, withReadyCondition(corev1.ConditionUnknown)),
			expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Node Ready condition is unknown, pod data might be stale"),
		},
		{
			name: "if getting the pod returns a NotFound error, it should report PodCondition=False, PodMissing",
			injectClient: &fakeClient{
				getErr: apierrors.NewNotFound(schema.ParseGroupResource("Pod"), component),
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodMissingReason, clusterv1.ConditionSeverityError, "Pod kube-component-node is missing"),
		},
		{
			name: "if getting the pod returns a generic error, it should report PodCondition=Unknown, PodInspectionFailed",
			injectClient: &fakeClient{
				getErr: errors.New("get failure"),
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Failed to get pod status"),
		},
		{
			name: "pending pod not yet scheduled should report PodCondition=False, PodProvisioning",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodPending),
						withCondition(corev1.PodScheduled, corev1.ConditionFalse),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting to be scheduled"),
		},
		{
			name: "pending pod running init containers should report PodCondition=False, PodProvisioning",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodPending),
						withCondition(corev1.PodScheduled, corev1.ConditionTrue),
						withCondition(corev1.PodInitialized, corev1.ConditionFalse),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Running init containers"),
		},
		{
			name: "pending pod with PodScheduled and PodInitialized should report PodCondition=False, PodProvisioning",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodPending),
						withCondition(corev1.PodScheduled, corev1.ConditionTrue),
						withCondition(corev1.PodInitialized, corev1.ConditionTrue),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, ""),
		},
		{
			name: "running pod with PodReady should report PodCondition=True",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodRunning),
						withCondition(corev1.PodReady, corev1.ConditionTrue),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.TrueCondition(condition),
		},
		{
			name: "running pod with ContainerStatus Waiting should report PodCondition=False, PodProvisioning",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodRunning),
						withContainerStatus(corev1.ContainerStatus{
							State: corev1.ContainerState{
								Waiting: &corev1.ContainerStateWaiting{Reason: "Waiting something"},
							},
						}),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting something"),
		},
		{
			name: "running pod with ContainerStatus Waiting but with exit code != 0 should report PodCondition=False, PodFailed",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodRunning),
						withContainerStatus(corev1.ContainerStatus{
							State: corev1.ContainerState{
								Waiting: &corev1.ContainerStateWaiting{Reason: "Waiting something"},
							},
							LastTerminationState: corev1.ContainerState{
								Terminated: &corev1.ContainerStateTerminated{
									ExitCode: 1,
								},
							},
						}),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Waiting something"),
		},
		{
			name: "running pod with ContainerStatus Terminated should report PodCondition=False, PodFailed",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodRunning),
						withContainerStatus(corev1.ContainerStatus{
							State: corev1.ContainerState{
								Terminated: &corev1.ContainerStateTerminated{Reason: "Something failed"},
							},
						}),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "Something failed"),
		},
		{
			name: "running pod without PodReady and without container status messages should report PodCondition=False, PodProvisioning",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodRunning),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodProvisioningReason, clusterv1.ConditionSeverityInfo, "Waiting for startup or readiness probes"),
		},
		{
			name: "failed pod should report PodCondition=False, PodFailed",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodFailed),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"),
		},
		{
			name: "succeeded pod should report PodCondition=False, PodFailed",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodSucceeded),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.FalseCondition(condition, controlplanev1.PodFailedReason, clusterv1.ConditionSeverityError, "All the containers have been terminated"),
		},
		{
			name: "pod in unknown phase should report PodCondition=Unknown, PodInspectionFailed",
			injectClient: &fakeClient{
				get: map[string]interface{}{
					podkey: fakePod(podName,
						withPhase(corev1.PodUnknown),
					),
				},
			},
			node:              fakeNode(nodeName),
			expectedCondition: *conditions.UnknownCondition(condition, controlplanev1.PodInspectionFailedReason, "Pod is reporting unknown status"),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			w := &Workload{
				Client: tt.injectClient,
			}
			w.updateStaticPodCondition(ctx, machine, *tt.node, component, condition)

			g.Expect(*conditions.Get(machine, condition)).To(conditions.MatchCondition(tt.expectedCondition))
		})
	}
}

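// fakeNodeOption mutates a test Node; fakeNode builds a minimal control plane
// Node (labeled with labelNodeRoleControlPlane) and applies the given options
// to it.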
type fakeNodeOption func(*corev1.Node)

func fakeNode(name string, options ...fakeNodeOption) *corev1.Node {
	p := &corev1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				labelNodeRoleControlPlane: "",
			},
		},
	}
	for _, opt := range options {
		opt(p)
	}
	return p
}

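// withUnreachableTaint adds the node.kubernetes.io/unreachable:NoExecute
// taint to the node, simulating a node that is no longer reachable.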
func withUnreachableTaint() fakeNodeOption {
	return func(node *corev1.Node) {
		node.Spec.Taints = append(node.Spec.Taints, corev1.Taint{
			Key:    corev1.TaintNodeUnreachable,
			Effect: corev1.TaintEffectNoExecute,
		})
	}
}

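// withReadyCondition sets the node's Ready condition to the given status.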
func withReadyCondition(status corev1.ConditionStatus) fakeNodeOption {
	return func(node *corev1.Node) {
		node.Status.Conditions = append(node.Status.Conditions, corev1.NodeCondition{
			Type:   corev1.NodeReady,
			Status: status,
		})
	}
}

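// fakeMachineOption mutates a test Machine; fakeMachine builds a minimal
// Machine with the given name and applies the given options to it.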
type fakeMachineOption func(*clusterv1.Machine)

func fakeMachine(name string, options ...fakeMachineOption) *clusterv1.Machine {
	p := &clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	for _, opt := range options {
		opt(p)
	}
	return p
}

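// withNodeRef sets the machine's NodeRef to the node with the given name,
// marking the machine as provisioned.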
func withNodeRef(ref string) fakeMachineOption {
	return func(machine *clusterv1.Machine) {
		machine.Status.NodeRef = &corev1.ObjectReference{
			Kind: "Node",
			Name: ref,
		}
	}
}

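// withMachineReadyCondition appends a MachinesReady condition with the given
// status and severity to the machine.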
func withMachineReadyCondition(status corev1.ConditionStatus, severity clusterv1.ConditionSeverity) fakeMachineOption {
	return func(machine *clusterv1.Machine) {
		machine.Status.Conditions = append(machine.Status.Conditions, clusterv1.Condition{
			Type:     clusterv1.MachinesReadyCondition,
			Status:   status,
			Severity: severity,
		})
	}
}

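// fakePodOption mutates a test Pod; fakePod builds a minimal Pod in the
// kube-system namespace and applies the given options to it.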
type fakePodOption func(*corev1.Pod)

func fakePod(name string, options ...fakePodOption) *corev1.Pod {
	p := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: metav1.NamespaceSystem,
		},
	}
	for _, opt := range options {
		opt(p)
	}
	return p
}

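// withPhase sets the pod's phase.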
func withPhase(phase corev1.PodPhase) fakePodOption {
	return func(pod *corev1.Pod) {
		pod.Status.Phase = phase
	}
}

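// withContainerStatus appends the given container status to the pod.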
func withContainerStatus(status corev1.ContainerStatus) fakePodOption {
	return func(pod *corev1.Pod) {
		pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, status)
	}
}

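// withCondition appends a pod condition of the given type and status to the
// pod.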
func withCondition(condition corev1.PodConditionType, status corev1.ConditionStatus) fakePodOption {
	return func(pod *corev1.Pod) {
		c := corev1.PodCondition{
			Type:   condition,
			Status: status,
		}
		pod.Status.Conditions = append(pod.Status.Conditions, c)
	}
}

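// TestAggregateFromMachinesToKCP verifies that aggregateFromMachinesToKCP
// rolls per-machine conditions up into a single KCP-level condition,
// surfacing machine errors, warnings, info, and unknown states as well as
// KCP-level errors with the appropriate severity.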
func TestAggregateFromMachinesToKCP(t *testing.T) {
	conditionType := controlplanev1.ControlPlaneComponentsHealthyCondition
	unhealthyReason := "unhealthy reason"
	unknownReason := "unknown reason"
	note := "some notes"

	tests := []struct {
		name              string
		machines          []*clusterv1.Machine
		kcpErrors         []string
		expectedCondition clusterv1.Condition
	}{
		{
			name: "kcp machines with errors",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityError)),
			},
			expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, fmt.Sprintf("Following machines are reporting %s errors: %s", note, "m1")),
		},
		{
			name: "input kcp errors",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)),
			},
			kcpErrors:         []string{"something error"},
			expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityError, "something error"),
		},
		{
			name: "kcp machines with warnings",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityWarning)),
			},
			expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityWarning, fmt.Sprintf("Following machines are reporting %s warnings: %s", note, "m1")),
		},
		{
			name: "kcp machines with info",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionFalse, clusterv1.ConditionSeverityInfo)),
			},
			expectedCondition: *conditions.FalseCondition(conditionType, unhealthyReason, clusterv1.ConditionSeverityInfo, fmt.Sprintf("Following machines are reporting %s info: %s", note, "m1")),
		},
		{
			name: "kcp machines with true",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionTrue, clusterv1.ConditionSeverityNone)),
			},
			expectedCondition: *conditions.TrueCondition(conditionType),
		},
		{
			name: "kcp machines with unknown",
			machines: []*clusterv1.Machine{
				fakeMachine("m1", withMachineReadyCondition(corev1.ConditionUnknown, clusterv1.ConditionSeverityNone)),
			},
			expectedCondition: *conditions.UnknownCondition(conditionType, unknownReason, fmt.Sprintf("Following machines are reporting unknown %s status: %s", note, "m1")),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)

			input := aggregateFromMachinesToKCPInput{
				controlPlane: &ControlPlane{
					KCP:      &controlplanev1.KubeadmControlPlane{},
					Machines: collections.FromMachines(tt.machines...),
				},
				machineConditions: []clusterv1.ConditionType{clusterv1.MachinesReadyCondition},
				kcpErrors:         tt.kcpErrors,
				condition:         conditionType,
				unhealthyReason:   unhealthyReason,
				unknownReason:     unknownReason,
				note:              note,
			}
			aggregateFromMachinesToKCP(input)

			g.Expect(*conditions.Get(input.controlPlane.KCP, conditionType)).To(conditions.MatchCondition(tt.expectedCondition))
		})
	}
}