sigs.k8s.io/cluster-api@v1.7.1/internal/controllers/machine/machine_controller_noderef_test.go

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package machine
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"testing"
    23  	"time"
    24  
    25  	. "github.com/onsi/gomega"
    26  	corev1 "k8s.io/api/core/v1"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    29  	"k8s.io/utils/ptr"
    30  	ctrl "sigs.k8s.io/controller-runtime"
    31  	"sigs.k8s.io/controller-runtime/pkg/client"
    32  	"sigs.k8s.io/controller-runtime/pkg/handler"
    33  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    34  
    35  	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    36  	"sigs.k8s.io/cluster-api/controllers/remote"
    37  	"sigs.k8s.io/cluster-api/util"
    38  	"sigs.k8s.io/cluster-api/util/kubeconfig"
    39  )
    40  
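         // TestGetNode verifies that Reconciler.getNode looks up Nodes in the workload
         // cluster by providerID through the cluster cache tracker, and that it returns
         // ErrNodeNotFound when no Node matches the given providerID.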
    41  func TestGetNode(t *testing.T) {
    42  	g := NewWithT(t)
    43  
    44  	ns, err := env.CreateNamespace(ctx, "test-get-node")
    45  	g.Expect(err).ToNot(HaveOccurred())
    46  
    47  	// Set up cluster to test against.
    48  	testCluster := &clusterv1.Cluster{
    49  		ObjectMeta: metav1.ObjectMeta{
    50  			GenerateName: "test-get-node-",
    51  			Namespace:    ns.Name,
    52  		},
    53  	}
    54  
    55  	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
    56  	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
    57  	defer func(do ...client.Object) {
    58  		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
    59  	}(ns, testCluster)
    60  
    61  	testCases := []struct {
    62  		name            string
    63  		node            *corev1.Node
    64  		providerIDInput string
    65  		error           error
    66  	}{
    67  		{
    68  			name: "full providerID matches",
    69  			node: &corev1.Node{
    70  				ObjectMeta: metav1.ObjectMeta{
    71  					Name: "test-get-node-node-1",
    72  				},
    73  				Spec: corev1.NodeSpec{
    74  					ProviderID: "aws://us-east-1/test-get-node-1",
    75  				},
    76  			},
    77  			providerIDInput: "aws://us-east-1/test-get-node-1",
    78  		},
    79  		{
     80  			name: "aws prefix: cloudProvider and ID match",
    81  			node: &corev1.Node{
    82  				ObjectMeta: metav1.ObjectMeta{
    83  					Name: "test-get-node-node-2",
    84  				},
    85  				Spec: corev1.NodeSpec{
    86  					ProviderID: "aws://us-west-2/test-get-node-2",
    87  				},
    88  			},
    89  			providerIDInput: "aws://us-west-2/test-get-node-2",
    90  		},
    91  		{
     92  			name: "gce prefix: cloudProvider and ID match",
    93  			node: &corev1.Node{
    94  				ObjectMeta: metav1.ObjectMeta{
    95  					Name: "test-get-node-gce-node-2",
    96  				},
    97  				Spec: corev1.NodeSpec{
    98  					ProviderID: "gce://us-central1/test-get-node-2",
    99  				},
   100  			},
   101  			providerIDInput: "gce://us-central1/test-get-node-2",
   102  		},
   103  		{
   104  			name: "Node is not found",
   105  			node: &corev1.Node{
   106  				ObjectMeta: metav1.ObjectMeta{
   107  					Name: "test-get-node-not-found",
   108  				},
   109  				Spec: corev1.NodeSpec{
   110  					ProviderID: "gce://us-central1/anything",
   111  				},
   112  			},
   113  			providerIDInput: "gce://not-found",
   114  			error:           ErrNodeNotFound,
   115  		},
   116  	}
   117  
   118  	nodesToCleanup := make([]client.Object, 0, len(testCases))
   119  	for _, tc := range testCases {
   120  		g.Expect(env.Create(ctx, tc.node)).To(Succeed())
   121  		nodesToCleanup = append(nodesToCleanup, tc.node)
   122  	}
   123  	defer func(do ...client.Object) {
   124  		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
   125  	}(nodesToCleanup...)
   126  
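         	// Set up a ClusterCacheTracker with the Node providerID index so that the
         	// remote client returned for the workload cluster can resolve Nodes by providerID.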
   127  	tracker, err := remote.NewClusterCacheTracker(
   128  		env.Manager, remote.ClusterCacheTrackerOptions{
   129  			Indexes: []remote.Index{remote.NodeProviderIDIndex},
   130  		},
   131  	)
   132  	g.Expect(err).ToNot(HaveOccurred())
   133  
   134  	r := &Reconciler{
   135  		Tracker:                   tracker,
   136  		Client:                    env,
   137  		UnstructuredCachingClient: env,
   138  	}
   139  
   140  	w, err := ctrl.NewControllerManagedBy(env.Manager).For(&corev1.Node{}).Build(r)
   141  	g.Expect(err).ToNot(HaveOccurred())
   142  
   143  	g.Expect(tracker.Watch(ctx, remote.WatchInput{
   144  		Name:    "TestGetNode",
   145  		Cluster: util.ObjectKey(testCluster),
   146  		Watcher: w,
   147  		Kind:    &corev1.Node{},
   148  		EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
   149  			return nil
   150  		}),
   151  	})).To(Succeed())
   152  
   153  	for _, tc := range testCases {
   154  		t.Run(tc.name, func(t *testing.T) {
   155  			g := NewWithT(t)
   156  			remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(testCluster))
   157  			g.Expect(err).ToNot(HaveOccurred())
   158  
   159  			node, err := r.getNode(ctx, remoteClient, tc.providerIDInput)
   160  			if tc.error != nil {
   161  				g.Expect(err).To(Equal(tc.error))
   162  				return
   163  			}
   164  			g.Expect(err).ToNot(HaveOccurred())
   165  			g.Expect(node.Name).To(Equal(tc.node.Name))
   166  		})
   167  	}
   168  }
   169  
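         // TestNodeLabelSync verifies that managed Machine labels are synced to the Node,
         // that unmanaged Machine labels are not synced, that pre-existing unmanaged Node
         // labels are preserved, and that the interruptible label follows the
         // InfrastructureMachine's .status.interruptible value.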
   170  func TestNodeLabelSync(t *testing.T) {
   171  	defaultCluster := &clusterv1.Cluster{
   172  		ObjectMeta: metav1.ObjectMeta{
   173  			Name:      "test-cluster",
   174  			Namespace: metav1.NamespaceDefault,
   175  		},
   176  	}
   177  
   178  	defaultInfraMachine := &unstructured.Unstructured{
   179  		Object: map[string]interface{}{
   180  			"kind":       "GenericInfrastructureMachine",
   181  			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
   182  			"metadata": map[string]interface{}{
   183  				"name":      "infra-config1",
   184  				"namespace": metav1.NamespaceDefault,
   185  			},
   186  		},
   187  	}
   188  
   189  	defaultMachine := clusterv1.Machine{
   190  		ObjectMeta: metav1.ObjectMeta{
   191  			Name:      "machine-test",
   192  			Namespace: metav1.NamespaceDefault,
   193  			Labels: map[string]string{
   194  				clusterv1.MachineControlPlaneLabel: "",
   195  			},
   196  		},
   197  		Spec: clusterv1.MachineSpec{
   198  			ClusterName: defaultCluster.Name,
   199  			Bootstrap: clusterv1.Bootstrap{
   200  				ConfigRef: &corev1.ObjectReference{
   201  					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
   202  					Kind:       "GenericBootstrapConfig",
   203  					Name:       "bootstrap-config1",
   204  				},
   205  			},
   206  			InfrastructureRef: corev1.ObjectReference{
   207  				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
   208  				Kind:       "GenericInfrastructureMachine",
   209  				Name:       "infra-config1",
   210  			},
   211  		},
   212  	}
   213  
   214  	t.Run("Should sync node labels", func(t *testing.T) {
   215  		g := NewWithT(t)
   216  
   217  		ns, err := env.CreateNamespace(ctx, "test-node-label-sync")
   218  		g.Expect(err).ToNot(HaveOccurred())
   219  		defer func() {
   220  			g.Expect(env.Cleanup(ctx, ns)).To(Succeed())
   221  		}()
   222  
   223  		nodeProviderID := fmt.Sprintf("test://%s", util.RandomString(6))
   224  
   225  		cluster := defaultCluster.DeepCopy()
   226  		cluster.Namespace = ns.Name
   227  
   228  		infraMachine := defaultInfraMachine.DeepCopy()
   229  		infraMachine.SetNamespace(ns.Name)
   230  
   231  		interruptibleTrueInfraMachineStatus := map[string]interface{}{
   232  			"interruptible": true,
   233  			"ready":         true,
   234  		}
   235  		interruptibleFalseInfraMachineStatus := map[string]interface{}{
   236  			"interruptible": false,
   237  			"ready":         true,
   238  		}
   239  
   240  		machine := defaultMachine.DeepCopy()
   241  		machine.Namespace = ns.Name
   242  		machine.Spec.ProviderID = ptr.To(nodeProviderID)
   243  
   244  		// Set Machine labels.
   245  		machine.Labels = map[string]string{}
   246  		// The expectation is that these labels will be synced to the Node.
   247  		managedMachineLabels := map[string]string{
   248  			clusterv1.NodeRoleLabelPrefix + "/anyRole": "",
   249  
   250  			clusterv1.ManagedNodeLabelDomain:                                  "valueFromMachine",
   251  			"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "valueFromMachine",
   252  			clusterv1.ManagedNodeLabelDomain + "/anything":                    "valueFromMachine",
   253  			"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "valueFromMachine",
   254  
   255  			clusterv1.NodeRestrictionLabelDomain:                                  "valueFromMachine",
   256  			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "valueFromMachine",
   257  			clusterv1.NodeRestrictionLabelDomain + "/anything":                    "valueFromMachine",
   258  			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "valueFromMachine",
   259  		}
   260  		for k, v := range managedMachineLabels {
   261  			machine.Labels[k] = v
   262  		}
   263  		// The expectation is that these labels will not be synced to the Node.
   264  		unmanagedMachineLabels := map[string]string{
   265  			"foo":                               "",
   266  			"bar":                               "",
   267  			"company.xyz/node.cluster.x-k8s.io": "not-managed",
   268  			"gpu-node.cluster.x-k8s.io":         "not-managed",
   269  			"company.xyz/node-restriction.kubernetes.io": "not-managed",
   270  			"gpu-node-restriction.kubernetes.io":         "not-managed",
   271  		}
   272  		for k, v := range unmanagedMachineLabels {
   273  			machine.Labels[k] = v
   274  		}
   275  
   276  		// Create Node.
   277  		node := &corev1.Node{
   278  			ObjectMeta: metav1.ObjectMeta{
   279  				GenerateName: "machine-test-node-",
   280  			},
   281  			Spec: corev1.NodeSpec{ProviderID: nodeProviderID},
   282  			Status: corev1.NodeStatus{
   283  				Addresses: []corev1.NodeAddress{
   284  					{
   285  						Type:    corev1.NodeInternalIP,
   286  						Address: "1.1.1.1",
   287  					},
   288  					{
   289  						Type:    corev1.NodeInternalIP,
   290  						Address: "2.2.2.2",
   291  					},
   292  				},
   293  			},
   294  		}
   295  
    296  		// Set Node labels.
    297  		// The expectation is that these labels will be overwritten by the corresponding
    298  		// labels from the Machine during node label sync.
   299  		node.Labels = map[string]string{}
   300  		managedNodeLabelsToBeOverWritten := map[string]string{
   301  			clusterv1.NodeRoleLabelPrefix + "/anyRole": "valueFromNode",
   302  
   303  			clusterv1.ManagedNodeLabelDomain:                                  "valueFromNode",
   304  			"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "valueFromNode",
   305  			clusterv1.ManagedNodeLabelDomain + "/anything":                    "valueFromNode",
   306  			"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "valueFromNode",
   307  
   308  			clusterv1.NodeRestrictionLabelDomain:                                  "valueFromNode",
   309  			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "valueFromNode",
   310  			clusterv1.NodeRestrictionLabelDomain + "/anything":                    "valueFromNode",
   311  			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "valueFromNode",
   312  		}
   313  		for k, v := range managedNodeLabelsToBeOverWritten {
   314  			node.Labels[k] = v
   315  		}
   316  		// The expectation is that these labels will be preserved by the node label sync.
   317  		unmanagedNodeLabelsToBePreserved := map[string]string{
   318  			"node-role.kubernetes.io/control-plane": "",
   319  			"label":                                 "valueFromNode",
   320  		}
   321  		for k, v := range unmanagedNodeLabelsToBePreserved {
   322  			node.Labels[k] = v
   323  		}
   324  
   325  		g.Expect(env.Create(ctx, node)).To(Succeed())
   326  		defer func() {
   327  			g.Expect(env.Cleanup(ctx, node)).To(Succeed())
   328  		}()
   329  
   330  		g.Expect(env.Create(ctx, cluster)).To(Succeed())
   331  		defaultKubeconfigSecret := kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
   332  		g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())
   333  
   334  		g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
   335  		// Set InfrastructureMachine .status.interruptible and .status.ready to true.
   336  		interruptibleTrueInfraMachine := infraMachine.DeepCopy()
   337  		g.Expect(unstructured.SetNestedMap(interruptibleTrueInfraMachine.Object, interruptibleTrueInfraMachineStatus, "status")).Should(Succeed())
   338  		g.Expect(env.Status().Patch(ctx, interruptibleTrueInfraMachine, client.MergeFrom(infraMachine))).Should(Succeed())
   339  
   340  		g.Expect(env.Create(ctx, machine)).To(Succeed())
   341  
    342  		// Validate that the right labels were synced to the Node.
   343  		g.Eventually(func(g Gomega) bool {
   344  			if err := env.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil {
   345  				return false
   346  			}
   347  
   348  			// Managed Machine Labels should have been synced to the Node.
   349  			for k, v := range managedMachineLabels {
   350  				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
   351  			}
   352  
   353  			// Interruptible label should be set on the node.
   354  			g.Expect(node.Labels).To(HaveKey(clusterv1.InterruptibleLabel))
   355  
   356  			// Unmanaged Machine labels should not have been synced to the Node.
   357  			for k, v := range unmanagedMachineLabels {
   358  				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
   359  			}
   360  
   361  			// Pre-existing managed Node labels should have been overwritten on the Node.
   362  			for k, v := range managedNodeLabelsToBeOverWritten {
   363  				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
   364  			}
   365  			// Pre-existing unmanaged Node labels should have been preserved on the Node.
   366  			for k, v := range unmanagedNodeLabelsToBePreserved {
   367  				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
   368  			}
   369  
   370  			return true
   371  		}, 10*time.Second).Should(BeTrue())
   372  
   373  		// Set InfrastructureMachine .status.interruptible to false.
   374  		interruptibleFalseInfraMachine := interruptibleTrueInfraMachine.DeepCopy()
   375  		g.Expect(unstructured.SetNestedMap(interruptibleFalseInfraMachine.Object, interruptibleFalseInfraMachineStatus, "status")).Should(Succeed())
   376  		g.Expect(env.Status().Patch(ctx, interruptibleFalseInfraMachine, client.MergeFrom(interruptibleTrueInfraMachine))).Should(Succeed())
   377  
   378  		// Remove managed labels from Machine.
   379  		modifiedMachine := machine.DeepCopy()
   380  		for k := range managedMachineLabels {
   381  			delete(modifiedMachine.Labels, k)
   382  		}
   383  		g.Expect(env.Patch(ctx, modifiedMachine, client.MergeFrom(machine))).To(Succeed())
   384  
    385  		// Validate that managed Machine labels were removed from the Node and that all other labels are unchanged.
   386  		g.Eventually(func(g Gomega) bool {
   387  			if err := env.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil {
   388  				return false
   389  			}
   390  
   391  			// Managed Machine Labels should have been removed from the Node now.
   392  			for k, v := range managedMachineLabels {
   393  				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
   394  			}
   395  
    396  			// Interruptible label should no longer be set on the Node.
   397  			g.Expect(node.Labels).NotTo(HaveKey(clusterv1.InterruptibleLabel))
   398  
    399  			// Unmanaged Machine labels should not have been synced to the Node at all.
   400  			for k, v := range unmanagedMachineLabels {
   401  				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
   402  			}
   403  
   404  			// Pre-existing managed Node labels have been overwritten earlier by the managed Machine labels.
   405  			// Now that the managed Machine labels have been removed, they should still not exist.
   406  			for k, v := range managedNodeLabelsToBeOverWritten {
   407  				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
   408  			}
   409  			// Pre-existing unmanaged Node labels should have been preserved on the Node.
   410  			for k, v := range unmanagedNodeLabelsToBePreserved {
   411  				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
   412  			}
   413  
   414  			return true
   415  		}, 10*time.Second).Should(BeTrue())
   416  	})
   417  }
   418  
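         // TestSummarizeNodeConditions checks the aggregated condition status computed by
         // summarizeNodeConditions for different combinations of Node conditions.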
   419  func TestSummarizeNodeConditions(t *testing.T) {
   420  	testCases := []struct {
   421  		name       string
   422  		conditions []corev1.NodeCondition
   423  		status     corev1.ConditionStatus
   424  	}{
   425  		{
   426  			name: "node is healthy",
   427  			conditions: []corev1.NodeCondition{
   428  				{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
   429  				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse},
   430  				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse},
   431  				{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse},
   432  			},
   433  			status: corev1.ConditionTrue,
   434  		},
   435  		{
   436  			name: "all conditions are unknown",
   437  			conditions: []corev1.NodeCondition{
   438  				{Type: corev1.NodeReady, Status: corev1.ConditionUnknown},
   439  				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionUnknown},
   440  				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionUnknown},
   441  				{Type: corev1.NodePIDPressure, Status: corev1.ConditionUnknown},
   442  			},
   443  			status: corev1.ConditionUnknown,
   444  		},
   445  		{
    446  			name: "multiple semantically failed conditions",
   447  			conditions: []corev1.NodeCondition{
   448  				{Type: corev1.NodeReady, Status: corev1.ConditionUnknown},
   449  				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionTrue},
   450  				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionTrue},
   451  				{Type: corev1.NodePIDPressure, Status: corev1.ConditionTrue},
   452  			},
   453  			status: corev1.ConditionFalse,
   454  		},
   455  		{
   456  			name: "one positive condition when the rest is unknown",
   457  			conditions: []corev1.NodeCondition{
   458  				{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
   459  				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionUnknown},
   460  				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionUnknown},
   461  				{Type: corev1.NodePIDPressure, Status: corev1.ConditionUnknown},
   462  			},
   463  			status: corev1.ConditionTrue,
   464  		},
   465  	}
   466  	for _, test := range testCases {
   467  		t.Run(test.name, func(t *testing.T) {
   468  			g := NewWithT(t)
   469  			node := &corev1.Node{
   470  				ObjectMeta: metav1.ObjectMeta{
   471  					Name: "node-1",
   472  				},
   473  				Status: corev1.NodeStatus{
   474  					Conditions: test.conditions,
   475  				},
   476  			}
   477  			status, _ := summarizeNodeConditions(node)
   478  			g.Expect(status).To(Equal(test.status))
   479  		})
   480  	}
   481  }
   482  
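         // TestGetManagedLabels verifies that getManagedLabels returns only the labels
         // belonging to the CAPI-managed domains and prefixes, dropping all other labels.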
   483  func TestGetManagedLabels(t *testing.T) {
   484  	// Create managedLabels map from known managed prefixes.
   485  	managedLabels := map[string]string{
   486  		clusterv1.NodeRoleLabelPrefix + "/anyRole": "",
   487  
   488  		clusterv1.ManagedNodeLabelDomain:                                  "",
   489  		"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "",
   490  		clusterv1.ManagedNodeLabelDomain + "/anything":                    "",
   491  		"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "",
   492  
   493  		clusterv1.NodeRestrictionLabelDomain:                                  "",
   494  		"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "",
   495  		clusterv1.NodeRestrictionLabelDomain + "/anything":                    "",
   496  		"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "",
   497  	}
   498  
   499  	// Append arbitrary labels.
   500  	allLabels := map[string]string{
   501  		"foo":                               "",
   502  		"bar":                               "",
   503  		"company.xyz/node.cluster.x-k8s.io": "not-managed",
   504  		"gpu-node.cluster.x-k8s.io":         "not-managed",
   505  		"company.xyz/node-restriction.kubernetes.io": "not-managed",
   506  		"gpu-node-restriction.kubernetes.io":         "not-managed",
   507  	}
   508  	for k, v := range managedLabels {
   509  		allLabels[k] = v
   510  	}
   511  
   512  	g := NewWithT(t)
   513  	got := getManagedLabels(allLabels)
   514  	g.Expect(got).To(BeEquivalentTo(managedLabels))
   515  }
   516  
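         // TestPatchNode verifies that patchNode applies labels, annotations and taints
         // derived from the Machine to the Node: managed labels are tracked via the
         // LabelsFromMachineAnnotation, unmanaged labels and annotations are preserved,
         // and the NodeUninitializedTaint and NodeOutdatedRevisionTaint are added or
         // removed as appropriate.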
   517  func TestPatchNode(t *testing.T) {
   518  	clusterName := "test-cluster"
   519  
   520  	testCases := []struct {
   521  		name                string
   522  		oldNode             *corev1.Node
   523  		newLabels           map[string]string
   524  		newAnnotations      map[string]string
   525  		expectedLabels      map[string]string
   526  		expectedAnnotations map[string]string
   527  		expectedTaints      []corev1.Taint
   528  		machine             *clusterv1.Machine
   529  		ms                  *clusterv1.MachineSet
   530  		md                  *clusterv1.MachineDeployment
   531  	}{
   532  		{
    533  			name: "Check that patch works even if there are Status.Addresses with the same type",
   534  			oldNode: &corev1.Node{
   535  				ObjectMeta: metav1.ObjectMeta{
   536  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   537  				},
   538  				Status: corev1.NodeStatus{
   539  					Addresses: []corev1.NodeAddress{
   540  						{
   541  							Type:    corev1.NodeInternalIP,
   542  							Address: "1.1.1.1",
   543  						},
   544  						{
   545  							Type:    corev1.NodeInternalIP,
   546  							Address: "2.2.2.2",
   547  						},
   548  					},
   549  				},
   550  			},
   551  			newLabels:      map[string]string{"foo": "bar"},
   552  			expectedLabels: map[string]string{"foo": "bar"},
   553  			expectedAnnotations: map[string]string{
   554  				clusterv1.LabelsFromMachineAnnotation: "foo",
   555  			},
   556  			expectedTaints: []corev1.Taint{
   557  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   558  			},
   559  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   560  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   561  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   562  		},
   563  		// Labels (CAPI owns a subset of labels, everything else should be preserved)
   564  		{
    565  			name: "Existing labels should be preserved if there are no labels from machines",
   566  			oldNode: &corev1.Node{
   567  				ObjectMeta: metav1.ObjectMeta{
   568  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   569  					Labels: map[string]string{
   570  						"not-managed-by-capi": "foo",
   571  					},
   572  				},
   573  			},
   574  			expectedLabels: map[string]string{
   575  				"not-managed-by-capi": "foo",
   576  			},
   577  			expectedTaints: []corev1.Taint{
   578  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   579  			},
   580  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   581  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   582  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   583  		},
   584  		{
   585  			name: "Add label must preserve existing labels",
   586  			oldNode: &corev1.Node{
   587  				ObjectMeta: metav1.ObjectMeta{
   588  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   589  					Labels: map[string]string{
   590  						"not-managed-by-capi": "foo",
   591  					},
   592  				},
   593  			},
   594  			newLabels: map[string]string{
   595  				"label-from-machine": "foo",
   596  			},
   597  			expectedLabels: map[string]string{
   598  				"not-managed-by-capi": "foo",
   599  				"label-from-machine":  "foo",
   600  			},
   601  			expectedAnnotations: map[string]string{
   602  				clusterv1.LabelsFromMachineAnnotation: "label-from-machine",
   603  			},
   604  			expectedTaints: []corev1.Taint{
   605  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   606  			},
   607  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   608  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   609  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   610  		},
   611  		{
   612  			name: "CAPI takes ownership of existing labels if they are set from machines",
   613  			oldNode: &corev1.Node{
   614  				ObjectMeta: metav1.ObjectMeta{
   615  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   616  					Labels: map[string]string{
   617  						clusterv1.NodeRoleLabelPrefix: "foo",
   618  					},
   619  				},
   620  			},
   621  			newLabels: map[string]string{
   622  				clusterv1.NodeRoleLabelPrefix: "control-plane",
   623  			},
   624  			expectedLabels: map[string]string{
   625  				clusterv1.NodeRoleLabelPrefix: "control-plane",
   626  			},
   627  			expectedAnnotations: map[string]string{
   628  				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
   629  			},
   630  			expectedTaints: []corev1.Taint{
   631  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   632  			},
   633  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   634  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   635  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   636  		},
   637  		{
   638  			name: "change a label previously set from machines",
   639  			oldNode: &corev1.Node{
   640  				ObjectMeta: metav1.ObjectMeta{
   641  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   642  					Labels: map[string]string{
   643  						clusterv1.NodeRoleLabelPrefix: "foo",
   644  					},
   645  					Annotations: map[string]string{
   646  						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
   647  					},
   648  				},
   649  			},
   650  			newLabels: map[string]string{
   651  				clusterv1.NodeRoleLabelPrefix: "control-plane",
   652  			},
   653  			expectedLabels: map[string]string{
   654  				clusterv1.NodeRoleLabelPrefix: "control-plane",
   655  			},
   656  			expectedAnnotations: map[string]string{
   657  				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
   658  			},
   659  			expectedTaints: []corev1.Taint{
   660  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   661  			},
   662  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   663  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   664  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   665  		},
   666  		{
   667  			name: "Delete a label previously set from machines",
   668  			oldNode: &corev1.Node{
   669  				ObjectMeta: metav1.ObjectMeta{
   670  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   671  					Labels: map[string]string{
   672  						clusterv1.NodeRoleLabelPrefix: "foo",
   673  						"not-managed-by-capi":         "foo",
   674  					},
   675  					Annotations: map[string]string{
   676  						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
   677  					},
   678  				},
   679  			},
   680  			expectedLabels: map[string]string{
   681  				"not-managed-by-capi": "foo",
   682  			},
   683  			expectedAnnotations: map[string]string{
   684  				clusterv1.LabelsFromMachineAnnotation: "",
   685  			},
   686  			expectedTaints: []corev1.Taint{
   687  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   688  			},
   689  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   690  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   691  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   692  		},
   693  		{
   694  			name: "Label previously set from machine, already removed out of band, annotation should be cleaned up",
   695  			oldNode: &corev1.Node{
   696  				ObjectMeta: metav1.ObjectMeta{
   697  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   698  					Annotations: map[string]string{
   699  						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
   700  					},
   701  				},
   702  			},
   703  			expectedAnnotations: map[string]string{
   704  				clusterv1.LabelsFromMachineAnnotation: "",
   705  			},
   706  			expectedTaints: []corev1.Taint{
   707  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   708  			},
   709  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   710  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   711  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   712  		},
   713  		// Add annotations (CAPI only enforces some annotations and never changes or removes them)
   714  		{
   715  			name: "Add CAPI annotations",
   716  			oldNode: &corev1.Node{
   717  				ObjectMeta: metav1.ObjectMeta{
   718  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   719  					Annotations: map[string]string{
   720  						"not-managed-by-capi": "foo",
   721  					},
   722  				},
   723  			},
   724  			newAnnotations: map[string]string{
   725  				clusterv1.ClusterNameAnnotation:      "foo",
   726  				clusterv1.ClusterNamespaceAnnotation: "bar",
   727  				clusterv1.MachineAnnotation:          "baz",
   728  			},
   729  			expectedAnnotations: map[string]string{
   730  				clusterv1.ClusterNameAnnotation:       "foo",
   731  				clusterv1.ClusterNamespaceAnnotation:  "bar",
   732  				clusterv1.MachineAnnotation:           "baz",
   733  				"not-managed-by-capi":                 "foo",
   734  				clusterv1.LabelsFromMachineAnnotation: "",
   735  			},
   736  			expectedTaints: []corev1.Taint{
   737  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   738  			},
   739  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   740  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   741  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   742  		},
    743  		// Taints (CAPI only removes one taint if it exists; other taints should be preserved)
   744  		{
   745  			name: "Removes NodeUninitializedTaint if present",
   746  			oldNode: &corev1.Node{
   747  				ObjectMeta: metav1.ObjectMeta{
   748  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   749  				},
   750  				Spec: corev1.NodeSpec{
   751  					Taints: []corev1.Taint{
   752  						{
   753  							Key:    "node-role.kubernetes.io/control-plane",
   754  							Effect: corev1.TaintEffectNoSchedule,
   755  						},
   756  						clusterv1.NodeUninitializedTaint,
   757  					},
   758  				},
   759  			},
   760  			expectedAnnotations: map[string]string{
   761  				clusterv1.LabelsFromMachineAnnotation: "",
   762  			},
   763  			expectedTaints: []corev1.Taint{
   764  				{
   765  					Key:    "node-role.kubernetes.io/control-plane",
   766  					Effect: corev1.TaintEffectNoSchedule,
   767  				},
   768  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   769  			},
   770  			machine: newFakeMachine(metav1.NamespaceDefault, clusterName),
   771  			ms:      newFakeMachineSet(metav1.NamespaceDefault, clusterName),
   772  			md:      newFakeMachineDeployment(metav1.NamespaceDefault, clusterName),
   773  		},
   774  		{
    775  			name: "Ensure NodeOutdatedRevisionTaint is set if a node is associated with an outdated MachineSet",
   776  			oldNode: &corev1.Node{
   777  				ObjectMeta: metav1.ObjectMeta{
   778  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   779  				},
   780  			},
   781  			expectedAnnotations: map[string]string{
   782  				clusterv1.LabelsFromMachineAnnotation: "",
   783  			},
   784  			expectedTaints: []corev1.Taint{
   785  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   786  				clusterv1.NodeOutdatedRevisionTaint,
   787  			},
   788  			machine: &clusterv1.Machine{
   789  				ObjectMeta: metav1.ObjectMeta{
   790  					Name:      fmt.Sprintf("ma-%s", util.RandomString(6)),
   791  					Namespace: metav1.NamespaceDefault,
   792  					Labels: map[string]string{
   793  						clusterv1.MachineSetNameLabel:        "test-ms-outdated",
   794  						clusterv1.MachineDeploymentNameLabel: "test-md-outdated",
   795  					},
   796  					OwnerReferences: []metav1.OwnerReference{{
   797  						Kind:       "MachineSet",
   798  						Name:       "test-ms-outdated",
   799  						APIVersion: clusterv1.GroupVersion.String(),
   800  						UID:        "uid",
   801  					}},
   802  				},
   803  				Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   804  			},
   805  			ms: &clusterv1.MachineSet{
   806  				ObjectMeta: metav1.ObjectMeta{
   807  					Name:      "test-ms-outdated",
   808  					Namespace: metav1.NamespaceDefault,
   809  					Annotations: map[string]string{
   810  						clusterv1.RevisionAnnotation: "1",
   811  					},
   812  				},
   813  				Spec: clusterv1.MachineSetSpec{
   814  					ClusterName: clusterName,
   815  					Template: clusterv1.MachineTemplateSpec{
   816  						Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   817  					},
   818  				},
   819  			},
   820  			md: &clusterv1.MachineDeployment{
   821  				ObjectMeta: metav1.ObjectMeta{
   822  					Name:      "test-md-outdated",
   823  					Namespace: metav1.NamespaceDefault,
   824  					Annotations: map[string]string{
   825  						clusterv1.RevisionAnnotation: "2",
   826  					},
   827  				},
   828  				Spec: clusterv1.MachineDeploymentSpec{
   829  					ClusterName: clusterName,
   830  					Template: clusterv1.MachineTemplateSpec{
   831  						Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   832  					},
   833  				},
   834  			},
   835  		},
   836  		{
    837  			name: "Removes NodeOutdatedRevisionTaint if a node is associated with a non-outdated MachineSet",
   838  			oldNode: &corev1.Node{
   839  				ObjectMeta: metav1.ObjectMeta{
   840  					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
   841  				},
   842  				Spec: corev1.NodeSpec{
   843  					Taints: []corev1.Taint{
   844  						clusterv1.NodeOutdatedRevisionTaint,
   845  					},
   846  				},
   847  			},
   848  			expectedAnnotations: map[string]string{
   849  				clusterv1.LabelsFromMachineAnnotation: "",
   850  			},
   851  			expectedTaints: []corev1.Taint{
   852  				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
   853  			},
   854  			machine: &clusterv1.Machine{
   855  				ObjectMeta: metav1.ObjectMeta{
   856  					Name:      fmt.Sprintf("ma-%s", util.RandomString(6)),
   857  					Namespace: metav1.NamespaceDefault,
   858  					Labels: map[string]string{
   859  						clusterv1.MachineSetNameLabel:        "test-ms-not-outdated",
   860  						clusterv1.MachineDeploymentNameLabel: "test-md-not-outdated",
   861  					},
   862  					OwnerReferences: []metav1.OwnerReference{{
   863  						Kind:       "MachineSet",
   864  						Name:       "test-ms-not-outdated",
   865  						APIVersion: clusterv1.GroupVersion.String(),
   866  						UID:        "uid",
   867  					}},
   868  				},
   869  				Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   870  			},
   871  			ms: &clusterv1.MachineSet{
   872  				ObjectMeta: metav1.ObjectMeta{
   873  					Name:      "test-ms-not-outdated",
   874  					Namespace: metav1.NamespaceDefault,
   875  					Annotations: map[string]string{
   876  						clusterv1.RevisionAnnotation: "3",
   877  					},
   878  				},
   879  				Spec: clusterv1.MachineSetSpec{
   880  					ClusterName: clusterName,
   881  					Template: clusterv1.MachineTemplateSpec{
   882  						Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   883  					},
   884  				},
   885  			},
   886  			md: &clusterv1.MachineDeployment{
   887  				ObjectMeta: metav1.ObjectMeta{
   888  					Name:      "test-md-not-outdated",
   889  					Namespace: metav1.NamespaceDefault,
   890  					Annotations: map[string]string{
   891  						clusterv1.RevisionAnnotation: "2",
   892  					},
   893  				},
   894  				Spec: clusterv1.MachineDeploymentSpec{
   895  					ClusterName: clusterName,
   896  					Template: clusterv1.MachineTemplateSpec{
   897  						Spec: newFakeMachineSpec(metav1.NamespaceDefault, clusterName),
   898  					},
   899  				},
   900  			},
   901  		},
   902  	}
   903  
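         	// Reconciler under test; patchNode is called directly against the envtest client.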
   904  	r := Reconciler{
   905  		Client:                    env,
   906  		UnstructuredCachingClient: env,
   907  	}
   908  	for _, tc := range testCases {
   909  		t.Run(tc.name, func(t *testing.T) {
   910  			g := NewWithT(t)
   911  			oldNode := tc.oldNode.DeepCopy()
   912  			machine := tc.machine.DeepCopy()
   913  			ms := tc.ms.DeepCopy()
   914  			md := tc.md.DeepCopy()
   915  
   916  			g.Expect(env.CreateAndWait(ctx, oldNode)).To(Succeed())
   917  			g.Expect(env.CreateAndWait(ctx, machine)).To(Succeed())
   918  			g.Expect(env.CreateAndWait(ctx, ms)).To(Succeed())
   919  			g.Expect(env.CreateAndWait(ctx, md)).To(Succeed())
   920  			t.Cleanup(func() {
   921  				_ = env.CleanupAndWait(ctx, oldNode, machine, ms, md)
   922  			})
   923  
   924  			err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations, tc.machine)
   925  			g.Expect(err).ToNot(HaveOccurred())
   926  
   927  			g.Eventually(func(g Gomega) {
   928  				gotNode := &corev1.Node{}
   929  				err = env.Get(ctx, client.ObjectKeyFromObject(oldNode), gotNode)
   930  				g.Expect(err).ToNot(HaveOccurred())
   931  
   932  				g.Expect(gotNode.Labels).To(BeComparableTo(tc.expectedLabels))
   933  				g.Expect(gotNode.Annotations).To(BeComparableTo(tc.expectedAnnotations))
   934  				g.Expect(gotNode.Spec.Taints).To(BeComparableTo(tc.expectedTaints))
   935  			}, 10*time.Second).Should(Succeed())
   936  		})
   937  	}
   938  }
   939  
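         // newFakeMachineSpec returns a minimal MachineSpec for the given cluster,
         // referencing generic bootstrap and infrastructure templates.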
   940  func newFakeMachineSpec(namespace, clusterName string) clusterv1.MachineSpec {
   941  	return clusterv1.MachineSpec{
   942  		ClusterName: clusterName,
   943  		Bootstrap: clusterv1.Bootstrap{
   944  			ConfigRef: &corev1.ObjectReference{
   945  				APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha3",
   946  				Kind:       "KubeadmConfigTemplate",
   947  				Name:       fmt.Sprintf("%s-md-0", clusterName),
   948  				Namespace:  namespace,
   949  			},
   950  		},
   951  		InfrastructureRef: corev1.ObjectReference{
   952  			APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha3",
   953  			Kind:       "FakeMachineTemplate",
   954  			Name:       fmt.Sprintf("%s-md-0", clusterName),
   955  			Namespace:  namespace,
   956  		},
   957  	}
   958  }
   959  
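         // newFakeMachine returns a Machine with a random name in the given namespace,
         // using newFakeMachineSpec for its spec.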
   960  func newFakeMachine(namespace, clusterName string) *clusterv1.Machine {
   961  	return &clusterv1.Machine{
   962  		ObjectMeta: metav1.ObjectMeta{
   963  			Name:      fmt.Sprintf("ma-%s", util.RandomString(6)),
   964  			Namespace: namespace,
   965  		},
   966  		Spec: newFakeMachineSpec(namespace, clusterName),
   967  	}
   968  }
   969  
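         // newFakeMachineSet returns a MachineSet with a random name whose template is
         // based on newFakeMachineSpec.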
   970  func newFakeMachineSet(namespace, clusterName string) *clusterv1.MachineSet {
   971  	return &clusterv1.MachineSet{
   972  		ObjectMeta: metav1.ObjectMeta{
   973  			Name:      fmt.Sprintf("ms-%s", util.RandomString(6)),
   974  			Namespace: namespace,
   975  		},
   976  		Spec: clusterv1.MachineSetSpec{
   977  			ClusterName: clusterName,
   978  			Template: clusterv1.MachineTemplateSpec{
   979  				Spec: newFakeMachineSpec(namespace, clusterName),
   980  			},
   981  		},
   982  	}
   983  }
   984  
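         // newFakeMachineDeployment returns a MachineDeployment with a random name whose
         // template is based on newFakeMachineSpec.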
   985  func newFakeMachineDeployment(namespace, clusterName string) *clusterv1.MachineDeployment {
   986  	return &clusterv1.MachineDeployment{
   987  		ObjectMeta: metav1.ObjectMeta{
   988  			Name:      fmt.Sprintf("md-%s", util.RandomString(6)),
   989  			Namespace: namespace,
   990  		},
   991  		Spec: clusterv1.MachineDeploymentSpec{
   992  			ClusterName: clusterName,
   993  			Template: clusterv1.MachineTemplateSpec{
   994  				Spec: newFakeMachineSpec(namespace, clusterName),
   995  			},
   996  		},
   997  	}
   998  }