sigs.k8s.io/cluster-api@v1.6.3/internal/controllers/machine/machine_controller_noderef_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package machine

import (
	"context"
	"fmt"
	"testing"
	"time"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/utils/pointer"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/controllers/remote"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/kubeconfig"
)

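// TestGetNode creates a Cluster plus a set of Nodes with different provider IDs in
// envtest and exercises Reconciler.getNode through the ClusterCacheTracker's cached
// client, verifying that Nodes are looked up by spec.providerID and that a missing
// Node surfaces ErrNodeNotFound.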
func TestGetNode(t *testing.T) {
	g := NewWithT(t)

	ns, err := env.CreateNamespace(ctx, "test-get-node")
	g.Expect(err).ToNot(HaveOccurred())

	// Set up cluster to test against.
	testCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "test-get-node-",
			Namespace:    ns.Name,
		},
	}

	g.Expect(env.Create(ctx, testCluster)).To(Succeed())
	g.Expect(env.CreateKubeconfigSecret(ctx, testCluster)).To(Succeed())
	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(ns, testCluster)

	testCases := []struct {
		name            string
		node            *corev1.Node
		providerIDInput string
		error           error
	}{
		{
			name: "full providerID matches",
			node: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-get-node-node-1",
				},
				Spec: corev1.NodeSpec{
					ProviderID: "aws://us-east-1/test-get-node-1",
				},
			},
			providerIDInput: "aws://us-east-1/test-get-node-1",
		},
		{
			name: "aws prefix: cloudProvider and ID matches",
			node: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-get-node-node-2",
				},
				Spec: corev1.NodeSpec{
					ProviderID: "aws://us-west-2/test-get-node-2",
				},
			},
			providerIDInput: "aws://us-west-2/test-get-node-2",
		},
		{
			name: "gce prefix, cloudProvider and ID matches",
			node: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-get-node-gce-node-2",
				},
				Spec: corev1.NodeSpec{
					ProviderID: "gce://us-central1/test-get-node-2",
				},
			},
			providerIDInput: "gce://us-central1/test-get-node-2",
		},
		{
			name: "Node is not found",
			node: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-get-node-not-found",
				},
				Spec: corev1.NodeSpec{
					ProviderID: "gce://us-central1/anything",
				},
			},
			providerIDInput: "gce://not-found",
			error:           ErrNodeNotFound,
		},
	}

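	// Create all the test Nodes up front and defer a single cleanup for the whole set.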
	nodesToCleanup := make([]client.Object, 0, len(testCases))
	for _, tc := range testCases {
		g.Expect(env.Create(ctx, tc.node)).To(Succeed())
		nodesToCleanup = append(nodesToCleanup, tc.node)
	}
	defer func(do ...client.Object) {
		g.Expect(env.Cleanup(ctx, do...)).To(Succeed())
	}(nodesToCleanup...)

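	// The ClusterCacheTracker provides cached clients for workload clusters; the
	// NodeProviderIDIndex is what lets getNode look Nodes up by spec.providerID.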
	tracker, err := remote.NewClusterCacheTracker(
		env.Manager, remote.ClusterCacheTrackerOptions{
			Indexes: []remote.Index{remote.NodeProviderIDIndex},
		},
	)
	g.Expect(err).ToNot(HaveOccurred())

	r := &Reconciler{
		Tracker:                   tracker,
		Client:                    env,
		UnstructuredCachingClient: env,
	}

	w, err := ctrl.NewControllerManagedBy(env.Manager).For(&corev1.Node{}).Build(r)
	g.Expect(err).ToNot(HaveOccurred())

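	// Registering a watch on Nodes forces the tracker to create the remote cache
	// (and its indexes) for the test Cluster; the event handler itself is a no-op.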
	g.Expect(tracker.Watch(ctx, remote.WatchInput{
		Name:    "TestGetNode",
		Cluster: util.ObjectKey(testCluster),
		Watcher: w,
		Kind:    &corev1.Node{},
		EventHandler: handler.EnqueueRequestsFromMapFunc(func(context.Context, client.Object) []reconcile.Request {
			return nil
		}),
	})).To(Succeed())

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
			remoteClient, err := r.Tracker.GetClient(ctx, util.ObjectKey(testCluster))
			g.Expect(err).ToNot(HaveOccurred())

			node, err := r.getNode(ctx, remoteClient, tc.providerIDInput)
			if tc.error != nil {
				g.Expect(err).To(Equal(tc.error))
				return
			}
			g.Expect(err).ToNot(HaveOccurred())
			g.Expect(node.Name).To(Equal(tc.node.Name))
		})
	}
}

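// TestNodeLabelSync verifies that the Machine controller syncs managed labels
// (the node-role, node-restriction.kubernetes.io, and node.cluster.x-k8s.io domains)
// from a Machine to its Node, sets the interruptible label based on the
// InfrastructureMachine status, and leaves unmanaged labels on both objects untouched.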
func TestNodeLabelSync(t *testing.T) {
	defaultCluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "test-cluster",
			Namespace: metav1.NamespaceDefault,
		},
	}

	defaultInfraMachine := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "GenericInfrastructureMachine",
			"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1",
			"metadata": map[string]interface{}{
				"name":      "infra-config1",
				"namespace": metav1.NamespaceDefault,
			},
		},
	}

	defaultMachine := clusterv1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "machine-test",
			Namespace: metav1.NamespaceDefault,
			Labels: map[string]string{
				clusterv1.MachineControlPlaneLabel: "",
			},
		},
		Spec: clusterv1.MachineSpec{
			ClusterName: defaultCluster.Name,
			Bootstrap: clusterv1.Bootstrap{
				ConfigRef: &corev1.ObjectReference{
					APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1",
					Kind:       "GenericBootstrapConfig",
					Name:       "bootstrap-config1",
				},
			},
			InfrastructureRef: corev1.ObjectReference{
				APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1",
				Kind:       "GenericInfrastructureMachine",
				Name:       "infra-config1",
			},
		},
	}

	t.Run("Should sync node labels", func(t *testing.T) {
		g := NewWithT(t)

		ns, err := env.CreateNamespace(ctx, "test-node-label-sync")
		g.Expect(err).ToNot(HaveOccurred())
		defer func() {
			g.Expect(env.Cleanup(ctx, ns)).To(Succeed())
		}()

		nodeProviderID := fmt.Sprintf("test://%s", util.RandomString(6))

		cluster := defaultCluster.DeepCopy()
		cluster.Namespace = ns.Name

		infraMachine := defaultInfraMachine.DeepCopy()
		infraMachine.SetNamespace(ns.Name)

		interruptibleTrueInfraMachineStatus := map[string]interface{}{
			"interruptible": true,
			"ready":         true,
		}
		interruptibleFalseInfraMachineStatus := map[string]interface{}{
			"interruptible": false,
			"ready":         true,
		}

		machine := defaultMachine.DeepCopy()
		machine.Namespace = ns.Name
		machine.Spec.ProviderID = pointer.String(nodeProviderID)

		// Set Machine labels.
		machine.Labels = map[string]string{}
		// The expectation is that these labels will be synced to the Node.
		managedMachineLabels := map[string]string{
			clusterv1.NodeRoleLabelPrefix + "/anyRole": "",

			clusterv1.ManagedNodeLabelDomain:                                  "valueFromMachine",
			"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "valueFromMachine",
			clusterv1.ManagedNodeLabelDomain + "/anything":                    "valueFromMachine",
			"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "valueFromMachine",

			clusterv1.NodeRestrictionLabelDomain:                                  "valueFromMachine",
			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "valueFromMachine",
			clusterv1.NodeRestrictionLabelDomain + "/anything":                    "valueFromMachine",
			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "valueFromMachine",
		}
		for k, v := range managedMachineLabels {
			machine.Labels[k] = v
		}
		// The expectation is that these labels will not be synced to the Node.
		unmanagedMachineLabels := map[string]string{
			"foo":                               "",
			"bar":                               "",
			"company.xyz/node.cluster.x-k8s.io": "not-managed",
			"gpu-node.cluster.x-k8s.io":         "not-managed",
			"company.xyz/node-restriction.kubernetes.io": "not-managed",
			"gpu-node-restriction.kubernetes.io":         "not-managed",
		}
		for k, v := range unmanagedMachineLabels {
			machine.Labels[k] = v
		}

		// Create Node.
		node := &corev1.Node{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "machine-test-node-",
			},
			Spec: corev1.NodeSpec{ProviderID: nodeProviderID},
			Status: corev1.NodeStatus{
				Addresses: []corev1.NodeAddress{
					{
						Type:    corev1.NodeInternalIP,
						Address: "1.1.1.1",
					},
					{
						Type:    corev1.NodeInternalIP,
						Address: "2.2.2.2",
					},
				},
			},
		}

		// Set Node labels.
		// The expectation is that these labels will be overwritten with the labels
		// from the Machine by the node label sync.
		node.Labels = map[string]string{}
		managedNodeLabelsToBeOverWritten := map[string]string{
			clusterv1.NodeRoleLabelPrefix + "/anyRole": "valueFromNode",

			clusterv1.ManagedNodeLabelDomain:                                  "valueFromNode",
			"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "valueFromNode",
			clusterv1.ManagedNodeLabelDomain + "/anything":                    "valueFromNode",
			"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "valueFromNode",

			clusterv1.NodeRestrictionLabelDomain:                                  "valueFromNode",
			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "valueFromNode",
			clusterv1.NodeRestrictionLabelDomain + "/anything":                    "valueFromNode",
			"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "valueFromNode",
		}
		for k, v := range managedNodeLabelsToBeOverWritten {
			node.Labels[k] = v
		}
		// The expectation is that these labels will be preserved by the node label sync.
		unmanagedNodeLabelsToBePreserved := map[string]string{
			"node-role.kubernetes.io/control-plane": "",
			"label":                                 "valueFromNode",
		}
		for k, v := range unmanagedNodeLabelsToBePreserved {
			node.Labels[k] = v
		}

		g.Expect(env.Create(ctx, node)).To(Succeed())
		defer func() {
			g.Expect(env.Cleanup(ctx, node)).To(Succeed())
		}()

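		// Create the Cluster and a kubeconfig Secret pointing back at the envtest API
		// server, so the Machine controller's remote client can reach the "workload"
		// cluster (which is the same test API server here).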
		g.Expect(env.Create(ctx, cluster)).To(Succeed())
		defaultKubeconfigSecret := kubeconfig.GenerateSecret(cluster, kubeconfig.FromEnvTestConfig(env.Config, cluster))
		g.Expect(env.Create(ctx, defaultKubeconfigSecret)).To(Succeed())

		g.Expect(env.Create(ctx, infraMachine)).To(Succeed())
		// Set InfrastructureMachine .status.interruptible and .status.ready to true.
		interruptibleTrueInfraMachine := infraMachine.DeepCopy()
		g.Expect(unstructured.SetNestedMap(interruptibleTrueInfraMachine.Object, interruptibleTrueInfraMachineStatus, "status")).Should(Succeed())
		g.Expect(env.Status().Patch(ctx, interruptibleTrueInfraMachine, client.MergeFrom(infraMachine))).Should(Succeed())

		g.Expect(env.Create(ctx, machine)).To(Succeed())

		// Validate that the right labels were synced to the Node.
		g.Eventually(func(g Gomega) bool {
			if err := env.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil {
				return false
			}

			// Managed Machine Labels should have been synced to the Node.
			for k, v := range managedMachineLabels {
				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
			}

			// Interruptible label should be set on the node.
			g.Expect(node.Labels).To(HaveKey(clusterv1.InterruptibleLabel))

			// Unmanaged Machine labels should not have been synced to the Node.
			for k, v := range unmanagedMachineLabels {
				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
			}

			// Pre-existing managed Node labels should have been overwritten on the Node.
			for k, v := range managedNodeLabelsToBeOverWritten {
				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
			}
			// Pre-existing unmanaged Node labels should have been preserved on the Node.
			for k, v := range unmanagedNodeLabelsToBePreserved {
				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
			}

			return true
		}, 10*time.Second).Should(BeTrue())

		// Set InfrastructureMachine .status.interruptible to false.
		interruptibleFalseInfraMachine := interruptibleTrueInfraMachine.DeepCopy()
		g.Expect(unstructured.SetNestedMap(interruptibleFalseInfraMachine.Object, interruptibleFalseInfraMachineStatus, "status")).Should(Succeed())
		g.Expect(env.Status().Patch(ctx, interruptibleFalseInfraMachine, client.MergeFrom(interruptibleTrueInfraMachine))).Should(Succeed())

		// Remove managed labels from Machine.
		modifiedMachine := machine.DeepCopy()
		for k := range managedMachineLabels {
			delete(modifiedMachine.Labels, k)
		}
		g.Expect(env.Patch(ctx, modifiedMachine, client.MergeFrom(machine))).To(Succeed())

		// Validate that the managed Machine labels were removed from the Node and that all other labels are unchanged.
		g.Eventually(func(g Gomega) bool {
			if err := env.Get(ctx, client.ObjectKeyFromObject(node), node); err != nil {
				return false
			}

			// Managed Machine Labels should have been removed from the Node now.
			for k, v := range managedMachineLabels {
				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
			}

			// Interruptible label should no longer be set on the Node.
			g.Expect(node.Labels).NotTo(HaveKey(clusterv1.InterruptibleLabel))

			// Unmanaged Machine labels should not have been synced at all to the Node.
			for k, v := range unmanagedMachineLabels {
				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
			}

			// Pre-existing managed Node labels have been overwritten earlier by the managed Machine labels.
			// Now that the managed Machine labels have been removed, they should still not exist.
			for k, v := range managedNodeLabelsToBeOverWritten {
				g.Expect(node.Labels).ToNot(HaveKeyWithValue(k, v))
			}
			// Pre-existing unmanaged Node labels should have been preserved on the Node.
			for k, v := range unmanagedNodeLabelsToBePreserved {
				g.Expect(node.Labels).To(HaveKeyWithValue(k, v))
			}

			return true
		}, 10*time.Second).Should(BeTrue())
	})
}

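// TestSummarizeNodeConditions checks how summarizeNodeConditions collapses the
// standard Node conditions (Ready, MemoryPressure, DiskPressure, PIDPressure)
// into a single True/False/Unknown status.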
func TestSummarizeNodeConditions(t *testing.T) {
	testCases := []struct {
		name       string
		conditions []corev1.NodeCondition
		status     corev1.ConditionStatus
	}{
		{
			name: "node is healthy",
			conditions: []corev1.NodeCondition{
				{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse},
				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse},
				{Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse},
			},
			status: corev1.ConditionTrue,
		},
		{
			name: "all conditions are unknown",
			conditions: []corev1.NodeCondition{
				{Type: corev1.NodeReady, Status: corev1.ConditionUnknown},
				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionUnknown},
				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionUnknown},
				{Type: corev1.NodePIDPressure, Status: corev1.ConditionUnknown},
			},
			status: corev1.ConditionUnknown,
		},
		{
			name: "multiple semantically failed conditions",
			conditions: []corev1.NodeCondition{
				{Type: corev1.NodeReady, Status: corev1.ConditionUnknown},
				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionTrue},
				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionTrue},
				{Type: corev1.NodePIDPressure, Status: corev1.ConditionTrue},
			},
			status: corev1.ConditionFalse,
		},
		{
			name: "one positive condition when the rest is unknown",
			conditions: []corev1.NodeCondition{
				{Type: corev1.NodeReady, Status: corev1.ConditionTrue},
				{Type: corev1.NodeMemoryPressure, Status: corev1.ConditionUnknown},
				{Type: corev1.NodeDiskPressure, Status: corev1.ConditionUnknown},
				{Type: corev1.NodePIDPressure, Status: corev1.ConditionUnknown},
			},
			status: corev1.ConditionTrue,
		},
	}
	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			g := NewWithT(t)
			node := &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: "node-1",
				},
				Status: corev1.NodeStatus{
					Conditions: test.conditions,
				},
			}
			status, _ := summarizeNodeConditions(node)
			g.Expect(status).To(Equal(test.status))
		})
	}
}

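// TestGetManagedLabels verifies that getManagedLabels filters a label map down to
// the CAPI-managed domains (node-role.kubernetes.io, node-restriction.kubernetes.io
// and node.cluster.x-k8s.io, including custom-prefixed forms) and drops everything else.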
func TestGetManagedLabels(t *testing.T) {
	// Create managedLabels map from known managed prefixes.
	managedLabels := map[string]string{
		clusterv1.NodeRoleLabelPrefix + "/anyRole": "",

		clusterv1.ManagedNodeLabelDomain:                                  "",
		"custom-prefix." + clusterv1.ManagedNodeLabelDomain:               "",
		clusterv1.ManagedNodeLabelDomain + "/anything":                    "",
		"custom-prefix." + clusterv1.ManagedNodeLabelDomain + "/anything": "",

		clusterv1.NodeRestrictionLabelDomain:                                  "",
		"custom-prefix." + clusterv1.NodeRestrictionLabelDomain:               "",
		clusterv1.NodeRestrictionLabelDomain + "/anything":                    "",
		"custom-prefix." + clusterv1.NodeRestrictionLabelDomain + "/anything": "",
	}

	// Append arbitrary labels.
	allLabels := map[string]string{
		"foo":                               "",
		"bar":                               "",
		"company.xyz/node.cluster.x-k8s.io": "not-managed",
		"gpu-node.cluster.x-k8s.io":         "not-managed",
		"company.xyz/node-restriction.kubernetes.io": "not-managed",
		"gpu-node-restriction.kubernetes.io":         "not-managed",
	}
	for k, v := range managedLabels {
		allLabels[k] = v
	}

	g := NewWithT(t)
	got := getManagedLabels(allLabels)
	g.Expect(got).To(BeEquivalentTo(managedLabels))
}

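// TestPatchNode exercises Reconciler.patchNode against envtest Nodes: it covers
// label ownership tracking via the LabelsFromMachineAnnotation, preservation of
// labels and annotations not managed by CAPI, and removal of the
// NodeUninitializedTaint while other taints are left in place.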
func TestPatchNode(t *testing.T) {
	testCases := []struct {
		name                string
		oldNode             *corev1.Node
		newLabels           map[string]string
		newAnnotations      map[string]string
		expectedLabels      map[string]string
		expectedAnnotations map[string]string
		expectedTaints      []corev1.Taint
	}{
		{
			name: "Check that patch works even if there are Status.Addresses with the same key",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
				},
				Status: corev1.NodeStatus{
					Addresses: []corev1.NodeAddress{
						{
							Type:    corev1.NodeInternalIP,
							Address: "1.1.1.1",
						},
						{
							Type:    corev1.NodeInternalIP,
							Address: "2.2.2.2",
						},
					},
				},
			},
			newLabels:      map[string]string{"foo": "bar"},
			expectedLabels: map[string]string{"foo": "bar"},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: "foo",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		// Labels (CAPI owns a subset of labels, everything else should be preserved)
		{
			name: "Existing labels should be preserved if there are no labels from machines",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Labels: map[string]string{
						"not-managed-by-capi": "foo",
					},
				},
			},
			expectedLabels: map[string]string{
				"not-managed-by-capi": "foo",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		{
			name: "Add label must preserve existing labels",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Labels: map[string]string{
						"not-managed-by-capi": "foo",
					},
				},
			},
			newLabels: map[string]string{
				"label-from-machine": "foo",
			},
			expectedLabels: map[string]string{
				"not-managed-by-capi": "foo",
				"label-from-machine":  "foo",
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: "label-from-machine",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		{
			name: "CAPI takes ownership of existing labels if they are set from machines",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Labels: map[string]string{
						clusterv1.NodeRoleLabelPrefix: "foo",
					},
				},
			},
			newLabels: map[string]string{
				clusterv1.NodeRoleLabelPrefix: "control-plane",
			},
			expectedLabels: map[string]string{
				clusterv1.NodeRoleLabelPrefix: "control-plane",
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		{
			name: "Change a label previously set from machines",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Labels: map[string]string{
						clusterv1.NodeRoleLabelPrefix: "foo",
					},
					Annotations: map[string]string{
						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
					},
				},
			},
			newLabels: map[string]string{
				clusterv1.NodeRoleLabelPrefix: "control-plane",
			},
			expectedLabels: map[string]string{
				clusterv1.NodeRoleLabelPrefix: "control-plane",
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		{
			name: "Delete a label previously set from machines",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Labels: map[string]string{
						clusterv1.NodeRoleLabelPrefix: "foo",
						"not-managed-by-capi":         "foo",
					},
					Annotations: map[string]string{
						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
					},
				},
			},
			expectedLabels: map[string]string{
				"not-managed-by-capi": "foo",
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: "",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		{
			name: "Label previously set from machine, already removed out of band, annotation should be cleaned up",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Annotations: map[string]string{
						clusterv1.LabelsFromMachineAnnotation: clusterv1.NodeRoleLabelPrefix,
					},
				},
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: "",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		// Add annotations (CAPI only enforces some annotations and never changes or removes them)
		{
			name: "Add CAPI annotations",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
					Annotations: map[string]string{
						"not-managed-by-capi": "foo",
					},
				},
			},
			newAnnotations: map[string]string{
				clusterv1.ClusterNameAnnotation:      "foo",
				clusterv1.ClusterNamespaceAnnotation: "bar",
				clusterv1.MachineAnnotation:          "baz",
			},
			expectedAnnotations: map[string]string{
				clusterv1.ClusterNameAnnotation:       "foo",
				clusterv1.ClusterNamespaceAnnotation:  "bar",
				clusterv1.MachineAnnotation:           "baz",
				"not-managed-by-capi":                 "foo",
				clusterv1.LabelsFromMachineAnnotation: "",
			},
			expectedTaints: []corev1.Taint{
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
		// Taint (CAPI only removes one taint if it exists; other taints should be preserved)
		{
			name: "Removes NodeUninitializedTaint if present",
			oldNode: &corev1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("node-%s", util.RandomString(6)),
				},
				Spec: corev1.NodeSpec{
					Taints: []corev1.Taint{
						{
							Key:    "node-role.kubernetes.io/control-plane",
							Effect: corev1.TaintEffectNoSchedule,
						},
						clusterv1.NodeUninitializedTaint,
					},
				},
			},
			expectedAnnotations: map[string]string{
				clusterv1.LabelsFromMachineAnnotation: "",
			},
			expectedTaints: []corev1.Taint{
				{
					Key:    "node-role.kubernetes.io/control-plane",
					Effect: corev1.TaintEffectNoSchedule,
				},
				{Key: "node.kubernetes.io/not-ready", Effect: "NoSchedule"}, // Added by the API server
			},
		},
	}

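	// A minimal Reconciler is enough here: patchNode receives the client to patch
	// with explicitly, so no ClusterCacheTracker is needed.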
	r := Reconciler{
		Client:                    env,
		UnstructuredCachingClient: env,
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			g := NewWithT(t)
			oldNode := tc.oldNode.DeepCopy()

			g.Expect(env.Create(ctx, oldNode)).To(Succeed())
			t.Cleanup(func() {
				_ = env.Cleanup(ctx, oldNode)
			})

			err := r.patchNode(ctx, env, oldNode, tc.newLabels, tc.newAnnotations)
			g.Expect(err).ToNot(HaveOccurred())

			g.Eventually(func(g Gomega) {
				gotNode := &corev1.Node{}
				err = env.Get(ctx, client.ObjectKeyFromObject(oldNode), gotNode)
				g.Expect(err).ToNot(HaveOccurred())

				g.Expect(gotNode.Labels).To(BeComparableTo(tc.expectedLabels))
				g.Expect(gotNode.Annotations).To(BeComparableTo(tc.expectedAnnotations))
				g.Expect(gotNode.Spec.Taints).To(BeComparableTo(tc.expectedTaints))
			}, 10*time.Second).Should(Succeed())
		})
	}
}