sigs.k8s.io/cluster-api@v1.7.1/controllers/remote/cluster_cache_tracker_test.go

/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package remote

import (
	"context"
	"fmt"
	"testing"

	"github.com/davecgh/go-spew/spew"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
)

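// mapper is the handler.MapFunc used by the watch tests below: it maps any
// incoming object to a reconcile.Request whose name carries a "mapped-"
// prefix, so tests can verify that events flowed through this handler.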
func mapper(_ context.Context, i client.Object) []reconcile.Request {
	return []reconcile.Request{
		{
			NamespacedName: types.NamespacedName{
				Namespace: i.GetNamespace(),
				Name:      getMappedName(i.GetName()),
			},
		},
	}
}

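// getMappedName returns the request name that mapper produces for an object
// with the given name.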
func getMappedName(name string) string {
	return fmt.Sprintf("mapped-%s", name)
}

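// TestClusterCacheTracker exercises the ClusterCacheTracker against the shared
// test environment (env) and context (ctx) provided by the package's test
// suite setup: first the watch machinery, then the runningOnWorkloadCluster
// check.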
func TestClusterCacheTracker(t *testing.T) {
	t.Run("watching", func(t *testing.T) {
		var (
			mgr        manager.Manager
			mgrContext context.Context
			mgrCancel  context.CancelFunc
			cct        *ClusterCacheTracker
			k8sClient  client.Client
			c          *testController
			w          Watcher
			clusterA   *clusterv1.Cluster
		)

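		// setup starts a manager with a controller watching MachineDeployments,
		// builds a ClusterCacheTracker against it, and creates a test Cluster
		// (with kubeconfig Secret) inside a fresh namespace.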
		setup := func(t *testing.T, g *WithT) *corev1.Namespace {
			t.Helper()

			t.Log("Setting up a new manager")
			var err error
			mgr, err = manager.New(env.Config, manager.Options{
				Scheme: scheme.Scheme,
				Metrics: metricsserver.Options{
					BindAddress: "0",
				},
			})
			g.Expect(err).ToNot(HaveOccurred())

			c = &testController{
				ch: make(chan string),
			}
			w, err = ctrl.NewControllerManagedBy(mgr).For(&clusterv1.MachineDeployment{}).Build(c)
			g.Expect(err).ToNot(HaveOccurred())

			mgrContext, mgrCancel = context.WithCancel(ctx)
			t.Log("Starting the manager")
			go func() {
				g.Expect(mgr.Start(mgrContext)).To(Succeed())
			}()
			<-mgr.Elected()

			k8sClient = mgr.GetClient()

			t.Log("Setting up a ClusterCacheTracker")
			cct, err = NewClusterCacheTracker(mgr, ClusterCacheTrackerOptions{
				Indexes: []Index{NodeProviderIDIndex},
			})
			g.Expect(err).ToNot(HaveOccurred())

			t.Log("Creating a namespace for the test")
			ns, err := env.CreateNamespace(ctx, "cluster-cache-tracker-test")
			g.Expect(err).ToNot(HaveOccurred())

			t.Log("Creating a test cluster")
			clusterA = &clusterv1.Cluster{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: ns.GetName(),
					Name:      "test-cluster",
				},
			}
			g.Expect(k8sClient.Create(ctx, clusterA)).To(Succeed())
			conditions.MarkTrue(clusterA, clusterv1.ControlPlaneInitializedCondition)
			clusterA.Status.InfrastructureReady = true
			g.Expect(k8sClient.Status().Update(ctx, clusterA)).To(Succeed())

			t.Log("Creating a test cluster kubeconfig")
			g.Expect(env.CreateKubeconfigSecret(ctx, clusterA)).To(Succeed())

			return ns
		}

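		// teardown deletes the test objects, drains the final watch
		// notification caused by the Cluster deletion, and stops the manager.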
		teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace) {
			t.Helper()
			defer close(c.ch)

			t.Log("Deleting any Secrets")
			g.Expect(cleanupTestSecrets(ctx, k8sClient)).To(Succeed())
			t.Log("Deleting any Clusters")
			g.Expect(cleanupTestClusters(ctx, k8sClient)).To(Succeed())
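			// Deleting the Cluster above produces one last watch notification;
			// drain it and make sure no further events arrive.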
			g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name)))
			g.Consistently(c.ch).ShouldNot(Receive())
			t.Log("Deleting Namespace")
			g.Expect(env.Delete(ctx, ns)).To(Succeed())
			t.Log("Stopping the manager")
			mgrCancel()
		}

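		// Registering a watch with the same name twice must be idempotent: the
		// second Watch call succeeds without producing duplicate notifications.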
		t.Run("with the same name should succeed and not have duplicates", func(t *testing.T) {
			g := NewWithT(t)
			ns := setup(t, g)
			defer teardown(t, g, ns)

			t.Log("Creating the watch")
			g.Expect(cct.Watch(ctx, WatchInput{
				Name:         "watch1",
				Cluster:      util.ObjectKey(clusterA),
				Watcher:      w,
				Kind:         &clusterv1.Cluster{},
				EventHandler: handler.EnqueueRequestsFromMapFunc(mapper),
			})).To(Succeed())

			t.Log("Waiting to receive the watch notification")
			g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name)))

			t.Log("Ensuring no additional watch notifications arrive")
			g.Consistently(c.ch).ShouldNot(Receive())

			t.Log("Updating the cluster")
			clusterA.Annotations = map[string]string{
				"update1": "1",
			}
			g.Expect(k8sClient.Update(ctx, clusterA)).To(Succeed())

			t.Log("Waiting to receive the watch notification")
			g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name)))

			t.Log("Ensuring no additional watch notifications arrive")
			g.Consistently(c.ch).ShouldNot(Receive())

			t.Log("Creating the same watch a second time")
			g.Expect(cct.Watch(ctx, WatchInput{
				Name:         "watch1",
				Cluster:      util.ObjectKey(clusterA),
				Watcher:      w,
				Kind:         &clusterv1.Cluster{},
				EventHandler: handler.EnqueueRequestsFromMapFunc(mapper),
			})).To(Succeed())

			t.Log("Ensuring no additional watch notifications arrive")
			g.Consistently(c.ch).ShouldNot(Receive())

			t.Log("Updating the cluster")
			clusterA.Annotations["update1"] = "2"
			g.Expect(k8sClient.Update(ctx, clusterA)).To(Succeed())

			t.Log("Waiting to receive the watch notification")
			g.Expect(<-c.ch).To(Equal(getMappedName(clusterA.Name)))

			t.Log("Ensuring no additional watch notifications arrive")
			g.Consistently(c.ch).ShouldNot(Receive())
		})
	})

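	// runningOnWorkloadCluster should report true only when a Pod matching the
	// controller's name, namespace, and UID exists on the workload cluster.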
	t.Run("runningOnWorkloadCluster", func(t *testing.T) {
		tests := []struct {
			name                      string
			currentControllerMetadata *metav1.ObjectMeta
			clusterObjects            []client.Object
			expected                  bool
		}{
			{
				name: "should return true if the controller is running on the workload cluster",
				currentControllerMetadata: &metav1.ObjectMeta{
					Name:      "controller-pod",
					Namespace: "controller-pod-namespace",
					UID:       types.UID("controller-pod-uid"),
				},
				clusterObjects: []client.Object{
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "controller-pod",
							Namespace: "controller-pod-namespace",
							UID:       types.UID("controller-pod-uid"),
						},
					},
				},
				expected: true,
			},
			{
				name: "should return false if the controller is not running on the workload cluster: name mismatch",
				currentControllerMetadata: &metav1.ObjectMeta{
					Name:      "controller-pod",
					Namespace: "controller-pod-namespace",
					UID:       types.UID("controller-pod-uid"),
				},
				clusterObjects: []client.Object{
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "controller-pod-mismatch",
							Namespace: "controller-pod-namespace",
							UID:       types.UID("controller-pod-uid"),
						},
					},
				},
				expected: false,
			},
			{
				name: "should return false if the controller is not running on the workload cluster: namespace mismatch",
				currentControllerMetadata: &metav1.ObjectMeta{
					Name:      "controller-pod",
					Namespace: "controller-pod-namespace",
					UID:       types.UID("controller-pod-uid"),
				},
				clusterObjects: []client.Object{
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "controller-pod",
							Namespace: "controller-pod-namespace-mismatch",
							UID:       types.UID("controller-pod-uid"),
						},
					},
				},
				expected: false,
			},
			{
				name: "should return false if the controller is not running on the workload cluster: uid mismatch",
				currentControllerMetadata: &metav1.ObjectMeta{
					Name:      "controller-pod",
					Namespace: "controller-pod-namespace",
					UID:       types.UID("controller-pod-uid"),
				},
				clusterObjects: []client.Object{
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "controller-pod",
							Namespace: "controller-pod-namespace",
							UID:       types.UID("controller-pod-uid-mismatch"),
						},
					},
				},
				expected: false,
			},
			{
				name: "should return false if the controller is not running on the workload cluster: no pod in cluster",
				currentControllerMetadata: &metav1.ObjectMeta{
					Name:      "controller-pod",
					Namespace: "controller-pod-namespace",
					UID:       types.UID("controller-pod-uid"),
				},
				clusterObjects: []client.Object{},
				expected:       false,
			},
			{
				name:                      "should return false if the controller is not running on the workload cluster: no controller metadata",
				currentControllerMetadata: nil,
				clusterObjects: []client.Object{
					&corev1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "controller-pod",
							Namespace: "controller-pod-namespace",
							UID:       types.UID("controller-pod-uid"),
						},
					},
				},
				expected: false,
			},
		}

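		// Each case runs against a fake client seeded with the cluster-side
		// objects, so no test environment is needed here.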
		for _, tt := range tests {
			t.Run(tt.name, func(t *testing.T) {
				g := NewWithT(t)

				c := fake.NewClientBuilder().WithObjects(tt.clusterObjects...).Build()

				cct := &ClusterCacheTracker{
					controllerPodMetadata: tt.currentControllerMetadata,
				}

				found, err := cct.runningOnWorkloadCluster(ctx, c, client.ObjectKey{Name: "test-cluster", Namespace: "test-namespace"})
				g.Expect(err).ToNot(HaveOccurred())
				g.Expect(found).To(Equal(tt.expected))
			})
		}
	})
}

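// testController is a minimal reconciler that reports each request name on a
// channel, letting tests observe exactly which notifications were enqueued.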
type testController struct {
	ch chan string
}

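// Reconcile publishes the request name to the test channel, or gives up when
// the context is cancelled so a stopped manager cannot block forever.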
func (c *testController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	spew.Dump(req)
	select {
	case <-ctx.Done():
	case c.ch <- req.Name:
	}
	return ctrl.Result{}, nil
}

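// cleanupTestSecrets deletes every Secret visible to the given client.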
func cleanupTestSecrets(ctx context.Context, c client.Client) error {
	secretList := &corev1.SecretList{}
	if err := c.List(ctx, secretList); err != nil {
		return err
	}
	for _, secret := range secretList.Items {
		s := secret
		if err := c.Delete(ctx, &s); err != nil {
			return err
		}
	}
	return nil
}

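// cleanupTestClusters deletes every Cluster visible to the given client.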
func cleanupTestClusters(ctx context.Context, c client.Client) error {
	clusterList := &clusterv1.ClusterList{}
	if err := c.List(ctx, clusterList); err != nil {
		return err
	}
	for _, cluster := range clusterList.Items {
		o := cluster
		if err := c.Delete(ctx, &o); err != nil {
			return err
		}
	}
	return nil
}