github.com/tilt-dev/tilt@v0.33.15-0.20240515162809-0a22ed45d8a0/internal/controllers/core/kubernetesdiscovery/reconciler_test.go

     1  package kubernetesdiscovery
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"sort"
     7  	"strings"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/google/go-cmp/cmp"
    12  	"github.com/stretchr/testify/assert"
    13  	"github.com/stretchr/testify/require"
    14  	appsv1 "k8s.io/api/apps/v1"
    15  	v1 "k8s.io/api/core/v1"
    16  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    17  	"k8s.io/apimachinery/pkg/labels"
    18  	"k8s.io/apimachinery/pkg/runtime"
    19  	"k8s.io/apimachinery/pkg/types"
    20  	controllerruntime "sigs.k8s.io/controller-runtime"
    21  	"sigs.k8s.io/controller-runtime/pkg/reconcile"
    22  
    23  	"github.com/tilt-dev/tilt/internal/container"
    24  	"github.com/tilt-dev/tilt/internal/controllers/apis/cluster"
    25  	"github.com/tilt-dev/tilt/internal/controllers/fake"
    26  	"github.com/tilt-dev/tilt/internal/k8s"
    27  	"github.com/tilt-dev/tilt/internal/timecmp"
    28  	"github.com/tilt-dev/tilt/pkg/apis"
    29  	"github.com/tilt-dev/tilt/pkg/apis/core/v1alpha1"
    30  )
    31  
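         // stdTimeout bounds how long the require* helpers poll the apiserver for an expected state.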
    32  const stdTimeout = time.Second
    33  
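         // ancestorMap maps a Pod UID to the UID of the watched ancestor (e.g. ReplicaSet) it was matched by;
         // an empty value means the Pod was matched by label selector only.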
    34  type ancestorMap map[types.UID]types.UID
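         // podNameMap maps a Pod UID to the Pod's name, used to disambiguate Pods that share a UID across clusters.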
    35  type podNameMap map[types.UID]string
    36  
    37  func TestPodDiscoveryExactMatch(t *testing.T) {
    38  	f := newFixture(t)
    39  
    40  	pod := f.buildPod("pod-ns", "pod", nil, nil)
    41  
    42  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
    43  	kd := &v1alpha1.KubernetesDiscovery{
    44  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
    45  		Spec: v1alpha1.KubernetesDiscoverySpec{
    46  			Watches: []v1alpha1.KubernetesWatchRef{
    47  				{
    48  					UID:       string(pod.UID),
    49  					Namespace: pod.Namespace,
    50  					Name:      pod.Name,
    51  				},
    52  			},
    53  		},
    54  	}
    55  
    56  	f.Create(kd)
    57  	f.requireMonitorStarted(key)
    58  	// we should not have observed any pods yet
    59  	f.requireObservedPods(key, nil, nil)
    60  
    61  	kCli := f.clients.MustK8sClient(clusterNN(*kd))
    62  	kCli.UpsertPod(pod)
    63  
    64  	f.requireObservedPods(key, ancestorMap{pod.UID: pod.UID}, nil)
    65  }
    66  
    67  func TestPodDiscoveryAncestorMatch(t *testing.T) {
    68  	f := newFixture(t)
    69  
    70  	ns := k8s.Namespace("ns")
    71  	dep, rs := f.buildK8sDeployment(ns, "dep")
    72  
    73  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
    74  	kd := &v1alpha1.KubernetesDiscovery{
    75  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
    76  		Spec: v1alpha1.KubernetesDiscoverySpec{
    77  			Watches: []v1alpha1.KubernetesWatchRef{
    78  				{
    79  					UID:       string(rs.UID),
    80  					Namespace: ns.String(),
    81  					Name:      rs.Name,
    82  				},
    83  			},
    84  		},
    85  	}
    86  
    87  	f.injectK8sObjects(*kd, dep, rs)
    88  
    89  	f.Create(kd)
    90  	f.requireMonitorStarted(key)
    91  	// we should not have observed any pods yet
    92  	f.requireObservedPods(key, nil, nil)
    93  
    94  	pod := f.buildPod(ns, "pod", nil, rs)
    95  	f.injectK8sObjects(*kd, pod)
    96  
    97  	f.requireObservedPods(key, ancestorMap{pod.UID: rs.UID}, nil)
    98  
    99  	// Make sure the owner is filled in.
   100  	f.MustGet(key, kd)
   101  	assert.Equal(t, &v1alpha1.PodOwner{
   102  		Name:              "dep-rs",
   103  		APIVersion:        "apps/v1",
   104  		Kind:              "ReplicaSet",
   105  		CreationTimestamp: rs.CreationTimestamp,
   106  	}, kd.Status.Pods[0].Owner)
   107  
   108  	// update the spec, changing the UID
   109  	f.Get(key, kd)
   110  	kd.Spec.Watches[0].UID = "unknown-uid"
   111  	f.Update(kd)
   112  
   113  	// no pods should be seen now
   114  	f.requireObservedPods(key, nil, nil)
   115  }
   116  
   117  func TestPodDiscoveryPreexisting(t *testing.T) {
   118  	f := newFixture(t)
   119  	ns := k8s.Namespace("ns")
   120  
   121  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   122  	kd := &v1alpha1.KubernetesDiscovery{
   123  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   124  		Spec: v1alpha1.KubernetesDiscoverySpec{
   125  			Watches: []v1alpha1.KubernetesWatchRef{
   126  				{Namespace: ns.String()},
   127  			},
   128  		},
   129  	}
   130  	f.Create(kd)
   131  	f.requireMonitorStarted(key)
   132  	// we should not have observed any pods yet
   133  	f.requireObservedPods(key, nil, nil)
   134  
   135  	_, rs := f.buildK8sDeployment(ns, "dep")
   136  	pod := f.buildPod(ns, "pod", nil, rs)
    137  	// pod is deployed before it or its ancestors are ever referenced by the spec
   138  	f.injectK8sObjects(*kd, pod)
   139  
    140  	// typically, the reconciler will see the Pod event BEFORE any client is able to create
    141  	// a spec that references it via ancestor UID, and those Pods should still be included in the status
   142  	f.MustGet(key, kd)
   143  	kd.Spec.Watches[0].UID = string(rs.UID)
   144  	f.Update(kd)
   145  
   146  	f.requireObservedPods(key, ancestorMap{pod.UID: rs.UID}, nil)
   147  }
   148  
   149  func TestPodDiscoveryLabelMatch(t *testing.T) {
   150  	f := newFixture(t)
   151  
   152  	ns := k8s.Namespace("ns")
   153  
   154  	knownDep, knownRS := f.buildK8sDeployment(ns, "known")
   155  
   156  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   157  	kd := &v1alpha1.KubernetesDiscovery{
   158  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   159  		Spec: v1alpha1.KubernetesDiscoverySpec{
   160  			Watches: []v1alpha1.KubernetesWatchRef{
   161  				{Namespace: ns.String()},
   162  				{Namespace: ns.String(), UID: string(knownRS.UID)},
   163  			},
   164  			ExtraSelectors: []metav1.LabelSelector{
   165  				*metav1.SetAsLabelSelector(labels.Set{"k1": "v1", "k2": "v2"}),
   166  				*metav1.SetAsLabelSelector(labels.Set{"k3": "v3"}),
   167  			},
   168  		},
   169  	}
   170  	f.injectK8sObjects(*kd, knownDep, knownRS)
   171  
   172  	f.Create(kd)
   173  	f.requireMonitorStarted(key)
   174  	// we should not have observed any pods yet
   175  	f.requireObservedPods(key, nil, nil)
   176  
   177  	pod1 := f.buildPod(ns, "pod1", labels.Set{"k1": "v1", "k2": "v2", "other": "other1"}, nil)
   178  	pod2 := f.buildPod(ns, "pod2", labels.Set{"k1": "v1", "other": "other2"}, nil)
   179  	_, unknownRS := f.buildK8sDeployment(ns, "unknown")
   180  	pod3 := f.buildPod(ns, "pod3", labels.Set{"k3": "v3"}, unknownRS)
   181  	pod4 := f.buildPod(ns, "pod4", labels.Set{"k3": "v3"}, knownRS)
   182  	f.injectK8sObjects(*kd, pod1, pod2, pod3, pod4)
   183  
   184  	// pod1 matches on labels and doesn't have any associated Deployment
   185  	// pod2 does NOT match on labels - it must match ALL labels from a given set (it's missing k2:v2)
   186  	// pod3 matches on labels and has a Deployment (that's NOT watched by this spec)
   187  	// pod4 matches on a known ancestor AND labels but ancestor should take precedence
   188  	f.requireObservedPods(key, ancestorMap{pod1.UID: "", pod3.UID: "", pod4.UID: knownRS.UID}, nil)
   189  
   190  	// change the selectors around
   191  	f.Get(key, kd)
   192  	kd.Spec.ExtraSelectors[0] = *metav1.SetAsLabelSelector(labels.Set{"other": "other2"})
   193  	kd.Spec.ExtraSelectors[1] = *metav1.SetAsLabelSelector(labels.Set{"k3": "v4"})
   194  	f.Update(kd)
   195  
   196  	// pod1 no longer matches
   197  	// pod2 matches on labels
   198  	// pod3 no longer matches
    199  	// pod4 no longer matches on labels but should STILL be seen because it has a watched ancestor UID
   200  	f.requireObservedPods(key, ancestorMap{pod2.UID: "", pod4.UID: knownRS.UID}, nil)
   201  }
   202  
   203  func TestPodDiscoveryDuplicates(t *testing.T) {
   204  	f := newFixture(t)
   205  
   206  	ns := k8s.Namespace("ns")
   207  
   208  	sharedDep, sharedRS := f.buildK8sDeployment(ns, "known")
   209  	preExistingPod := f.buildPod(ns, "preexisting", nil, sharedRS)
   210  
   211  	key1 := types.NamespacedName{Namespace: "some-ns", Name: "kd1"}
   212  	kd1 := &v1alpha1.KubernetesDiscovery{
   213  		ObjectMeta: metav1.ObjectMeta{Namespace: key1.Namespace, Name: key1.Name},
   214  		Spec: v1alpha1.KubernetesDiscoverySpec{
   215  			Watches: []v1alpha1.KubernetesWatchRef{
   216  				{Namespace: ns.String()},
   217  				{Namespace: ns.String(), UID: string(sharedRS.UID)},
   218  			},
   219  			ExtraSelectors: []metav1.LabelSelector{
   220  				*metav1.SetAsLabelSelector(labels.Set{"k": "v"}),
   221  			},
   222  		},
   223  	}
   224  	f.injectK8sObjects(*kd1, preExistingPod, sharedDep, sharedRS)
   225  	f.Create(kd1)
   226  
   227  	kd2Dep, kd2RS := f.buildK8sDeployment(ns, "kd2only")
   228  
   229  	key2 := types.NamespacedName{Namespace: "some-ns", Name: "kd2"}
   230  	kd2 := &v1alpha1.KubernetesDiscovery{
   231  		ObjectMeta: metav1.ObjectMeta{Namespace: key2.Namespace, Name: key2.Name},
   232  		Spec: v1alpha1.KubernetesDiscoverySpec{
   233  			Watches: []v1alpha1.KubernetesWatchRef{
   234  				{Namespace: ns.String()},
   235  				{Namespace: ns.String(), UID: string(sharedRS.UID)},
   236  				{Namespace: ns.String(), UID: string(kd2RS.UID)},
   237  			},
   238  			ExtraSelectors: []metav1.LabelSelector{
   239  				*metav1.SetAsLabelSelector(labels.Set{"k": "v"}),
   240  			},
   241  		},
   242  	}
   243  	f.injectK8sObjects(*kd2, kd2Dep, kd2RS)
   244  	f.Create(kd2)
   245  
   246  	for _, k := range []types.NamespacedName{key1, key2} {
   247  		f.requireMonitorStarted(k)
   248  		// initially, both should have seen the pre-existing pod since they both watch the same RS
   249  		f.requireObservedPods(k, ancestorMap{preExistingPod.UID: sharedRS.UID}, nil)
   250  	}
   251  
    252  	// pod1 matches on labels for both kd1/kd2 and doesn't have any associated Deployment
   253  	pod1 := f.buildPod(ns, "pod1", labels.Set{"k": "v"}, nil)
   254  
    255  	// pod2 has no matching labels and no owner, so neither KD should observe it
   256  	pod2 := f.buildPod(ns, "pod2", nil, nil)
   257  
    258  	// pod3 belongs to the ReplicaSet known only to KD2 but has labels that match KD1 as well
   259  	pod3 := f.buildPod(ns, "pod3", labels.Set{"k": "v"}, kd2RS)
   260  
   261  	f.injectK8sObjects(*kd1, pod1, pod2, pod3)
   262  
   263  	f.requireObservedPods(key1, ancestorMap{
   264  		preExistingPod.UID: sharedRS.UID,
   265  		pod1.UID:           "", // label match
   266  		pod3.UID:           "", // label match
   267  	}, nil)
   268  
   269  	f.requireObservedPods(key2, ancestorMap{
   270  		preExistingPod.UID: sharedRS.UID,
   271  		pod1.UID:           "",        // label match
   272  		pod3.UID:           kd2RS.UID, // <-- unlike KD1, known RS!
   273  	}, nil)
   274  }
   275  
   276  func TestReconcileManagesPodLogStream(t *testing.T) {
   277  	f := newFixture(t)
   278  
   279  	ns := k8s.Namespace("ns")
   280  	pod1 := f.buildPod(ns, "pod1", nil, nil)
   281  	pod2 := f.buildPod(ns, "pod2", nil, nil)
   282  
   283  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   284  	sinceTime := apis.NewTime(time.Now())
   285  	podLogStreamTemplateSpec := &v1alpha1.PodLogStreamTemplateSpec{
   286  		SinceTime: &sinceTime,
   287  		IgnoreContainers: []string{
   288  			string(container.IstioInitContainerName),
   289  			string(container.IstioSidecarContainerName),
   290  		},
   291  	}
   292  
   293  	kd := &v1alpha1.KubernetesDiscovery{
   294  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   295  		Spec: v1alpha1.KubernetesDiscoverySpec{
   296  			Watches: []v1alpha1.KubernetesWatchRef{
   297  				{
   298  					UID:       string(pod1.UID),
   299  					Namespace: pod1.Namespace,
   300  					Name:      pod1.Name,
   301  				},
   302  				{
   303  					UID:       string(pod2.UID),
   304  					Namespace: pod2.Namespace,
   305  					Name:      pod2.Name,
   306  				},
   307  			},
   308  			PodLogStreamTemplateSpec: podLogStreamTemplateSpec,
   309  		},
   310  	}
   311  
   312  	f.injectK8sObjects(*kd, pod1, pod2)
   313  
   314  	f.Create(kd)
    315  	// make sure the pods have been seen so that the reconciler knows which resources to create
   316  	f.requireObservedPods(key, ancestorMap{pod1.UID: pod1.UID, pod2.UID: pod2.UID}, nil)
   317  
   318  	// in reality, once the pods are observed, a status update is triggered, which would
   319  	// result in a reconcile; but the test is not running under the manager, so an update
   320  	// doesn't implicitly trigger a reconcile and we have to manually do it
   321  	f.MustReconcile(key)
   322  
   323  	var podLogStreams v1alpha1.PodLogStreamList
   324  	f.List(&podLogStreams)
   325  	require.Equal(t, 2, len(podLogStreams.Items), "Incorrect number of PodLogStream objects")
   326  
   327  	sort.Slice(podLogStreams.Items, func(i, j int) bool {
   328  		return podLogStreams.Items[i].Spec.Pod < podLogStreams.Items[j].Spec.Pod
   329  	})
   330  
   331  	assert.Equal(t, "pod1", podLogStreams.Items[0].Spec.Pod)
   332  	assert.Equal(t, "pod2", podLogStreams.Items[1].Spec.Pod)
   333  
   334  	for _, pls := range podLogStreams.Items {
   335  		assert.Equal(t, ns.String(), pls.Spec.Namespace)
   336  
   337  		timecmp.AssertTimeEqual(t, sinceTime, pls.Spec.SinceTime)
   338  
   339  		assert.ElementsMatch(t,
   340  			[]string{container.IstioInitContainerName.String(), container.IstioSidecarContainerName.String()},
   341  			pls.Spec.IgnoreContainers)
   342  
   343  		assert.Empty(t, pls.Spec.OnlyContainers)
   344  	}
   345  
   346  	// simulate a pod delete and ensure that after it's observed + reconciled, the PLS is also deleted
   347  	kCli := f.clients.MustK8sClient(clusterNN(*kd))
   348  	kCli.EmitPodDelete(pod1)
   349  	f.requireObservedPods(key, ancestorMap{pod2.UID: pod2.UID}, nil)
   350  	f.MustReconcile(key)
   351  	f.List(&podLogStreams)
   352  	require.Equal(t, 1, len(podLogStreams.Items), "Incorrect number of PodLogStream objects")
   353  	assert.Equal(t, "pod2", podLogStreams.Items[0].Spec.Pod)
   354  
   355  	// simulate the PodLogStream being deleted by an external force - chaos!
   356  	f.Delete(&podLogStreams.Items[0])
   357  	f.List(&podLogStreams)
   358  	assert.Empty(t, podLogStreams.Items)
    359  	// as before, the reconciler watches the objects it owns, so under the manager a reconcile
    360  	// would be triggered automatically, but in the test we have to simulate it manually
   361  	f.MustReconcile(key)
   362  	f.List(&podLogStreams)
   363  	require.Equal(t, 1, len(podLogStreams.Items), "Incorrect number of PodLogStream objects")
   364  	assert.Equal(t, "pod2", podLogStreams.Items[0].Spec.Pod)
   365  }
   366  
   367  func TestReconcileManagesPortForward(t *testing.T) {
   368  	f := newFixture(t)
   369  
   370  	ns := k8s.Namespace("ns")
   371  	pod := f.buildPod(ns, "pod", nil, nil)
   372  	pod.Spec.Containers = []v1.Container{
   373  		{
   374  			Name: "container",
   375  			Ports: []v1.ContainerPort{
   376  				{ContainerPort: 7890, Protocol: v1.ProtocolTCP},
   377  			},
   378  		},
   379  	}
   380  	pod.Status.ContainerStatuses = []v1.ContainerStatus{{Name: "container"}}
   381  
   382  	kd := &v1alpha1.KubernetesDiscovery{
   383  		ObjectMeta: metav1.ObjectMeta{
   384  			Namespace: "some-ns",
   385  			Name:      "ks",
   386  			Annotations: map[string]string{
   387  				v1alpha1.AnnotationManifest: "my-resource",
   388  			},
   389  		},
   390  		Spec: v1alpha1.KubernetesDiscoverySpec{
   391  			Watches: []v1alpha1.KubernetesWatchRef{
   392  				{
   393  					UID:       string(pod.UID),
   394  					Namespace: pod.Namespace,
   395  					Name:      pod.Name,
   396  				},
   397  			},
   398  			PortForwardTemplateSpec: &v1alpha1.PortForwardTemplateSpec{
   399  				Forwards: []v1alpha1.Forward{{LocalPort: 1234}},
   400  			},
   401  		},
   402  	}
   403  	key := apis.Key(kd)
   404  
   405  	f.injectK8sObjects(*kd, pod)
   406  
   407  	f.Create(kd)
    408  	// make sure the pods have been seen so that the reconciler knows which resources to create
   409  	f.requireObservedPods(key, ancestorMap{pod.UID: pod.UID}, nil)
   410  
   411  	// in reality, once the pods are observed, a status update is triggered, which would
   412  	// result in a reconcile; but the test is not running under the manager, so an update
   413  	// doesn't implicitly trigger a reconcile and we have to manually do it
   414  	f.MustReconcile(key)
   415  
   416  	var portForwards v1alpha1.PortForwardList
   417  	f.List(&portForwards)
   418  	require.Len(t, portForwards.Items, 1)
   419  	if assert.Len(t, portForwards.Items[0].Spec.Forwards, 1) {
   420  		fwd := portForwards.Items[0].Spec.Forwards[0]
   421  		assert.Equal(t, int32(1234), fwd.LocalPort)
   422  		assert.Equal(t, int32(7890), fwd.ContainerPort)
   423  	}
   424  
   425  	f.AssertStdOutContains(
   426  		`k8s_resource(name='my-resource', port_forwards='1234') currently maps localhost:1234 to port 7890 in your container.
   427  A future version of Tilt will change this default and will map localhost:1234 to port 1234 in your container.
   428  To keep your project working, change your Tiltfile to k8s_resource(name='my-resource', port_forwards='1234:7890')`)
   429  
   430  	// simulate a pod delete and ensure that after it's observed + reconciled, the PF is also deleted
   431  	kCli := f.clients.MustK8sClient(clusterNN(*kd))
   432  	kCli.EmitPodDelete(pod)
   433  	f.requireObservedPods(key, nil, nil)
   434  	f.MustReconcile(key)
   435  	f.List(&portForwards)
   436  	require.Empty(t, portForwards.Items)
   437  }
   438  
   439  func TestKubernetesDiscoveryIndexing(t *testing.T) {
   440  	f := newFixture(t)
   441  
   442  	pod := f.buildPod("pod-ns", "pod", nil, nil)
   443  
   444  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   445  	kd := &v1alpha1.KubernetesDiscovery{
   446  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   447  		Spec: v1alpha1.KubernetesDiscoverySpec{
   448  			Cluster: "my-cluster",
   449  			Watches: []v1alpha1.KubernetesWatchRef{
   450  				{
   451  					UID:       string(pod.UID),
   452  					Namespace: pod.Namespace,
   453  					Name:      pod.Name,
   454  				},
   455  			},
   456  		},
   457  	}
   458  
   459  	// fixture will automatically create a cluster object
   460  	f.Create(kd)
   461  
   462  	reqs := f.r.indexer.Enqueue(context.Background(),
   463  		&v1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "my-cluster"}})
   464  	assert.ElementsMatch(t, []reconcile.Request{
   465  		{NamespacedName: types.NamespacedName{Namespace: "some-ns", Name: "kd"}},
   466  	}, reqs)
   467  }
   468  
   469  func TestKubernetesDiscoveryClusterError(t *testing.T) {
   470  	f := newFixture(t)
   471  
   472  	pod := f.buildPod("pod-ns", "pod", nil, nil)
   473  
   474  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   475  	kd := &v1alpha1.KubernetesDiscovery{
   476  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   477  		Spec: v1alpha1.KubernetesDiscoverySpec{
   478  			Cluster: "my-cluster",
   479  			Watches: []v1alpha1.KubernetesWatchRef{
   480  				{
   481  					UID:       string(pod.UID),
   482  					Namespace: pod.Namespace,
   483  					Name:      pod.Name,
   484  				},
   485  			},
   486  		},
   487  	}
   488  
   489  	// cannot use normal fixture create flow because we want to intentionally
   490  	// set things up in a bad state
   491  	f.clients.EnsureK8sClusterError(f.ctx, clusterNN(*kd), errors.New("oh no"))
   492  	require.NoError(t, f.Client.Create(f.Context(), kd), "Could not create KubernetesDiscovery")
   493  	f.MustReconcile(key)
   494  	f.MustGet(key, kd)
   495  
   496  	require.NotNil(t, kd.Status.Waiting, "Waiting should be present")
   497  	require.Equal(t, "ClusterUnavailable", kd.Status.Waiting.Reason)
   498  	require.Zero(t, kd.Status.MonitorStartTime, "MonitorStartTime should not be populated")
   499  	require.Nil(t, kd.Status.Running, "Running should not be populated")
   500  }
   501  
   502  func TestClusterChange(t *testing.T) {
   503  	f := newFixture(t)
   504  
   505  	kd1ClusterA := &v1alpha1.KubernetesDiscovery{
   506  		ObjectMeta: metav1.ObjectMeta{Namespace: "some-ns", Name: "kd1ClusterA"},
   507  		Spec: v1alpha1.KubernetesDiscoverySpec{
   508  			Watches: []v1alpha1.KubernetesWatchRef{
   509  				{
   510  					UID:       "pod1-uid",
   511  					Namespace: "pod-ns",
   512  				},
   513  			},
   514  			Cluster: "clusterA",
   515  		},
   516  	}
   517  	kd2ClusterA := kd1ClusterA.DeepCopy()
   518  	kd2ClusterA.Name = "kd2ClusterA"
   519  
   520  	kd3ClusterB := kd1ClusterA.DeepCopy()
   521  	kd3ClusterB.Name = "kd3ClusterB"
   522  	kd3ClusterB.Spec.Cluster = "clusterB"
   523  
   524  	// set up initial state
   525  	for _, kd := range []*v1alpha1.KubernetesDiscovery{kd1ClusterA, kd2ClusterA, kd3ClusterB} {
   526  		f.Create(kd)
   527  		key := apis.Key(kd)
   528  		f.requireMonitorStarted(key)
   529  		// we should not have observed any pods yet
   530  		f.requireObservedPods(key, nil, nil)
   531  	}
   532  
   533  	const pod1UID types.UID = "pod1-uid"
   534  	const pod2UID types.UID = "pod2-uid"
   535  	kCliClusterA := f.clients.MustK8sClient(clusterNN(*kd1ClusterA))
   536  
   537  	pod1ClusterA := f.buildPod("pod-ns", "pod1ClusterA", nil, nil)
   538  	pod1ClusterA.UID = pod1UID
   539  	kCliClusterA.UpsertPod(pod1ClusterA)
   540  
    541  	// this Pod will be matched by label later
   542  	pod2ClusterA := f.buildPod("pod-ns", "pod2ClusterA", labels.Set{"foo": "bar"}, nil)
   543  	pod2ClusterA.UID = pod2UID
   544  	kCliClusterA.UpsertPod(pod2ClusterA)
   545  
   546  	kCliClusterB := f.clients.MustK8sClient(clusterNN(*kd3ClusterB))
   547  	// N.B. we intentionally use the same UIDs across both clusters!
   548  	pod1ClusterB := pod1ClusterA.DeepCopy()
   549  	pod1ClusterB.Name = "pod1ClusterB"
   550  	kCliClusterB.UpsertPod(pod1ClusterB)
   551  
   552  	pod2ClusterB := pod2ClusterA.DeepCopy()
   553  	pod2ClusterB.Name = "pod2ClusterB"
   554  	kCliClusterB.UpsertPod(pod2ClusterB)
   555  
   556  	f.requireObservedPods(apis.Key(kd1ClusterA), ancestorMap{pod1UID: pod1UID}, podNameMap{pod1UID: "pod1ClusterA"})
   557  	f.requireObservedPods(apis.Key(kd2ClusterA), ancestorMap{pod1UID: pod1UID}, podNameMap{pod1UID: "pod1ClusterA"})
   558  	f.requireObservedPods(apis.Key(kd3ClusterB), ancestorMap{pod1UID: pod1UID}, podNameMap{pod1UID: "pod1ClusterB"})
   559  
    560  	// create a NEW client for cluster A (SetK8sClient returns the new connection time)
   561  	kCliClusterA2 := k8s.NewFakeK8sClient(t)
   562  	connectedAtA2 := f.clients.SetK8sClient(clusterNN(*kd1ClusterA), kCliClusterA2)
   563  
   564  	// create copies of the old pods with slightly different names so we can
   565  	// be sure we received the new ones
   566  	pod1ClusterA2 := pod1ClusterA.DeepCopy()
   567  	pod1ClusterA2.Name = "pod1ClusterA-2"
   568  	kCliClusterA2.UpsertPod(pod1ClusterA2)
   569  
   570  	pod2ClusterA2 := pod2ClusterA.DeepCopy()
   571  	pod2ClusterA2.Name = "pod2ClusterA-2"
   572  	kCliClusterA2.UpsertPod(pod2ClusterA2)
   573  
    574  	// reconcile should succeed even though the client is stale (it cannot be
    575  	// refreshed because the Cluster obj has not been updated yet, simulating a stale
    576  	// informer cache): the KD spec has not changed, so no watches need to be (re)established
   577  	f.MustReconcile(apis.Key(kd1ClusterA))
   578  
    579  	// on the other hand, watches cannot be (re)established with a stale client, e.g. when the spec changes
   580  	f.MustGet(apis.Key(kd2ClusterA), kd2ClusterA)
   581  	kd2ClusterA.Spec.ExtraSelectors = append(kd2ClusterA.Spec.ExtraSelectors,
   582  		metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}})
   583  
   584  	require.NoError(f.t, f.Client.Update(f.ctx, kd2ClusterA))
   585  	f.MustReconcile(apis.Key(kd2ClusterA))
   586  	f.MustGet(apis.Key(kd2ClusterA), kd2ClusterA)
   587  	require.NotNil(t, kd2ClusterA.Status.Waiting, "kd2clusterA should be in waiting state")
   588  	require.Equal(t, "ClusterUnavailable", kd2ClusterA.Status.Waiting.Reason)
   589  
    590  	// kd3 can reconcile even with spec changes since it uses a different
    591  	// cluster (B), whose client is still fresh
   592  	f.MustGet(apis.Key(kd3ClusterB), kd3ClusterB)
   593  	kd3ClusterB.Spec.ExtraSelectors = append(kd3ClusterB.Spec.ExtraSelectors,
   594  		metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}})
   595  	f.Update(kd3ClusterB)
   596  	f.MustReconcile(apis.Key(kd3ClusterB))
   597  
    598  	// write the updated Cluster obj (with the new connection time) to the apiserver
   599  	clusterA := f.getCluster(clusterNN(*kd1ClusterA))
   600  	clusterA.Status.ConnectedAt = connectedAtA2.DeepCopy()
   601  	require.NoError(f.t, f.Client.Status().Update(f.ctx, clusterA))
   602  
   603  	// kd1 still only matches by UID but should see the Pod from the new cluster now
   604  	f.MustReconcile(apis.Key(kd1ClusterA))
   605  	f.requireObservedPods(apis.Key(kd1ClusterA), ancestorMap{pod1UID: pod1UID}, podNameMap{pod1UID: "pod1ClusterA-2"})
   606  
   607  	// kd2 will now have 2 Pods - one by UID and one by label, but both from the new cluster
   608  	// (note: because pod2 matches by label, there's no ancestor UID)
   609  	f.MustReconcile(apis.Key(kd2ClusterA))
   610  	f.requireObservedPods(apis.Key(kd2ClusterA),
   611  		ancestorMap{pod1UID: pod1UID, pod2UID: ""},
   612  		podNameMap{pod1UID: "pod1ClusterA-2", pod2UID: "pod2ClusterA-2"})
   613  
    614  	// kd3 will now have 2 Pods - one by UID and one by label, both from its original cluster
    615  	// (note: because pod2 matches by label, there's no ancestor UID)
   616  	f.MustReconcile(apis.Key(kd3ClusterB))
   617  	f.requireObservedPods(apis.Key(kd3ClusterB),
   618  		ancestorMap{pod1UID: pod1UID, pod2UID: ""},
   619  		podNameMap{pod1UID: "pod1ClusterB", pod2UID: "pod2ClusterB"})
   620  }
   621  
   622  func TestHangOntoDeletedPodsWhenNoSibling(t *testing.T) {
   623  	f := newFixture(t)
   624  
   625  	ns := k8s.Namespace("ns")
   626  	dep, rs := f.buildK8sDeployment(ns, "dep")
   627  
   628  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   629  	kd := &v1alpha1.KubernetesDiscovery{
   630  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   631  		Spec: v1alpha1.KubernetesDiscoverySpec{
   632  			Watches: []v1alpha1.KubernetesWatchRef{
   633  				{
   634  					UID:       string(rs.UID),
   635  					Namespace: ns.String(),
   636  					Name:      rs.Name,
   637  				},
   638  			},
   639  		},
   640  	}
   641  
   642  	f.injectK8sObjects(*kd, dep, rs)
   643  
   644  	f.Create(kd)
   645  	f.requireMonitorStarted(key)
   646  	// we should not have observed any pods yet
   647  	f.requireObservedPods(key, nil, nil)
   648  
   649  	podA := f.buildPod(ns, "pod-a", nil, rs)
   650  	podA.Status.Phase = v1.PodSucceeded
   651  	f.injectK8sObjects(*kd, podA)
   652  
   653  	f.requireObservedPods(key, ancestorMap{podA.UID: rs.UID}, nil)
   654  
   655  	kCli := f.clients.MustK8sClient(clusterNN(*kd))
   656  	kCli.EmitPodDelete(podA)
   657  
   658  	f.requireObservedPods(key, ancestorMap{podA.UID: rs.UID}, nil)
   659  
   660  	podB := f.buildPod(ns, "pod-b", nil, rs)
   661  	podB.Status.Phase = v1.PodRunning
   662  	f.injectK8sObjects(*kd, podB)
   663  	f.requireObservedPods(key, ancestorMap{podB.UID: rs.UID}, nil)
   664  }
   665  
   666  func TestNoHangOntoDeletedPodsWhenSiblingExists(t *testing.T) {
   667  	f := newFixture(t)
   668  
   669  	ns := k8s.Namespace("ns")
   670  	dep, rs := f.buildK8sDeployment(ns, "dep")
   671  
   672  	key := types.NamespacedName{Namespace: "some-ns", Name: "kd"}
   673  	kd := &v1alpha1.KubernetesDiscovery{
   674  		ObjectMeta: metav1.ObjectMeta{Namespace: key.Namespace, Name: key.Name},
   675  		Spec: v1alpha1.KubernetesDiscoverySpec{
   676  			Watches: []v1alpha1.KubernetesWatchRef{
   677  				{
   678  					UID:       string(rs.UID),
   679  					Namespace: ns.String(),
   680  					Name:      rs.Name,
   681  				},
   682  			},
   683  		},
   684  	}
   685  
   686  	f.injectK8sObjects(*kd, dep, rs)
   687  
   688  	f.Create(kd)
   689  	f.requireMonitorStarted(key)
   690  	// we should not have observed any pods yet
   691  	f.requireObservedPods(key, nil, nil)
   692  
   693  	podA := f.buildPod(ns, "pod-a", nil, rs)
   694  	podA.Status.Phase = v1.PodSucceeded
   695  	f.injectK8sObjects(*kd, podA)
   696  
   697  	f.requireObservedPods(key, ancestorMap{podA.UID: rs.UID}, nil)
   698  
   699  	podB := f.buildPod(ns, "pod-b", nil, rs)
   700  	podB.Status.Phase = v1.PodRunning
   701  	f.injectK8sObjects(*kd, podB)
   702  
   703  	f.requireObservedPods(key, ancestorMap{podA.UID: rs.UID, podB.UID: rs.UID}, nil)
   704  
   705  	kCli := f.clients.MustK8sClient(clusterNN(*kd))
   706  	kCli.EmitPodDelete(podA)
   707  
   708  	f.requireObservedPods(key, ancestorMap{podB.UID: rs.UID}, nil)
   709  }
   710  
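         // fixture bundles the generic controller fixture with the Reconciler under test
         // and the fake cluster client provider.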
   711  type fixture struct {
   712  	*fake.ControllerFixture
   713  	t       *testing.T
   714  	r       *Reconciler
   715  	ctx     context.Context
   716  	clients *cluster.FakeClientProvider
   717  }
   718  
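         // newFixture wires a Reconciler to a fake API client, fake cluster clients, and the controller fixture's store.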
   719  func newFixture(t *testing.T) *fixture {
   720  	rd := NewContainerRestartDetector()
   721  	cfb := fake.NewControllerFixtureBuilder(t)
   722  	clients := cluster.NewFakeClientProvider(t, cfb.Client)
   723  	pw := NewReconciler(cfb.Client, cfb.Scheme(), clients, rd, cfb.Store)
   724  
   725  	ret := &fixture{
   726  		ControllerFixture: cfb.WithRequeuer(pw.requeuer).Build(pw),
   727  		r:                 pw,
   728  		ctx:               cfb.Context(),
   729  		t:                 t,
   730  		clients:           clients,
   731  	}
   732  	return ret
   733  }
   734  
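         // requireMonitorStarted waits until the KubernetesDiscovery object exists and reports a non-zero MonitorStartTime.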
   735  func (f *fixture) requireMonitorStarted(key types.NamespacedName) {
   736  	f.t.Helper()
   737  	var desc strings.Builder
   738  	f.requireState(key, func(kd *v1alpha1.KubernetesDiscovery) bool {
   739  		desc.Reset()
   740  		if kd == nil {
   741  			desc.WriteString("object does not exist in apiserver")
   742  			return false
   743  		}
   744  		if kd.Status.MonitorStartTime.IsZero() {
   745  			desc.WriteString("monitor start time is zero")
   746  			return false
   747  		}
   748  		return true
   749  	}, "Monitor not started for key[%s]: %s", key, &desc)
   750  }
   751  
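         // requireObservedPods waits until the KubernetesDiscovery status reports exactly the expected Pods,
         // compared by Pod UID -> ancestor UID; expectedNames, if non-nil, is also compared.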
   752  func (f *fixture) requireObservedPods(key types.NamespacedName, expectedAncestors ancestorMap, expectedNames podNameMap) {
   753  	f.t.Helper()
   754  
   755  	if expectedAncestors == nil {
   756  		// just for easier comparison since nil != empty map
   757  		expectedAncestors = ancestorMap{}
   758  	}
   759  
   760  	var desc strings.Builder
   761  	f.requireState(key, func(kd *v1alpha1.KubernetesDiscovery) bool {
   762  		desc.Reset()
   763  		if kd == nil {
   764  			desc.WriteString("object does not exist in apiserver")
   765  			return false
   766  		}
   767  		actualAncestors := make(ancestorMap)
   768  		actualNames := make(podNameMap)
   769  		for _, p := range kd.Status.Pods {
   770  			podUID := types.UID(p.UID)
   771  			actualAncestors[podUID] = types.UID(p.AncestorUID)
   772  			actualNames[podUID] = p.Name
   773  		}
   774  
   775  		if diff := cmp.Diff(expectedAncestors, actualAncestors); diff != "" {
   776  			desc.WriteString("\n")
   777  			desc.WriteString(diff)
   778  			return false
   779  		}
   780  
    781  		// expectedNames is optional - we primarily care about UIDs, but checking
    782  		// names is useful for multi-cluster cases where UIDs are reused across clusters
   783  		if expectedNames != nil {
   784  			if diff := cmp.Diff(expectedNames, actualNames); diff != "" {
   785  				desc.WriteString("\n")
   786  				desc.WriteString(diff)
   787  				return false
   788  			}
   789  		}
   790  
   791  		return true
   792  	}, "Expected Pods were not observed for key[%s]: %s", key, &desc)
   793  }
   794  
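         // requireState polls the apiserver until cond returns true for the KubernetesDiscovery object
         // (cond receives nil if the object does not exist), failing the test after stdTimeout.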
   795  func (f *fixture) requireState(key types.NamespacedName, cond func(kd *v1alpha1.KubernetesDiscovery) bool, msg string, args ...interface{}) {
   796  	f.t.Helper()
   797  	require.Eventuallyf(f.t, func() bool {
   798  		var kd v1alpha1.KubernetesDiscovery
   799  		if !f.Get(key, &kd) {
   800  			return cond(nil)
   801  		}
   802  		return cond(&kd)
   803  	}, stdTimeout, 20*time.Millisecond, msg, args...)
   804  }
   805  
   806  // buildK8sDeployment creates fake Deployment + associated ReplicaSet objects.
   807  func (f *fixture) buildK8sDeployment(namespace k8s.Namespace, name string) (*appsv1.Deployment, *appsv1.ReplicaSet) {
   808  	d := &appsv1.Deployment{
   809  		ObjectMeta: metav1.ObjectMeta{
   810  			UID:               types.UID(name + "-uid"),
   811  			Namespace:         namespace.String(),
   812  			Name:              name,
   813  			CreationTimestamp: apis.Now(),
   814  		},
   815  	}
   816  	rsName := name + "-rs"
   817  	rs := &appsv1.ReplicaSet{
   818  		ObjectMeta: metav1.ObjectMeta{
   819  			UID:               types.UID(rsName + "-uid"),
   820  			Namespace:         namespace.String(),
   821  			Name:              rsName,
   822  			CreationTimestamp: apis.Now(),
   823  			OwnerReferences:   []metav1.OwnerReference{k8s.RuntimeObjToOwnerRef(d)},
   824  		},
   825  	}
   826  
   827  	return d, rs
   828  }
   829  
   830  // injectK8sObjects seeds objects in the fake K8s client for subsequent retrieval.
    831  // This allows the reconciler to build object owner trees.
   832  func (f *fixture) injectK8sObjects(kd v1alpha1.KubernetesDiscovery, objs ...runtime.Object) {
   833  	f.t.Helper()
   834  	f.clients.EnsureK8sCluster(f.ctx, clusterNN(kd))
   835  	kCli := f.clients.MustK8sClient(clusterNN(kd))
   836  
   837  	var k8sEntities []k8s.K8sEntity
   838  	for _, obj := range objs {
   839  		if pod, ok := obj.(*v1.Pod); ok {
   840  			kCli.UpsertPod(pod)
   841  			continue
   842  		}
   843  
   844  		k8sEntities = append(k8sEntities, k8s.NewK8sEntity(obj))
   845  	}
   846  
   847  	// inject these so that their metadata can be found later for owner reference matching
   848  	kCli.Inject(k8sEntities...)
   849  }
   850  
   851  // buildPod makes a fake Pod object but does not simulate its deployment.
   852  func (f *fixture) buildPod(namespace k8s.Namespace, name string, podLabels labels.Set, rs *appsv1.ReplicaSet) *v1.Pod {
   853  	f.t.Helper()
   854  
   855  	if podLabels == nil {
   856  		podLabels = make(labels.Set)
   857  	}
   858  
   859  	p := &v1.Pod{
   860  		ObjectMeta: metav1.ObjectMeta{
   861  			UID:       types.UID(name + "-uid"),
   862  			Namespace: namespace.String(),
   863  			Name:      name,
   864  			Labels:    podLabels,
   865  		},
   866  		Status: v1.PodStatus{
   867  			Phase: v1.PodRunning,
   868  		},
   869  	}
   870  
   871  	if rs != nil {
   872  		if rs.Namespace != p.Namespace {
    873  			f.t.Fatalf("Pod namespace (%s) must match ReplicaSet namespace (%s)", p.Namespace, rs.Namespace)
   874  		}
   875  		p.OwnerReferences = []metav1.OwnerReference{k8s.RuntimeObjToOwnerRef(rs)}
   876  	}
   877  
   878  	return p
   879  }
   880  
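         // clusterNN returns the NamespacedName of the Cluster object referenced by the KubernetesDiscovery spec,
         // defaulting the name if the spec does not specify one.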
   881  func clusterNN(kd v1alpha1.KubernetesDiscovery) types.NamespacedName {
   882  	nn := types.NamespacedName{Namespace: kd.Namespace, Name: kd.Spec.Cluster}
   883  	if nn.Name == "" {
   884  		nn.Name = v1alpha1.ClusterNameDefault
   885  	}
   886  	return nn
   887  }
   888  
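         // getCluster fetches the Cluster object from the apiserver, failing the test if it does not exist.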
   889  func (f *fixture) getCluster(nn types.NamespacedName) *v1alpha1.Cluster {
   890  	var c v1alpha1.Cluster
   891  	f.MustGet(nn, &c)
   892  	return &c
   893  }
   894  
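         // Create ensures a fake K8s client exists for the KubernetesDiscovery's cluster before creating the object in the apiserver.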
   895  func (f *fixture) Create(kd *v1alpha1.KubernetesDiscovery) controllerruntime.Result {
   896  	f.t.Helper()
   897  	f.clients.EnsureK8sCluster(f.ctx, clusterNN(*kd))
   898  	return f.ControllerFixture.Create(kd)
   899  }