github.com/cilium/cilium@v1.16.2/pkg/k8s/resource/resource_test.go

     1  // SPDX-License-Identifier: Apache-2.0
     2  // Copyright Authors of Cilium
     3  
     4  package resource_test
     5  
     6  import (
     7  	"context"
     8  	"errors"
     9  	"fmt"
    10  	"math/rand/v2"
    11  	"runtime"
    12  	"strconv"
    13  	"sync"
    14  	"sync/atomic"
    15  	"testing"
    16  	"time"
    17  
    18  	"github.com/cilium/hive/cell"
    19  	"github.com/cilium/hive/hivetest"
    20  	"github.com/stretchr/testify/assert"
    21  	"github.com/stretchr/testify/require"
    22  	"go.uber.org/goleak"
    23  	corev1 "k8s.io/api/core/v1"
    24  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    25  	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
    26  	"k8s.io/apimachinery/pkg/types"
    27  	"k8s.io/apimachinery/pkg/watch"
    28  	"k8s.io/client-go/tools/cache"
    29  	"k8s.io/client-go/util/workqueue"
    30  
    31  	"github.com/cilium/cilium/pkg/hive"
    32  	k8sClient "github.com/cilium/cilium/pkg/k8s/client"
    33  	"github.com/cilium/cilium/pkg/k8s/resource"
    34  	"github.com/cilium/cilium/pkg/k8s/utils"
    35  )
    36  
    37  const testTimeout = time.Minute
    38  
    39  func TestMain(m *testing.M) {
    40  	cleanup := func(exitCode int) {
    41  		// Force garbage collection so that finalizers run, catching
    42  		// missing Event.Done() calls.
    43  		runtime.GC()
    44  	}
    45  	goleak.VerifyTestMain(m,
    46  		goleak.Cleanup(cleanup),
    47  		// Delaying workqueues used by resource.Resource[T].Events leak this waitingLoop goroutine.
    48  		// It does stop when shutting down, but is not guaranteed to do so before we actually exit.
    49  		goleak.IgnoreTopFunction("k8s.io/client-go/util/workqueue.(*delayingType).waitingLoop"),
    50  	)
    51  }
    52  
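        // testStore exercises the Store[T] lookup methods, checking that the store
        // contains exactly one object that matches the given node.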
    53  func testStore(t *testing.T, node *corev1.Node, store resource.Store[*corev1.Node]) {
    54  	var (
    55  		item   *corev1.Node
    56  		exists bool
    57  		err    error
    58  	)
    59  
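        	// check asserts that the preceding lookup succeeded and returned the expected node.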
    60  	check := func() {
    61  		if err != nil {
    62  			t.Fatalf("unexpected error from store lookup: %s", err)
    63  		}
    64  		if !exists {
    65  			t.Fatalf("store lookup returned exists=false")
    66  		}
    67  		if item.Name != node.ObjectMeta.Name {
    68  			t.Fatalf("expected item returned by store lookup to have name %s, got %s",
    69  				node.ObjectMeta.Name, item.ObjectMeta.Name)
    70  		}
    71  	}
    72  	item, exists, err = store.GetByKey(resource.Key{Name: node.ObjectMeta.Name})
    73  	check()
    74  	item, exists, err = store.Get(node)
    75  	check()
    76  
    77  	keys := []resource.Key{}
    78  	iter := store.IterKeys()
    79  	for iter.Next() {
    80  		keys = append(keys, iter.Key())
    81  	}
    82  
    83  	if len(keys) != 1 || keys[0].Name != node.ObjectMeta.Name {
    84  		t.Fatalf("unexpected keys: %#v", keys)
    85  	}
    86  
    87  	items := store.List()
    88  	if len(items) != 1 || items[0].ObjectMeta.Name != node.ObjectMeta.Name {
    89  		t.Fatalf("unexpected items: %#v", items)
    90  	}
    91  }
    92  
    93  func TestResource_WithFakeClient(t *testing.T) {
    94  	var (
    95  		nodeName = "some-node"
    96  		node     = &corev1.Node{
    97  			ObjectMeta: metav1.ObjectMeta{
    98  				Name:            nodeName,
    99  				ResourceVersion: "0",
   100  			},
   101  			Status: corev1.NodeStatus{
   102  				Phase: "init",
   103  			},
   104  		}
   105  
   106  		nodes          resource.Resource[*corev1.Node]
   107  		fakeClient, cs = k8sClient.NewFakeClientset()
   108  
   109  		events <-chan resource.Event[*corev1.Node]
   110  	)
   111  
   112  	// Create the initial version of the node. Do this before anything
   113  	// starts watching the resources to avoid a race.
   114  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   115  		corev1.SchemeGroupVersion.WithResource("nodes"),
   116  		node.DeepCopy(), "")
   117  
   118  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   119  	defer cancel()
   120  
   121  	hive := hive.New(
   122  		cell.Provide(func() k8sClient.Clientset { return cs }),
   123  		nodesResource,
   124  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   125  			nodes = r
   126  
   127  			// Subscribing before the hive starts is allowed. The sync event
   128  			// for early subscribers will be emitted once the informer has
   129  			// synchronized.
   130  			events = nodes.Events(ctx)
   131  		}))
   132  
   133  	tlog := hivetest.Logger(t)
   134  	if err := hive.Start(tlog, ctx); err != nil {
   135  		t.Fatalf("hive.Start failed: %s", err)
   136  	}
   137  
   138  	// First event should be the node (initial set)
   139  	ev, ok := <-events
   140  	require.True(t, ok)
   141  	require.Equal(t, resource.Upsert, ev.Kind)
   142  	require.Equal(t, nodeName, ev.Key.Name)
   143  	require.Equal(t, node.Name, ev.Object.GetName())
   144  	require.Equal(t, node.Status.Phase, ev.Object.Status.Phase)
   145  	ev.Done(nil)
   146  
   147  	// Second should be a sync.
   148  	ev, ok = <-events
   149  	require.True(t, ok, "events channel closed unexpectedly")
   150  	require.Equal(t, resource.Sync, ev.Kind)
   151  	require.Nil(t, ev.Object)
   152  	ev.Done(nil)
   153  
   154  	// After sync event we can also use Store() without it blocking.
   155  	store, err := nodes.Store(ctx)
   156  	if err != nil {
   157  		t.Fatalf("unexpected error from Store(): %s", err)
   158  	}
   159  	testStore(t, node, store)
   160  
   161  	// Update the node and check the update event
   162  	node.Status.Phase = "update1"
   163  	node.ObjectMeta.ResourceVersion = "1"
   164  	fakeClient.KubernetesFakeClientset.Tracker().Update(
   165  		corev1.SchemeGroupVersion.WithResource("nodes"),
   166  		node.DeepCopy(), "")
   167  
   168  	ev, ok = <-events
   169  	require.True(t, ok, "events channel closed unexpectedly")
   170  	require.Equal(t, resource.Upsert, ev.Kind)
   171  	require.Equal(t, nodeName, ev.Key.Name)
   172  	require.Equal(t, corev1.NodePhase("update1"), ev.Object.Status.Phase)
   173  	ev.Done(nil)
   174  
   175  	// Test that multiple events for the same key are coalesced.
   176  	// We'll use another subscriber to validate that all the changes
   177  	// have been processed by the resource.
   178  	// This also verifies that late subscribers correctly receive the
   179  	// sync event.
   180  	{
   181  		ctx2, cancel2 := context.WithCancel(ctx)
   182  		events2 := nodes.Events(ctx2)
   183  
   184  		ev2, ok := <-events2
   185  		require.True(t, ok, "events channel closed unexpectedly")
   186  		require.Equal(t, resource.Upsert, ev2.Kind)
   187  		ev2.Done(nil)
   188  
   189  		ev2, ok = <-events2
   190  		require.True(t, ok, "events channel closed unexpectedly")
   191  		require.Equal(t, resource.Sync, ev2.Kind)
   192  		ev2.Done(nil)
   193  
   194  		for i := 2; i <= 10; i++ {
   195  			version := fmt.Sprintf("%d", i)
   196  			node.Status.Phase = corev1.NodePhase(fmt.Sprintf("update%d", i))
   197  			node.ObjectMeta.ResourceVersion = version
   198  			fakeClient.KubernetesFakeClientset.Tracker().Update(
   199  				corev1.SchemeGroupVersion.WithResource("nodes"),
   200  				node.DeepCopy(), "")
   201  			ev2, ok := <-events2
   202  			require.True(t, ok, "events channel closed unexpectedly")
   203  			require.Equal(t, resource.Upsert, ev2.Kind)
   204  			require.Equal(t, version, ev2.Object.ResourceVersion)
   205  			ev2.Done(nil)
   206  		}
   207  		cancel2()
   208  		for range events2 {
   209  		}
   210  	}
   211  
   212  	// We should now see either just the last change, or one intermediate change
   213  	// and the last change.
   214  	ev, ok = <-events
   215  	require.True(t, ok, "events channel closed unexpectedly")
   216  	require.Equal(t, resource.Upsert, ev.Kind)
   217  	require.Equal(t, nodeName, ev.Key.Name)
   218  	ev.Done(nil)
   219  	if ev.Object.ResourceVersion != node.ObjectMeta.ResourceVersion {
   220  		ev, ok = <-events
   221  		require.True(t, ok, "events channel closed unexpectedly")
   222  		require.Equal(t, resource.Upsert, ev.Kind)
   223  		require.Equal(t, nodeName, ev.Key.Name)
   224  		require.Equal(t, node.ObjectMeta.ResourceVersion, ev.Object.ResourceVersion)
   225  		ev.Done(nil)
   226  	}
   227  
   228  	// Finally delete the node
   229  	fakeClient.KubernetesFakeClientset.Tracker().Delete(
   230  		corev1.SchemeGroupVersion.WithResource("nodes"),
   231  		"", "some-node")
   232  
   233  	ev, ok = <-events
   234  	require.True(t, ok, "events channel closed unexpectedly")
   235  	require.Equal(t, resource.Delete, ev.Kind)
   236  	require.Equal(t, nodeName, ev.Key.Name)
   237  	require.Equal(t, node.ObjectMeta.ResourceVersion, ev.Object.ResourceVersion)
   238  	ev.Done(nil)
   239  
   240  	// Cancel the subscriber context and verify that the stream gets completed.
   241  	cancel()
   242  
   243  	// No more events should be observed.
   244  	ev, ok = <-events
   245  	if ok {
   246  		t.Fatalf("unexpected event still in stream: %v", ev)
   247  	}
   248  
   249  	// Finally check that the hive stops correctly. Note that we're not doing this in a
   250  	// defer to avoid potentially deadlocking on the Fatal calls.
   251  	if err := hive.Stop(tlog, context.TODO()); err != nil {
   252  		t.Fatalf("hive.Stop failed: %s", err)
   253  	}
   254  }
   255  
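        // createsAndDeletesListerWatcher is a fake cache.ListerWatcher and watch.Interface
        // that returns an empty initial list and then replays the watch events pushed into
        // its events channel.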
   256  type createsAndDeletesListerWatcher struct {
   257  	events chan watch.Event
   258  }
   259  
   260  func (lw *createsAndDeletesListerWatcher) ResultChan() <-chan watch.Event {
   261  	return lw.events
   262  }
   263  
   264  func (lw *createsAndDeletesListerWatcher) Stop() {
   265  	close(lw.events)
   266  }
   267  
   268  func (*createsAndDeletesListerWatcher) List(options metav1.ListOptions) (k8sRuntime.Object, error) {
   269  	return &corev1.NodeList{}, nil
   270  }
   271  
   272  func (lw *createsAndDeletesListerWatcher) Watch(options metav1.ListOptions) (watch.Interface, error) {
   273  	return lw, nil
   274  }
   275  
   276  var _ cache.ListerWatcher = &createsAndDeletesListerWatcher{}
   277  var _ watch.Interface = &createsAndDeletesListerWatcher{}
   278  
   279  func TestResource_RepeatedDelete(t *testing.T) {
   280  	var (
   281  		nodeName = "some-node"
   282  		node     = &corev1.Node{
   283  			ObjectMeta: metav1.ObjectMeta{
   284  				Name:            nodeName,
   285  				ResourceVersion: "0",
   286  			},
   287  			Status: corev1.NodeStatus{
   288  				Phase: "init",
   289  			},
   290  		}
   291  
   292  		nodes resource.Resource[*corev1.Node]
   293  
   294  		lw     = createsAndDeletesListerWatcher{events: make(chan watch.Event, 100)}
   295  		events <-chan resource.Event[*corev1.Node]
   296  	)
   297  
   298  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   299  	defer cancel()
   300  
   301  	hive := hive.New(
   302  		cell.Provide(
   303  			func(lc cell.Lifecycle) resource.Resource[*corev1.Node] {
   304  				return resource.New[*corev1.Node](lc, &lw)
   305  			}),
   306  
   307  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   308  			nodes = r
   309  
   310  			// Subscribing before the hive starts is allowed. The sync event
   311  			// for early subscribers will be emitted once the informer has
   312  			// synchronized.
   313  			events = nodes.Events(ctx)
   314  		}))
   315  
   316  	tlog := hivetest.Logger(t)
   317  	if err := hive.Start(tlog, ctx); err != nil {
   318  		t.Fatalf("hive.Start failed: %s", err)
   319  	}
   320  
   321  	ev, ok := <-events
   322  	require.True(t, ok, "events channel closed unexpectedly")
   323  	require.Equal(t, resource.Sync, ev.Kind)
   324  	require.Nil(t, ev.Object)
   325  	ev.Done(nil)
   326  
   327  	finalVersion := "99999"
   328  
   329  	// Repeatedly create and delete the node in the background
   330  	// while "unreliably" processing some of the delete events.
   331  	go func() {
   332  		for i := 0; i < 1000; i++ {
   333  			node.ObjectMeta.ResourceVersion = fmt.Sprintf("%d", i)
   334  
   335  			lw.events <- watch.Event{
   336  				Type:   watch.Added,
   337  				Object: node.DeepCopy(),
   338  			}
   339  
   340  			// Sleep tiny amount to force a context switch
   341  			time.Sleep(time.Microsecond)
   342  
   343  			lw.events <- watch.Event{
   344  				Type:   watch.Deleted,
   345  				Object: node.DeepCopy(),
   346  			}
   347  
   348  			// Sleep tiny amount to force a context switch
   349  			time.Sleep(time.Microsecond)
   350  		}
   351  
   352  		// Create final copy of the object to mark the end of the test.
   353  		node.ObjectMeta.ResourceVersion = finalVersion
   354  		lw.events <- watch.Event{
   355  			Type:   watch.Added,
   356  			Object: node.DeepCopy(),
   357  		}
   358  	}()
   359  
   360  	var (
   361  		lastDeleteVersion uint64
   362  		lastUpsertVersion uint64
   363  	)
   364  	exists := false
   365  
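        	// Consume events until the final version is observed (which cancels the
        	// subscriber context and closes the stream), verifying the upsert/delete
        	// ordering invariants along the way.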
   366  	for ev := range events {
   367  		if ev.Kind == resource.Delete {
   368  			version, _ := strconv.ParseUint(ev.Object.ObjectMeta.ResourceVersion, 10, 64)
   369  
    370  			// We should not see a delete for an object we never witnessed being created.
    371  			require.True(t, exists, "delete event for object that we didn't witness being created")
    372  
    373  			// The deleted object's version should match the last upserted object's version.
    374  			require.Equal(t, lastUpsertVersion, version, "expected deleted object version to equal the last upserted version")
    375  
    376  			// Check that we don't go back in time.
    377  			require.LessOrEqual(t, lastDeleteVersion, version, "expected non-decreasing ResourceVersion")
   378  			lastDeleteVersion = version
   379  
   380  			// Fail every 3rd deletion to test retrying.
   381  			if rand.IntN(3) == 0 {
   382  				ev.Done(errors.New("delete failed"))
   383  			} else {
   384  				exists = false
   385  				ev.Done(nil)
   386  			}
   387  		} else if ev.Kind == resource.Upsert {
   388  			exists = true
   389  
   390  			// Check that we don't go back in time
   391  			version, _ := strconv.ParseUint(ev.Object.ObjectMeta.ResourceVersion, 10, 64)
    392  			require.LessOrEqual(t, lastUpsertVersion, version, "expected non-decreasing ResourceVersion")
   393  			lastUpsertVersion = version
   394  
   395  			if ev.Object.ObjectMeta.ResourceVersion == finalVersion {
   396  				cancel()
   397  			}
   398  			ev.Done(nil)
   399  		}
   400  	}
   401  
   402  	// Finally check that the hive stops correctly. Note that we're not doing this in a
   403  	// defer to avoid potentially deadlocking on the Fatal calls.
   404  	require.NoError(t, hive.Stop(tlog, context.TODO()))
   405  }
   406  
   407  func TestResource_CompletionOnStop(t *testing.T) {
   408  	var nodes resource.Resource[*corev1.Node]
   409  
   410  	hive := hive.New(
   411  		k8sClient.FakeClientCell,
   412  		nodesResource,
   413  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   414  			nodes = r
   415  		}))
   416  
   417  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   418  	defer cancel()
   419  
   420  	tlog := hivetest.Logger(t)
   421  	if err := hive.Start(tlog, ctx); err != nil {
   422  		t.Fatalf("hive.Start failed: %s", err)
   423  	}
   424  
   425  	xs := nodes.Events(ctx)
   426  
   427  	// We should only see a sync event
   428  	ev := <-xs
   429  	assert.Equal(t, resource.Sync, ev.Kind)
   430  	ev.Done(nil)
   431  
   432  	// After sync Store() should not block and should be empty.
   433  	store, err := nodes.Store(ctx)
   434  	if err != nil {
    435  		t.Fatalf("unexpected error from Store(): %s", err)
   436  	}
   437  	if len(store.List()) != 0 {
   438  		t.Fatalf("expected empty store, got %d items", len(store.List()))
   439  	}
   440  
   441  	// Stop the hive to stop the resource.
   442  	if err := hive.Stop(tlog, ctx); err != nil {
   443  		t.Fatalf("hive.Stop failed: %s", err)
   444  	}
   445  
   446  	// No more events should be observed.
   447  	ev, ok := <-xs
   448  	if ok {
   449  		t.Fatalf("unexpected event still in channel: %v", ev)
   450  	}
   451  }
   452  
   453  func TestResource_WithTransform(t *testing.T) {
   454  	type StrippedNode = metav1.PartialObjectMetadata
   455  	var strippedNodes resource.Resource[*StrippedNode]
   456  	var fakeClient, cs = k8sClient.NewFakeClientset()
   457  
   458  	node := &corev1.Node{
   459  		ObjectMeta: metav1.ObjectMeta{
   460  			Name:            "node",
   461  			ResourceVersion: "0",
   462  		},
   463  		Status: corev1.NodeStatus{
   464  			Phase: "init",
   465  		},
   466  	}
   467  
    468  	strip := func(obj *corev1.Node) (*StrippedNode, error) {
    469  		return &StrippedNode{TypeMeta: obj.TypeMeta, ObjectMeta: obj.ObjectMeta}, nil
    470  	}
   471  
   472  	hive := hive.New(
   473  		cell.Provide(
   474  			func() k8sClient.Clientset { return cs },
   475  			func(lc cell.Lifecycle, c k8sClient.Clientset) resource.Resource[*StrippedNode] {
   476  				lw := utils.ListerWatcherFromTyped[*corev1.NodeList](c.CoreV1().Nodes())
   477  				return resource.New[*StrippedNode](lc, lw, resource.WithTransform(strip))
   478  			}),
   479  
   480  		cell.Invoke(func(r resource.Resource[*StrippedNode]) {
   481  			strippedNodes = r
   482  		}))
   483  
   484  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   485  	defer cancel()
   486  
   487  	tlog := hivetest.Logger(t)
   488  	if err := hive.Start(tlog, ctx); err != nil {
   489  		t.Fatalf("hive.Start failed: %s", err)
   490  	}
   491  
   492  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   493  		corev1.SchemeGroupVersion.WithResource("nodes"),
   494  		node.DeepCopy(), "")
   495  
   496  	events := strippedNodes.Events(ctx)
   497  
   498  	event := <-events
   499  	assert.Equal(t, resource.Upsert, event.Kind)
   500  	event.Done(nil)
   501  
   502  	event = <-events
   503  	assert.Equal(t, resource.Sync, event.Kind)
   504  	event.Done(nil)
   505  
   506  	// Stop the hive to stop the resource.
   507  	if err := hive.Stop(tlog, ctx); err != nil {
   508  		t.Fatalf("hive.Stop failed: %s", err)
   509  	}
   510  
   511  	// No more events should be observed.
   512  	event, ok := <-events
   513  	if ok {
   514  		t.Fatalf("unexpected event still in channel: %v", event)
   515  	}
   516  
   517  }
   518  
   519  func TestResource_WithoutIndexers(t *testing.T) {
   520  	var (
   521  		node = &corev1.Node{
   522  			ObjectMeta: metav1.ObjectMeta{
   523  				Name:            "test-node-1",
   524  				ResourceVersion: "0",
   525  			},
   526  		}
   527  		nodeResource   resource.Resource[*corev1.Node]
   528  		fakeClient, cs = k8sClient.NewFakeClientset()
   529  	)
   530  
   531  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   532  		corev1.SchemeGroupVersion.WithResource("nodes"),
   533  		node.DeepCopy(), "")
   534  
   535  	hive := hive.New(
   536  		cell.Provide(func() k8sClient.Clientset { return cs }),
   537  		cell.Provide(
   538  			func(lc cell.Lifecycle, cs k8sClient.Clientset) resource.Resource[*corev1.Node] {
   539  				lw := utils.ListerWatcherFromTyped[*corev1.NodeList](cs.CoreV1().Nodes())
   540  				return resource.New[*corev1.Node](lc, lw)
   541  			},
   542  		),
   543  
   544  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   545  			nodeResource = r
   546  		}),
   547  	)
   548  
   549  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   550  	defer cancel()
   551  
   552  	tlog := hivetest.Logger(t)
   553  	if err := hive.Start(tlog, ctx); err != nil {
   554  		t.Fatalf("hive.Start failed: %s", err)
   555  	}
   556  
   557  	events := nodeResource.Events(ctx)
   558  
   559  	// wait for the upsert event
   560  	ev, ok := <-events
   561  	require.True(t, ok)
   562  	require.Equal(t, resource.Upsert, ev.Kind)
   563  	ev.Done(nil)
   564  
   565  	// wait for the sync event
   566  	ev, ok = <-events
   567  	require.True(t, ok)
   568  	assert.Equal(t, resource.Sync, ev.Kind)
   569  	ev.Done(nil)
   570  
   571  	// get a reference to the store
   572  	store, err := nodeResource.Store(ctx)
   573  	if err != nil {
    574  		t.Fatalf("unexpected error from Store(): %s", err)
   575  	}
   576  
   577  	indexName, indexValue := "index-name", "index-value"
   578  
   579  	// ByIndex should not find any objects
   580  	_, err = store.ByIndex(indexName, indexValue)
   581  	if err == nil {
   582  		t.Fatalf("expected non-nil error from store.ByIndex(%q, %q), got nil", indexName, indexValue)
   583  	}
   584  
   585  	// IndexKeys should not find any keys
   586  	_, err = store.IndexKeys(indexName, indexValue)
   587  	if err == nil {
    588  		t.Fatalf("expected non-nil error from store.IndexKeys(%q, %q), got nil", indexName, indexValue)
   589  	}
   590  
   591  	// Stop the hive to stop the resource.
   592  	if err := hive.Stop(tlog, ctx); err != nil {
   593  		t.Fatalf("hive.Stop failed: %s", err)
   594  	}
   595  
   596  	// No more events should be observed.
   597  	ev, ok = <-events
   598  	if ok {
   599  		t.Fatalf("unexpected event still in channel: %v", ev)
   600  	}
   601  }
   602  
   603  func TestResource_WithIndexers(t *testing.T) {
   604  	var (
   605  		nodes = [...]*corev1.Node{
   606  			{
   607  				ObjectMeta: metav1.ObjectMeta{
   608  					Name: "test-node-1",
   609  					Labels: map[string]string{
   610  						"key": "node-1",
   611  					},
   612  					ResourceVersion: "0",
   613  				},
   614  			},
   615  			{
   616  				ObjectMeta: metav1.ObjectMeta{
   617  					Name: "test-node-2",
   618  					Labels: map[string]string{
   619  						"key": "node-2",
   620  					},
   621  					ResourceVersion: "0",
   622  				},
   623  			},
   624  			{
   625  				ObjectMeta: metav1.ObjectMeta{
   626  					Name: "test-node-3",
   627  					Labels: map[string]string{
   628  						"key": "node-3",
   629  					},
   630  					ResourceVersion: "0",
   631  				},
   632  			},
   633  		}
   634  		nodeResource   resource.Resource[*corev1.Node]
   635  		fakeClient, cs = k8sClient.NewFakeClientset()
   636  
   637  		indexName = "node-index-key"
    638  		indexFunc = func(obj interface{}) ([]string, error) {
    639  			switch n := obj.(type) {
    640  			case *corev1.Node:
    641  				return []string{n.Name}, nil
    642  			}
    643  			return nil, errors.New("object is not a *corev1.Node")
    644  		}
   645  	)
   646  
   647  	for _, node := range nodes {
   648  		fakeClient.KubernetesFakeClientset.Tracker().Create(
   649  			corev1.SchemeGroupVersion.WithResource("nodes"),
   650  			node.DeepCopy(), "")
   651  	}
   652  
   653  	hive := hive.New(
   654  		cell.Provide(func() k8sClient.Clientset { return cs }),
   655  		cell.Provide(
   656  			func(lc cell.Lifecycle, cs k8sClient.Clientset) resource.Resource[*corev1.Node] {
   657  				lw := utils.ListerWatcherFromTyped[*corev1.NodeList](cs.CoreV1().Nodes())
   658  				return resource.New[*corev1.Node](
   659  					lc, lw,
   660  					resource.WithIndexers(cache.Indexers{indexName: indexFunc}),
   661  				)
   662  			},
   663  		),
   664  
   665  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   666  			nodeResource = r
   667  		}),
   668  	)
   669  
   670  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   671  	defer cancel()
   672  
   673  	tlog := hivetest.Logger(t)
   674  	if err := hive.Start(tlog, ctx); err != nil {
   675  		t.Fatalf("hive.Start failed: %s", err)
   676  	}
   677  
   678  	events := nodeResource.Events(ctx)
   679  
   680  	// wait for the upsert events
   681  	for i := 0; i < len(nodes); i++ {
   682  		ev, ok := <-events
   683  		require.True(t, ok)
   684  		require.Equal(t, resource.Upsert, ev.Kind)
   685  		ev.Done(nil)
   686  	}
   687  
   688  	// wait for the sync event
   689  	ev, ok := <-events
   690  	require.True(t, ok)
   691  	assert.Equal(t, resource.Sync, ev.Kind)
   692  	ev.Done(nil)
   693  
   694  	// get a reference to the store
   695  	store, err := nodeResource.Store(ctx)
   696  	if err != nil {
    697  		t.Fatalf("unexpected error from Store(): %s", err)
   698  	}
   699  
   700  	indexValue := "test-node-2"
   701  
   702  	// retrieve a specific node by its value for the indexer key
   703  	found, err := store.ByIndex(indexName, indexValue)
   704  	if err != nil {
    705  		t.Fatalf("unexpected error from store.ByIndex(%q, %q): %s", indexName, indexValue, err)
   706  	}
   707  	require.Len(t, found, 1)
    708  	require.Equal(t, indexValue, found[0].Name)
    709  	require.Len(t, found[0].Labels, 1)
    710  	require.Equal(t, "node-2", found[0].Labels["key"])
   711  
   712  	// retrieve the keys of the stored objects whose set of indexed values includes a specific value
   713  	keys, err := store.IndexKeys(indexName, indexValue)
   714  	if err != nil {
    715  		t.Fatalf("unexpected error from store.IndexKeys(%q, %q): %s", indexName, indexValue, err)
   716  	}
   717  	require.Len(t, keys, 1)
   718  	require.Equal(t, []string{indexValue}, keys)
   719  
   720  	// Stop the hive to stop the resource.
   721  	if err := hive.Stop(tlog, ctx); err != nil {
   722  		t.Fatalf("hive.Stop failed: %s", err)
   723  	}
   724  
   725  	// No more events should be observed.
   726  	ev, ok = <-events
   727  	if ok {
   728  		t.Fatalf("unexpected event still in channel: %v", ev)
   729  	}
   730  }
   731  
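        // RetryFiveTimes is an ErrorHandler that keeps retrying a failed event until it
        // has been attempted five times in total, as asserted in TestResource_Retries.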
   732  var RetryFiveTimes resource.ErrorHandler = func(key resource.Key, numRetries int, err error) resource.ErrorAction {
   733  	if numRetries >= 4 {
   734  		return resource.ErrorActionStop
   735  	}
   736  	return resource.ErrorActionRetry
   737  }
   738  
   739  func TestResource_Retries(t *testing.T) {
   740  	var (
   741  		nodes          resource.Resource[*corev1.Node]
   742  		fakeClient, cs = k8sClient.NewFakeClientset()
   743  	)
   744  
   745  	var rateLimiterUsed atomic.Int64
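        	// rateLimiter counts how many times it is instantiated so the test can verify
        	// that the WithRateLimiter option is actually used.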
   746  	rateLimiter := func() workqueue.RateLimiter {
   747  		rateLimiterUsed.Add(1)
   748  		return workqueue.DefaultControllerRateLimiter()
   749  	}
   750  
   751  	hive := hive.New(
   752  		cell.Provide(func() k8sClient.Clientset { return cs }),
   753  		cell.Provide(func(lc cell.Lifecycle, c k8sClient.Clientset) resource.Resource[*corev1.Node] {
   754  			nodesLW := utils.ListerWatcherFromTyped[*corev1.NodeList](c.CoreV1().Nodes())
   755  			return resource.New[*corev1.Node](lc, nodesLW)
   756  		}),
   757  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   758  			nodes = r
   759  		}))
   760  
   761  	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
   762  	defer cancel()
   763  
   764  	tlog := hivetest.Logger(t)
   765  	err := hive.Start(tlog, ctx)
   766  	assert.NoError(t, err)
   767  
   768  	// Check that the WithRateLimiter option works.
   769  	{
   770  		ctx, cancel := context.WithCancel(ctx)
   771  		events := nodes.Events(ctx, resource.WithRateLimiter(rateLimiter()), resource.WithErrorHandler(RetryFiveTimes))
    772  		ev := <-events
    773  		assert.Equal(t, resource.Sync, ev.Kind)
   774  		assert.Equal(t, int64(1), rateLimiterUsed.Load())
   775  		ev.Done(nil)
   776  		cancel()
   777  		_, ok := <-events
   778  		assert.False(t, ok)
   779  	}
   780  
   781  	// Test that sync events are retried
   782  	{
   783  		xs := nodes.Events(ctx, resource.WithErrorHandler(RetryFiveTimes))
   784  
   785  		expectedErr := errors.New("sync")
   786  		var numRetries atomic.Int64
   787  
   788  		for ev := range xs {
   789  			switch ev.Kind {
   790  			case resource.Sync:
   791  				numRetries.Add(1)
   792  				ev.Done(expectedErr)
   793  			case resource.Upsert:
   794  				ev.Done(nil)
   795  			case resource.Delete:
   796  				t.Fatalf("unexpected delete of %s", ev.Key)
   797  			}
   798  		}
   799  
   800  		assert.Equal(t, int64(5), numRetries.Load(), "expected to see 5 retries for sync")
   801  	}
   802  
   803  	var node = &corev1.Node{
   804  		ObjectMeta: metav1.ObjectMeta{
   805  			Name:            "some-node",
   806  			ResourceVersion: "0",
   807  		},
   808  		Status: corev1.NodeStatus{
   809  			Phase: "init",
   810  		},
   811  	}
   812  
   813  	// Create the initial version of the node.
   814  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   815  		corev1.SchemeGroupVersion.WithResource("nodes"),
   816  		node, "")
   817  
   818  	// Test that update events are retried
   819  	{
   820  		xs := nodes.Events(ctx, resource.WithErrorHandler(RetryFiveTimes))
   821  
   822  		expectedErr := errors.New("update")
   823  		var numRetries atomic.Int64
   824  
   825  		for ev := range xs {
   826  			switch ev.Kind {
   827  			case resource.Sync:
   828  				ev.Done(nil)
   829  			case resource.Upsert:
   830  				numRetries.Add(1)
   831  				ev.Done(expectedErr)
   832  			case resource.Delete:
   833  				t.Fatalf("unexpected delete of %s", ev.Key)
   834  			}
   835  		}
   836  
   837  		assert.Equal(t, int64(5), numRetries.Load(), "expected to see 5 retries for update")
   838  	}
   839  
   840  	// Test that delete events are retried
   841  	{
   842  		xs := nodes.Events(ctx, resource.WithErrorHandler(RetryFiveTimes))
   843  
   844  		expectedErr := errors.New("delete")
   845  		var numRetries atomic.Int64
   846  
   847  		for ev := range xs {
   848  			switch ev.Kind {
   849  			case resource.Sync:
   850  				ev.Done(nil)
   851  			case resource.Upsert:
   852  				fakeClient.KubernetesFakeClientset.Tracker().Delete(
   853  					corev1.SchemeGroupVersion.WithResource("nodes"),
   854  					"", node.Name)
   855  				ev.Done(nil)
   856  			case resource.Delete:
   857  				numRetries.Add(1)
   858  				ev.Done(expectedErr)
   859  			}
   860  		}
   861  
   862  		assert.Equal(t, int64(5), numRetries.Load(), "expected to see 5 retries for delete")
   863  	}
   864  
   865  	err = hive.Stop(tlog, ctx)
   866  	assert.NoError(t, err)
   867  }
   868  
   869  func TestResource_Observe(t *testing.T) {
   870  	var (
   871  		nodeName = "some-node"
   872  		node     = &corev1.Node{
   873  			ObjectMeta: metav1.ObjectMeta{
   874  				Name:            nodeName,
   875  				ResourceVersion: "0",
   876  			},
   877  			Status: corev1.NodeStatus{
   878  				Phase: "init",
   879  			},
   880  		}
   881  		fakeClient, cs = k8sClient.NewFakeClientset()
   882  		nodes          resource.Resource[*corev1.Node]
   883  	)
   884  
   885  	// Create the initial version of the node. Do this before anything
   886  	// starts watching the resources to avoid a race.
   887  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   888  		corev1.SchemeGroupVersion.WithResource("nodes"),
   889  		node.DeepCopy(), "")
   890  
   891  	hive := hive.New(
   892  		cell.Provide(func() k8sClient.Clientset { return cs }),
   893  		nodesResource,
   894  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   895  			nodes = r
   896  		}))
   897  
   898  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   899  	defer cancel()
   900  
   901  	tlog := hivetest.Logger(t)
   902  	if err := hive.Start(tlog, ctx); err != nil {
   903  		t.Fatalf("hive.Start failed: %s", err)
   904  	}
   905  
   906  	eventWg := sync.WaitGroup{}
   907  	completeWg := sync.WaitGroup{}
   908  
   909  	eventWg.Add(2)    // upsert & sync
   910  	completeWg.Add(1) // complete
   911  
   912  	nodes.Observe(ctx, func(e resource.Event[*corev1.Node]) {
   913  		e.Done(nil)
   914  		eventWg.Done()
   915  	}, func(err error) {
   916  		completeWg.Done()
   917  	})
   918  
   919  	eventWg.Wait()
   920  
   921  	// Stop the hive to stop the resource and trigger completion.
   922  	if err := hive.Stop(tlog, ctx); err != nil {
   923  		t.Fatalf("hive.Stop failed: %s", err)
   924  	}
   925  	completeWg.Wait()
   926  }
   927  
   928  func TestResource_Releasable(t *testing.T) {
   929  	var (
   930  		nodeName = "some-node"
   931  		node     = &corev1.Node{
   932  			ObjectMeta: metav1.ObjectMeta{
   933  				Name:            nodeName,
   934  				ResourceVersion: "0",
   935  			},
   936  			Status: corev1.NodeStatus{
   937  				Phase: "init",
   938  			},
   939  		}
   940  		nodeResource   resource.Resource[*corev1.Node]
   941  		fakeClient, cs = k8sClient.NewFakeClientset()
   942  	)
   943  
   944  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   945  		corev1.SchemeGroupVersion.WithResource("nodes"),
   946  		node.DeepCopy(), "")
   947  
   948  	hive := hive.New(
   949  		cell.Provide(func() k8sClient.Clientset { return cs }),
   950  		cell.Provide(
   951  			func(lc cell.Lifecycle, cs k8sClient.Clientset) resource.Resource[*corev1.Node] {
   952  				lw := utils.ListerWatcherFromTyped[*corev1.NodeList](cs.CoreV1().Nodes())
   953  				return resource.New[*corev1.Node](
   954  					lc, lw,
   955  					resource.WithStoppableInformer(),
   956  				)
   957  			},
   958  		),
   959  
   960  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
   961  			nodeResource = r
   962  		}),
   963  	)
   964  
   965  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
   966  	defer cancel()
   967  
   968  	tlog := hivetest.Logger(t)
   969  	assert.NoError(t, hive.Start(tlog, ctx))
   970  
   971  	var (
   972  		store resource.Store[*corev1.Node]
   973  		err   error
   974  	)
   975  
   976  	// get a reference to the store and start the informer
   977  	store, err = nodeResource.Store(ctx)
   978  	assert.NoError(t, err)
   979  
   980  	// store should be synced
   981  	testStore(t, node, store)
   982  
   983  	// release the store and wait some time for the underlying informer to stop
   984  	store.Release()
   985  	time.Sleep(20 * time.Millisecond)
   986  
   987  	// the old store reference is still valid
   988  	testStore(t, node, store)
   989  
   990  	// add a new node and wait some time to be sure the update is visible if there is an active subscriber
   991  	node = &corev1.Node{
   992  		ObjectMeta: metav1.ObjectMeta{
   993  			Name:            "another-node",
   994  			ResourceVersion: "0",
   995  		},
   996  	}
   997  	fakeClient.KubernetesFakeClientset.Tracker().Create(
   998  		corev1.SchemeGroupVersion.WithResource("nodes"),
   999  		node.DeepCopy(), "")
  1000  	time.Sleep(20 * time.Millisecond)
  1001  
  1002  	// the store won't see any change, since there is no active informer keeping it up to date
  1003  	assert.Len(t, store.List(), 1)
  1004  
  1005  	// take a new reference to the store and start a new informer
  1006  	store, err = nodeResource.Store(ctx)
  1007  	assert.NoError(t, err)
  1008  
  1009  	// new store reference should be synced
  1010  	assert.Len(t, store.List(), 2)
  1011  
  1012  	subCtx, subCancel := context.WithCancel(ctx)
  1013  	defer subCancel()
  1014  
  1015  	var wg sync.WaitGroup
  1016  
  1017  	// subscribe to the resource events stream
  1018  	subscribed := subscribe(subCtx, &wg, nodeResource)
  1019  
  1020  	// wait for subscription
  1021  	<-subscribed
  1022  
  1023  	// release the store
  1024  	store.Release()
  1025  
  1026  	// the store reference is still valid
  1027  	assert.Len(t, store.List(), 2)
  1028  
  1029  	// the store will be eventually updated because the underlying informer is still alive thanks to the Events subscriber
  1030  	node = &corev1.Node{
  1031  		ObjectMeta: metav1.ObjectMeta{
  1032  			Name:            "another-node-again",
  1033  			ResourceVersion: "0",
  1034  		},
  1035  	}
  1036  	fakeClient.KubernetesFakeClientset.Tracker().Create(
  1037  		corev1.SchemeGroupVersion.WithResource("nodes"),
  1038  		node.DeepCopy(), "")
  1039  	assert.Eventually(
  1040  		t,
  1041  		func() bool { return len(store.List()) == 3 },
  1042  		time.Second,
  1043  		5*time.Millisecond,
  1044  	)
  1045  
  1046  	// stop the subscriber and wait some time for the underlying informer to stop
  1047  	subCancel()
  1048  	time.Sleep(20 * time.Millisecond)
  1049  
  1050  	// add a new node and wait some time to be sure the update is visible if there is an active subscriber
  1051  	node = &corev1.Node{
  1052  		ObjectMeta: metav1.ObjectMeta{
  1053  			Name:            "another-node-again-and-again",
  1054  			ResourceVersion: "0",
  1055  		},
  1056  	}
  1057  	fakeClient.KubernetesFakeClientset.Tracker().Create(
  1058  		corev1.SchemeGroupVersion.WithResource("nodes"),
  1059  		node.DeepCopy(), "")
  1060  
  1061  	// the underlying informer is now stopped and the store has not been updated
  1062  	assert.Len(t, store.List(), 3)
  1063  
  1064  	assert.NoError(t, hive.Stop(tlog, ctx))
  1065  }
  1066  
  1067  func TestResource_ReleasableCtxCanceled(t *testing.T) {
  1068  	var (
  1069  		nodeName = "some-node"
  1070  		node     = &corev1.Node{
  1071  			ObjectMeta: metav1.ObjectMeta{
  1072  				Name:            nodeName,
  1073  				ResourceVersion: "0",
  1074  			},
  1075  			Status: corev1.NodeStatus{
  1076  				Phase: "init",
  1077  			},
  1078  		}
  1079  		nodeResource   resource.Resource[*corev1.Node]
  1080  		fakeClient, cs = k8sClient.NewFakeClientset()
  1081  	)
  1082  
  1083  	fakeClient.KubernetesFakeClientset.Tracker().Create(
  1084  		corev1.SchemeGroupVersion.WithResource("nodes"),
  1085  		node.DeepCopy(), "")
  1086  
  1087  	hive := hive.New(
  1088  		cell.Provide(func() k8sClient.Clientset { return cs }),
  1089  		cell.Provide(
  1090  			func(lc cell.Lifecycle, cs k8sClient.Clientset) resource.Resource[*corev1.Node] {
  1091  				lw := utils.ListerWatcherFromTyped[*corev1.NodeList](cs.CoreV1().Nodes())
  1092  				return resource.New[*corev1.Node](
  1093  					lc, lw,
  1094  					resource.WithStoppableInformer(),
  1095  				)
  1096  			},
  1097  		),
  1098  
  1099  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
  1100  			nodeResource = r
  1101  		}),
  1102  	)
  1103  
  1104  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
  1105  	defer cancel()
  1106  
  1107  	tlog := hivetest.Logger(t)
  1108  	assert.NoError(t, hive.Start(tlog, ctx))
  1109  
  1110  	subCtx, subCancel := context.WithCancel(ctx)
  1111  	subCancel()
  1112  
  1113  	// Store should return context.Canceled and should release the resource
  1114  	_, err := nodeResource.Store(subCtx)
  1115  	assert.ErrorIs(t, err, context.Canceled)
  1116  
  1117  	// resource should be able to start again after the first call to Store has been canceled
  1118  	store, err := nodeResource.Store(ctx)
  1119  	assert.NoError(t, err)
  1120  
  1121  	// store should be synced
  1122  	testStore(t, node, store)
  1123  
  1124  	// release the store and wait some time for the underlying informer to stop
  1125  	store.Release()
  1126  	time.Sleep(20 * time.Millisecond)
  1127  
  1128  	// the store reference is still valid
  1129  	testStore(t, node, store)
  1130  
  1131  	// add a new node and wait some time to be sure the update is visible if there is an active subscriber
  1132  	node = &corev1.Node{
  1133  		ObjectMeta: metav1.ObjectMeta{
  1134  			Name:            "another-node",
  1135  			ResourceVersion: "0",
  1136  		},
  1137  	}
  1138  	fakeClient.KubernetesFakeClientset.Tracker().Create(
  1139  		corev1.SchemeGroupVersion.WithResource("nodes"),
  1140  		node.DeepCopy(), "")
  1141  	time.Sleep(20 * time.Millisecond)
  1142  
  1143  	// the store won't see any change, since the informer should have been stopped
  1144  	// (first call to Store was canceled and the second reference has been explicitly released)
  1145  	assert.Len(t, store.List(), 1)
  1146  
  1147  	assert.NoError(t, hive.Stop(tlog, ctx))
  1148  }
  1149  
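        // subscribe starts a goroutine that subscribes to the resource's event stream and
        // acknowledges every event with Done(nil). The returned channel is closed once the
        // subscription has been established.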
  1150  func subscribe(ctx context.Context, wg *sync.WaitGroup, nodes resource.Resource[*corev1.Node]) <-chan struct{} {
  1151  	subscribed := make(chan struct{})
  1152  
  1153  	wg.Add(1)
  1154  	go func() {
  1155  		defer wg.Done()
  1156  
  1157  		events := nodes.Events(ctx)
  1158  		close(subscribed)
  1159  
  1160  		for ev := range events {
  1161  			ev.Done(nil)
  1162  		}
  1163  	}()
  1164  
  1165  	return subscribed
  1166  }
  1167  
  1168  //
  1169  // Benchmarks
  1170  //
  1171  
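        // benchmarkListerWatcher is a minimal fake ListerWatcher for the benchmark: it
        // lists nothing and streams the watch events pushed into its events channel.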
  1172  type benchmarkListerWatcher struct {
  1173  	events chan watch.Event
  1174  }
  1175  
  1176  func (lw *benchmarkListerWatcher) List(opts metav1.ListOptions) (k8sRuntime.Object, error) {
  1177  	return &corev1.NodeList{}, nil
  1178  }
  1179  func (lw *benchmarkListerWatcher) Watch(opts metav1.ListOptions) (watch.Interface, error) {
  1180  	return lw, nil
  1181  }
  1182  func (lw *benchmarkListerWatcher) Stop() {
  1183  }
  1184  func (lw *benchmarkListerWatcher) ResultChan() <-chan watch.Event {
  1185  	return lw.events
  1186  }
  1187  
  1188  func BenchmarkResource(b *testing.B) {
  1189  	var (
  1190  		nodes resource.Resource[*corev1.Node]
  1191  		lw    = &benchmarkListerWatcher{
  1192  			events: make(chan watch.Event, 128),
  1193  		}
  1194  	)
  1195  
  1196  	hive := hive.New(
  1197  		cell.Provide(func(lc cell.Lifecycle) resource.Resource[*corev1.Node] {
  1198  			return resource.New[*corev1.Node](lc, lw)
  1199  		}),
  1200  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
  1201  			nodes = r
  1202  		}))
  1203  
  1204  	tlog := hivetest.Logger(b)
  1205  	err := hive.Start(tlog, context.TODO())
  1206  	assert.NoError(b, err)
  1207  
  1208  	ctx, cancel := context.WithCancel(context.Background())
  1209  	events := nodes.Events(ctx)
  1210  
  1211  	ev := <-events
  1212  	assert.Equal(b, resource.Sync, ev.Kind)
  1213  	ev.Done(nil)
  1214  
  1215  	b.ResetTimer()
  1216  
  1217  	var wg sync.WaitGroup
  1218  
  1219  	// Feed in b.N nodes as watcher events
  1220  	wg.Add(1)
  1221  	go func() {
  1222  		for i := 0; i < b.N; i++ {
  1223  			name := fmt.Sprintf("node-%d", i)
  1224  			lw.events <- watch.Event{Type: watch.Added, Object: &corev1.Node{
  1225  				ObjectMeta: metav1.ObjectMeta{
  1226  					Name: name,
  1227  					UID:  types.UID(name),
  1228  				},
  1229  			}}
  1230  		}
  1231  		wg.Done()
  1232  	}()
  1233  
  1234  	// Consume the events via the resource
  1235  	for i := 0; i < b.N; i++ {
  1236  		ev, ok := <-events
  1237  		assert.True(b, ok)
  1238  		assert.Equal(b, resource.Upsert, ev.Kind)
  1239  		ev.Done(nil)
  1240  	}
  1241  
  1242  	cancel()
  1243  	for ev := range events {
  1244  		ev.Done(nil)
  1245  	}
  1246  
  1247  	err = hive.Stop(tlog, context.TODO())
  1248  	assert.NoError(b, err)
  1249  
  1250  	wg.Wait()
  1251  }
  1252  
  1253  func TestResource_SkippedDonePanics(t *testing.T) {
  1254  	t.Skip("This test can only be run manually as it tests finalizer panicking")
  1255  
  1256  	var (
  1257  		node = &corev1.Node{
  1258  			ObjectMeta: metav1.ObjectMeta{
  1259  				Name:            "some-node",
  1260  				ResourceVersion: "0",
  1261  			},
  1262  			Status: corev1.NodeStatus{
  1263  				Phase: "init",
  1264  			},
  1265  		}
  1266  		nodes          resource.Resource[*corev1.Node]
  1267  		fakeClient, cs = k8sClient.NewFakeClientset()
  1268  		events         <-chan resource.Event[*corev1.Node]
  1269  	)
  1270  
  1271  	// Create the initial version of the node. Do this before anything
  1272  	// starts watching the resources to avoid a race.
  1273  	fakeClient.KubernetesFakeClientset.Tracker().Create(
  1274  		corev1.SchemeGroupVersion.WithResource("nodes"),
  1275  		node.DeepCopy(), "")
  1276  
  1277  	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
  1278  	defer cancel()
  1279  
  1280  	hive := hive.New(
  1281  		cell.Provide(func() k8sClient.Clientset { return cs }),
  1282  		nodesResource,
  1283  		cell.Invoke(func(r resource.Resource[*corev1.Node]) {
  1284  			nodes = r
  1285  
  1286  			// Subscribing before the hive starts is allowed. The sync event
  1287  			// for early subscribers will be emitted once the informer has
  1288  			// synchronized.
  1289  			events = nodes.Events(ctx)
  1290  		}))
  1291  
  1292  	tlog := hivetest.Logger(t)
  1293  	if err := hive.Start(tlog, ctx); err != nil {
  1294  		t.Fatalf("hive.Start failed: %s", err)
  1295  	}
  1296  
  1297  	// First event should be the node (initial set)
  1298  	ev := <-events
  1299  	assert.Equal(t, resource.Upsert, ev.Kind)
  1300  	// Skipping the Done() call:
  1301  	// ev.Done(nil)
  1302  
  1303  	// Finalizer will now panic.
  1304  	<-events
  1305  }
  1306  
  1307  //
  1308  // Helpers
  1309  //
  1310  
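        // nodesResource provides a Resource[*corev1.Node] backed by the typed Node
        // lister-watcher from the Clientset. It is shared by several tests above.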
  1311  var nodesResource = cell.Provide(
  1312  	func(lc cell.Lifecycle, c k8sClient.Clientset) resource.Resource[*corev1.Node] {
  1313  		lw := utils.ListerWatcherFromTyped[*corev1.NodeList](c.CoreV1().Nodes())
  1314  		return resource.New[*corev1.Node](lc, lw)
  1315  	},
  1316  )