k8s.io/apiserver@v0.31.1/pkg/storage/cacher/cache_watcher_test.go

     1  /*
     2  Copyright 2023 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cacher
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"reflect"
    23  	"sync"
    24  	"testing"
    25  	"time"
    26  
    27  	"github.com/google/go-cmp/cmp"
    28  	v1 "k8s.io/api/core/v1"
    29  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    30  	"k8s.io/apimachinery/pkg/fields"
    31  	"k8s.io/apimachinery/pkg/labels"
    32  	"k8s.io/apimachinery/pkg/runtime"
    33  	"k8s.io/apimachinery/pkg/runtime/schema"
    34  	"k8s.io/apimachinery/pkg/util/wait"
    35  	"k8s.io/apimachinery/pkg/watch"
    36  	"k8s.io/apiserver/pkg/storage"
    37  	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
    38  	"k8s.io/client-go/tools/cache"
    39  	testingclock "k8s.io/utils/clock/testing"
    40  )
    41  
    42  // TestCacheWatcherCleanupNotBlockedByResult verifies that the cacheWatcher.process goroutine is
    43  // properly cleaned up even if writes to the cacheWatcher.result channel are blocked.
    44  func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
    45  	var lock sync.RWMutex
    46  	var w *cacheWatcher
    47  	count := 0
    48  	filter := func(string, labels.Set, fields.Set) bool { return true }
    49  	forget := func(drainWatcher bool) {
    50  		lock.Lock()
    51  		defer lock.Unlock()
    52  		count++
    53  		// forget() has to stop the watcher, as only stopping the watcher
    54  		// triggers stopping the process() goroutine, which is what we are
    55  		// ultimately waiting for in this test.
    56  		w.setDrainInputBufferLocked(drainWatcher)
    57  		w.stopLocked()
    58  	}
    59  	initEvents := []*watchCacheEvent{
    60  		{Object: &v1.Pod{}},
    61  		{Object: &v1.Pod{}},
    62  	}
    63  	// Set the size of the w.result buffer to 0 so that writes to
    64  	// w.result block.
    65  	w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
    66  	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 0)
    67  	w.Stop()
    68  	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
    69  		lock.RLock()
    70  		defer lock.RUnlock()
    71  		return count == 2, nil
    72  	}); err != nil {
    73  		t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
    74  	}
    75  }
    76  
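        // TestCacheWatcherHandlesFiltering verifies that the cacheWatcher turns objects moving into and
        // out of the field selector into synthetic Added and Deleted events, respectively.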
    77  func TestCacheWatcherHandlesFiltering(t *testing.T) {
    78  	filter := func(_ string, _ labels.Set, field fields.Set) bool {
    79  		return field["spec.nodeName"] == "host"
    80  	}
    81  	forget := func(bool) {}
    82  
    83  	testCases := []struct {
    84  		events   []*watchCacheEvent
    85  		expected []watch.Event
    86  	}{
    87  		// properly handle an object that initially matches the filter, then stops matching (seen as Deleted), then matches again (seen as Added)
    88  		{
    89  			events: []*watchCacheEvent{
    90  				{
    91  					Type:            watch.Added,
    92  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
    93  					ObjFields:       fields.Set{"spec.nodeName": "host"},
    94  					ResourceVersion: 1,
    95  				},
    96  				{
    97  					Type:            watch.Modified,
    98  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
    99  					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
   100  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
   101  					ObjFields:       fields.Set{"spec.nodeName": ""},
   102  					ResourceVersion: 2,
   103  				},
   104  				{
   105  					Type:            watch.Modified,
   106  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
   107  					PrevObjFields:   fields.Set{"spec.nodeName": ""},
   108  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
   109  					ObjFields:       fields.Set{"spec.nodeName": "host"},
   110  					ResourceVersion: 3,
   111  				},
   112  			},
   113  			expected: []watch.Event{
   114  				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}}},
   115  				{Type: watch.Deleted, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}}},
   116  				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}}},
   117  			},
   118  		},
   119  		// properly handle ignoring changes while the object doesn't match the filter, then the object getting added, then deleted
   120  		{
   121  			events: []*watchCacheEvent{
   122  				{
   123  					Type:            watch.Added,
   124  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
   125  					ObjFields:       fields.Set{"spec.nodeName": ""},
   126  					ResourceVersion: 1,
   127  				},
   128  				{
   129  					Type:            watch.Modified,
   130  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
   131  					PrevObjFields:   fields.Set{"spec.nodeName": ""},
   132  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
   133  					ObjFields:       fields.Set{"spec.nodeName": ""},
   134  					ResourceVersion: 2,
   135  				},
   136  				{
   137  					Type:            watch.Modified,
   138  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
   139  					PrevObjFields:   fields.Set{"spec.nodeName": ""},
   140  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
   141  					ObjFields:       fields.Set{"spec.nodeName": "host"},
   142  					ResourceVersion: 3,
   143  				},
   144  				{
   145  					Type:            watch.Modified,
   146  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
   147  					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
   148  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}},
   149  					ObjFields:       fields.Set{"spec.nodeName": "host"},
   150  					ResourceVersion: 4,
   151  				},
   152  				{
   153  					Type:            watch.Modified,
   154  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}},
   155  					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
   156  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}},
   157  					ObjFields:       fields.Set{"spec.nodeName": ""},
   158  					ResourceVersion: 5,
   159  				},
   160  				{
   161  					Type:            watch.Modified,
   162  					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}},
   163  					PrevObjFields:   fields.Set{"spec.nodeName": ""},
   164  					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "6"}},
   165  					ObjFields:       fields.Set{"spec.nodeName": ""},
   166  					ResourceVersion: 6,
   167  				},
   168  			},
   169  			expected: []watch.Event{
   170  				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}}},
   171  				{Type: watch.Modified, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}}},
   172  				{Type: watch.Deleted, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}}},
   173  			},
   174  		},
   175  	}
   176  
   177  TestCase:
   178  	for i, testCase := range testCases {
   179  		// set the size of the buffer of w.result to 0, so that the writes to
   180  		// w.result is blocked.
   181  		for j := range testCase.events {
   182  			testCase.events[j].ResourceVersion = uint64(j) + 1
   183  		}
   184  
   185  		w := newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
   186  		go w.processInterval(context.Background(), intervalFromEvents(testCase.events), 0)
   187  
   188  		ch := w.ResultChan()
   189  		for j, event := range testCase.expected {
   190  			e := <-ch
   191  			if !reflect.DeepEqual(event, e) {
   192  				t.Errorf("%d: unexpected event %d: %s", i, j, cmp.Diff(event, e))
   193  				break TestCase
   194  			}
   195  		}
   196  		select {
   197  		case obj, ok := <-ch:
   198  			t.Errorf("%d: unexpected excess event: %#v %t", i, obj, ok)
   199  			break TestCase
   200  		default:
   201  		}
   202  		w.setDrainInputBufferLocked(false)
   203  		w.stopLocked()
   204  	}
   205  }
   206  
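        // TestCacheWatcherStoppedInAnotherGoroutine verifies that stopping the watcher from another
        // goroutine, racing with its zero timeout, neither blocks Stop nor breaks event delivery afterwards.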
   207  func TestCacheWatcherStoppedInAnotherGoroutine(t *testing.T) {
   208  	var w *cacheWatcher
   209  	done := make(chan struct{})
   210  	filter := func(string, labels.Set, fields.Set) bool { return true }
   211  	forget := func(drainWatcher bool) {
   212  		w.setDrainInputBufferLocked(drainWatcher)
   213  		w.stopLocked()
   214  		done <- struct{}{}
   215  	}
   216  
   217  	maxRetriesToProduceTheRaceCondition := 1000
   218  	// Simulate the timer being fired and stopped concurrently by setting the
   219  	// timeout to zero and running the Stop goroutine concurrently.
   220  	// Make sure that the watcher will not be blocked on Stop.
   221  	for i := 0; i < maxRetriesToProduceTheRaceCondition; i++ {
   222  		w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
   223  		go w.Stop()
   224  		select {
   225  		case <-done:
   226  		case <-time.After(time.Second):
   227  			t.Fatal("stop is blocked when the timer is fired concurrently")
   228  		}
   229  	}
   230  
   231  	deadline := time.Now().Add(time.Hour)
   232  	// After that, verify that the cacheWatcher.process goroutine works correctly.
   233  	for i := 0; i < maxRetriesToProduceTheRaceCondition; i++ {
   234  		w = newCacheWatcher(2, filter, emptyFunc, storage.APIObjectVersioner{}, deadline, false, schema.GroupResource{Resource: "pods"}, "")
   235  		w.input <- &watchCacheEvent{Object: &v1.Pod{}, ResourceVersion: uint64(i + 1)}
   236  		ctx, cancel := context.WithDeadline(context.Background(), deadline)
   237  		defer cancel()
   238  		go w.processInterval(ctx, intervalFromEvents(nil), 0)
   239  		select {
   240  		case <-w.ResultChan():
   241  		case <-time.After(time.Second):
   242  			t.Fatal("expected to receive an event on ResultChan")
   243  		}
   244  		w.setDrainInputBufferLocked(false)
   245  		w.stopLocked()
   246  	}
   247  }
   248  
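        // TestCacheWatcherStoppedOnDestroy verifies that stopping the cacher closes the result channels
        // of the watchers it serves.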
   249  func TestCacheWatcherStoppedOnDestroy(t *testing.T) {
   250  	backingStorage := &dummyStorage{}
   251  	cacher, _, err := newTestCacher(backingStorage)
   252  	if err != nil {
   253  		t.Fatalf("Couldn't create cacher: %v", err)
   254  	}
   255  	defer cacher.Stop()
   256  
   257  	// Wait until cacher is initialized.
   258  	if err := cacher.ready.wait(context.Background()); err != nil {
   259  		t.Fatalf("unexpected error waiting for the cache to be ready: %v", err)
   260  	}
   261  
   262  	w, err := cacher.Watch(context.Background(), "pods/ns", storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
   263  	if err != nil {
   264  		t.Fatalf("Failed to create watch: %v", err)
   265  	}
   266  
   267  	watchClosed := make(chan struct{})
   268  	go func() {
   269  		defer close(watchClosed)
   270  		for event := range w.ResultChan() {
   271  			switch event.Type {
   272  			case watch.Added, watch.Modified, watch.Deleted:
   273  				// ok
   274  			default:
   275  				t.Errorf("unexpected event %#v", event)
   276  			}
   277  		}
   278  	}()
   279  
   280  	cacher.Stop()
   281  
   282  	select {
   283  	case <-watchClosed:
   284  	case <-time.After(wait.ForeverTestTimeout):
   285  		t.Errorf("timed out waiting for watch to close")
   286  	}
   288  }
   289  
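        // TestResourceVersionAfterInitEvents verifies that an event whose resource version is not newer
        // than the resource version of the init events is not delivered again after them.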
   290  func TestResourceVersionAfterInitEvents(t *testing.T) {
   291  	getAttrsFunc := func(obj runtime.Object) (labels.Set, fields.Set, error) {
   292  		return nil, nil, nil
   293  	}
   294  
   295  	const numObjects = 10
   296  	store := cache.NewIndexer(storeElementKey, storeElementIndexers(nil))
   297  
   298  	for i := 0; i < numObjects; i++ {
   299  		elem := makeTestStoreElement(makeTestPod(fmt.Sprintf("pod-%d", i), uint64(i)))
   300  		store.Add(elem)
   301  	}
   302  
   303  	wci, err := newCacheIntervalFromStore(numObjects, store, getAttrsFunc, "", false)
   304  	if err != nil {
   305  		t.Fatal(err)
   306  	}
   307  
   308  	filter := func(_ string, _ labels.Set, _ fields.Set) bool { return true }
   309  	forget := func(_ bool) {}
   310  	deadline := time.Now().Add(time.Minute)
   311  	w := newCacheWatcher(numObjects+1, filter, forget, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, "")
   312  
   313  	// Simulate a situation where the last event, which was already in
   314  	// the state, wasn't yet processed by the cacher and will be delivered
   315  	// via the channel again.
   316  	event := &watchCacheEvent{
   317  		Type:            watch.Added,
   318  		Object:          makeTestPod(fmt.Sprintf("pod-%d", numObjects-1), uint64(numObjects-1)),
   319  		ResourceVersion: uint64(numObjects - 1),
   320  	}
   321  	if !w.add(event, time.NewTimer(time.Second)) {
   322  		t.Fatalf("failed to add event")
   323  	}
   324  	w.stopLocked()
   325  
   326  	wg := sync.WaitGroup{}
   327  	wg.Add(1)
   328  	go func() {
   329  		defer wg.Done()
   330  		w.processInterval(context.Background(), wci, uint64(numObjects-1))
   331  	}()
   332  
   333  	// We expect all init events to be delivered.
   334  	for i := 0; i < numObjects; i++ {
   335  		<-w.ResultChan()
   336  	}
   337  	// We don't expect any other event to be delivered, and thus
   338  	// we expect the ResultChan to be closed.
   339  	result, ok := <-w.ResultChan()
   340  	if ok {
   341  		t.Errorf("unexpected event: %#v", result)
   342  	}
   343  
   344  	wg.Wait()
   345  }
   346  
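        // TestTimeBucketWatchersBasic verifies that watchers are grouped into time buckets by their
        // deadlines and are only popped once their buckets expire.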
   347  func TestTimeBucketWatchersBasic(t *testing.T) {
   348  	filter := func(_ string, _ labels.Set, _ fields.Set) bool {
   349  		return true
   350  	}
   351  	forget := func(bool) {}
   352  
   353  	newWatcher := func(deadline time.Time) *cacheWatcher {
   354  		w := newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, "")
   355  		w.setBookmarkAfterResourceVersion(0)
   356  		return w
   357  	}
   358  
   359  	clock := testingclock.NewFakeClock(time.Now())
   360  	watchers := newTimeBucketWatchers(clock, defaultBookmarkFrequency)
   361  	now := clock.Now()
   362  	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(10 * time.Second)))
   363  	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(20 * time.Second)))
   364  	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(20 * time.Second)))
   365  
   366  	if len(watchers.watchersBuckets) != 2 {
   367  		t.Errorf("unexpected bucket size: %#v", watchers.watchersBuckets)
   368  	}
   369  	watchers0 := watchers.popExpiredWatchersThreadUnsafe()
   370  	if len(watchers0) != 0 {
   371  		t.Errorf("unexpected bucket size: %#v", watchers0)
   372  	}
   373  
   374  	clock.Step(10 * time.Second)
   375  	watchers1 := watchers.popExpiredWatchersThreadUnsafe()
   376  	if len(watchers1) != 1 || len(watchers1[0]) != 1 {
   377  		t.Errorf("unexpected bucket size: %v", watchers1)
   378  	}
   379  	watchers1 = watchers.popExpiredWatchersThreadUnsafe()
   380  	if len(watchers1) != 0 {
   381  		t.Errorf("unexpected bucket size: %#v", watchers1)
   382  	}
   383  
   384  	clock.Step(12 * time.Second)
   385  	watchers2 := watchers.popExpiredWatchersThreadUnsafe()
   386  	if len(watchers2) != 1 || len(watchers2[0]) != 2 {
   387  		t.Errorf("unexpected bucket size: %#v", watchers2)
   388  	}
   389  }
   390  
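        // makeWatchCacheEvent returns an Added watchCacheEvent for a pod whose name and resource version
        // are derived from rv.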
   391  func makeWatchCacheEvent(rv uint64) *watchCacheEvent {
   392  	return &watchCacheEvent{
   393  		Type: watch.Added,
   394  		Object: &v1.Pod{
   395  			ObjectMeta: metav1.ObjectMeta{
   396  				Name:            fmt.Sprintf("pod-%d", rv),
   397  				ResourceVersion: fmt.Sprintf("%d", rv),
   398  			},
   399  		},
   400  		ResourceVersion: rv,
   401  	}
   402  }
   403  
   404  // TestCacheWatcherDraining verifies the cacheWatcher.process goroutine is properly cleaned up when draining was requested
   405  func TestCacheWatcherDraining(t *testing.T) {
   406  	var lock sync.RWMutex
   407  	var w *cacheWatcher
   408  	count := 0
   409  	filter := func(string, labels.Set, fields.Set) bool { return true }
   410  	forget := func(drainWatcher bool) {
   411  		lock.Lock()
   412  		defer lock.Unlock()
   413  		count++
   414  		w.setDrainInputBufferLocked(drainWatcher)
   415  		w.stopLocked()
   416  	}
   417  	initEvents := []*watchCacheEvent{
   418  		makeWatchCacheEvent(5),
   419  		makeWatchCacheEvent(6),
   420  	}
   421  	w = newCacheWatcher(1, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
   422  	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 1)
   423  	if !w.add(makeWatchCacheEvent(7), time.NewTimer(1*time.Second)) {
   424  		t.Fatal("failed adding an event to the watcher")
   425  	}
   426  	forget(true) // drain the watcher
   427  
   428  	eventCount := 0
   429  	for range w.ResultChan() {
   430  		eventCount++
   431  	}
   432  	if eventCount != 3 {
   433  		t.Errorf("Unexpected number of objects received: %d, expected: 3", eventCount)
   434  	}
   435  	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
   436  		lock.RLock()
   437  		defer lock.RUnlock()
   438  		return count == 2, nil
   439  	}); err != nil {
   440  		t.Fatalf("expected forget() to be called twice, because processInterval should call Stop(): %v", err)
   441  	}
   442  }
   443  
   444  // TestCacheWatcherDrainingRequestedButNotDrained verifies the cacheWatcher.process goroutine is properly cleaned up when draining was requested
   445  // but the client never actually gets any data
   446  func TestCacheWatcherDrainingRequestedButNotDrained(t *testing.T) {
   447  	var lock sync.RWMutex
   448  	var w *cacheWatcher
   449  	count := 0
   450  	filter := func(string, labels.Set, fields.Set) bool { return true }
   451  	forget := func(drainWatcher bool) {
   452  		lock.Lock()
   453  		defer lock.Unlock()
   454  		count++
   455  		w.setDrainInputBufferLocked(drainWatcher)
   456  		w.stopLocked()
   457  	}
   458  	initEvents := []*watchCacheEvent{
   459  		makeWatchCacheEvent(5),
   460  		makeWatchCacheEvent(6),
   461  	}
   462  	w = newCacheWatcher(1, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
   463  	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 1)
   464  	if !w.add(makeWatchCacheEvent(7), time.NewTimer(1*time.Second)) {
   465  		t.Fatal("failed adding an event to the watcher")
   466  	}
   467  	forget(true) // drain the watcher
   468  	w.Stop()     // client disconnected, timeout expired or ctx was actually closed
   469  	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
   470  		lock.RLock()
   471  		defer lock.RUnlock()
   472  		return count == 3, nil
   473  	}); err != nil {
   474  		t.Fatalf("expected forget() to be called three times, because processInterval should call Stop(): %v", err)
   475  	}
   476  }
   477  
   478  // TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived verifies that the watcher is stopped
   479  // when adding an item times out and the bookmarkAfterResourceVersion hasn't been received
   480  func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived(t *testing.T) {
   481  	var lock sync.RWMutex
   482  	var w *cacheWatcher
   483  	count := 0
   484  	filter := func(string, labels.Set, fields.Set) bool { return true }
   485  	forget := func(drainWatcher bool) {
   486  		lock.Lock()
   487  		defer lock.Unlock()
   488  		if drainWatcher {
   489  			t.Fatalf("didn't expect drainWatcher to be set to true")
   490  		}
   491  		count++
   492  		w.setDrainInputBufferLocked(drainWatcher)
   493  		w.stopLocked()
   494  	}
   495  	initEvents := []*watchCacheEvent{
   496  		{Object: &v1.Pod{}},
   497  		{Object: &v1.Pod{}},
   498  	}
   499  	w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
   500  	w.setBookmarkAfterResourceVersion(10)
   501  	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 0)
   502  
   503  	// Receive an event so that we know w.processInterval has been scheduled
   504  	// and is blocked on sending the other event to the result chan.
   509  	<-w.ResultChan()
   510  
   511  	// Now that we know processInterval is waiting, add another event
   512  	// that will time out and start the cleanup process.
   514  	if w.add(&watchCacheEvent{Object: &v1.Pod{}}, time.NewTimer(10*time.Millisecond)) {
   515  		t.Fatal("expected the add method to fail")
   516  	}
   517  	if err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(_ context.Context) (bool, error) {
   518  		lock.RLock()
   519  		defer lock.RUnlock()
   520  		return count == 2, nil
   521  	}); err != nil {
   522  		t.Fatalf("expected forget() to be called twice, first call from w.add() and then from w.Stop() called from w.processInterval(): %v", err)
   523  	}
   524  
   525  	if !w.stopped {
   526  		t.Fatal("expected the watcher to be stopped but it wasn't")
   527  	}
   528  }
   529  
   530  // TestCacheWatcherDrainingNoBookmarkAfterResourceVersionSent checks that the watcher's input
   531  // channel is drained if the bookmarkAfterResourceVersion was received but not yet sent
   532  func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionSent(t *testing.T) {
   533  	makePod := func(rv uint64) *v1.Pod {
   534  		return &v1.Pod{
   535  			ObjectMeta: metav1.ObjectMeta{
   536  				Name:            fmt.Sprintf("pod-%d", rv),
   537  				Namespace:       "ns",
   538  				ResourceVersion: fmt.Sprintf("%d", rv),
   539  				Annotations:     map[string]string{},
   540  			},
   541  		}
   542  	}
   543  	var lock sync.RWMutex
   544  	var w *cacheWatcher
   545  	watchInitializationSignal := utilflowcontrol.NewInitializationSignal()
   546  	ctx := utilflowcontrol.WithInitializationSignal(context.Background(), watchInitializationSignal)
   547  	count := 0
   548  	filter := func(string, labels.Set, fields.Set) bool { return true }
   549  	forget := func(drainWatcher bool) {
   550  		lock.Lock()
   551  		defer lock.Unlock()
   552  		count++
   553  		w.setDrainInputBufferLocked(drainWatcher)
   554  		w.stopLocked()
   555  	}
   556  	initEvents := []*watchCacheEvent{{Object: makePod(1)}, {Object: makePod(2)}}
   557  	w = newCacheWatcher(2, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
   558  	w.setBookmarkAfterResourceVersion(10)
   559  	go w.processInterval(ctx, intervalFromEvents(initEvents), 0)
   560  	watchInitializationSignal.Wait()
   561  
   562  	// note that we can add three events even though the chanSize is two because
   563  	// one event has been popped off from the input chan
   564  	if !w.add(&watchCacheEvent{Object: makePod(5), ResourceVersion: 5}, time.NewTimer(1*time.Second)) {
   565  		t.Fatal("failed adding an event to the watcher")
   566  	}
   567  	if !w.nonblockingAdd(&watchCacheEvent{Type: watch.Bookmark, ResourceVersion: 10, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "10"}}}) {
   568  		t.Fatal("failed adding an event to the watcher")
   569  	}
   570  	if !w.add(&watchCacheEvent{Object: makePod(15), ResourceVersion: 15}, time.NewTimer(1*time.Second)) {
   571  		t.Fatal("failed adding an event to the watcher")
   572  	}
   573  	if w.add(&watchCacheEvent{Object: makePod(20), ResourceVersion: 20}, time.NewTimer(1*time.Second)) {
   574  		t.Fatal("expected the add method to fail")
   575  	}
   576  	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
   577  		lock.RLock()
   578  		defer lock.RUnlock()
   579  		return count == 1, nil
   580  	}); err != nil {
   581  		t.Fatalf("expected forget() to be called once, just from the w.add() method: %v", err)
   582  	}
   583  
   584  	if !w.stopped {
   585  		t.Fatal("expected the watcher to be stopped but it wasn't")
   586  	}
   587  	verifyEvents(t, w, []watch.Event{
   588  		{Type: watch.Added, Object: makePod(1)},
   589  		{Type: watch.Added, Object: makePod(2)},
   590  		{Type: watch.Added, Object: makePod(5)},
   591  		{Type: watch.Bookmark, Object: &v1.Pod{
   592  			ObjectMeta: metav1.ObjectMeta{
   593  				ResourceVersion: "10",
   594  				Annotations:     map[string]string{metav1.InitialEventsAnnotationKey: "true"},
   595  			},
   596  		}},
   597  		{Type: watch.Added, Object: makePod(15)},
   598  	}, true)
   599  
   600  	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
   601  		lock.RLock()
   602  		defer lock.RUnlock()
   603  		return count == 2, nil
   604  	}); err != nil {
   605  		t.Fatalf("expected forget() to be called twice, the second call being from the w.Stop() method called from w.processInterval(): %v", err)
   606  	}
   607  }
   608  
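        // TestBookmarkAfterResourceVersionWatchers verifies that a watcher waiting for
        // bookmarkAfterResourceVersion keeps being scheduled for an immediate bookmark until that
        // resource version has been received, after which it falls back to the regular bookmark interval.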
   609  func TestBookmarkAfterResourceVersionWatchers(t *testing.T) {
   610  	newWatcher := func(id string, deadline time.Time) *cacheWatcher {
   611  		w := newCacheWatcher(0, func(_ string, _ labels.Set, _ fields.Set) bool { return true }, func(bool) {}, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, id)
   612  		w.setBookmarkAfterResourceVersion(10)
   613  		return w
   614  	}
   615  
   616  	clock := testingclock.NewFakeClock(time.Now())
   617  	target := newTimeBucketWatchers(clock, defaultBookmarkFrequency)
   618  	if !target.addWatcherThreadUnsafe(newWatcher("1", clock.Now().Add(2*time.Minute))) {
   619  		t.Fatal("failed adding a watcher to the bucket")
   620  	}
   621  
   622  	// the watcher is immediately expired (it's waiting for a bookmark, so it is scheduled immediately)
   623  	ret := target.popExpiredWatchersThreadUnsafe()
   624  	if len(ret) != 1 || len(ret[0]) != 1 {
   625  		t.Fatalf("expected only one watcher to be expired")
   626  	}
   627  	if !target.addWatcherThreadUnsafe(ret[0][0]) {
   628  		t.Fatal("failed adding the watcher back to the bucket")
   629  	}
   630  
   631  	// after one second the watcher is still expired
   632  	clock.Step(1 * time.Second)
   633  	ret = target.popExpiredWatchersThreadUnsafe()
   634  	if len(ret) != 1 || len(ret[0]) != 1 {
   635  		t.Fatalf("expected only one watcher to be expired")
   636  	}
   637  	if !target.addWatcherThreadUnsafe(ret[0][0]) {
   638  		t.Fatal("failed adding the watcher back to the bucket")
   639  	}
   640  
   641  	// after 29 seconds the watcher is still expired
   642  	clock.Step(29 * time.Second)
   643  	ret = target.popExpiredWatchersThreadUnsafe()
   644  	if len(ret) != 1 || len(ret[0]) != 1 {
   645  		t.Fatalf("expected only one watcher to be expired")
   646  	}
   647  
   648  	// after the bookmark has been marked as received, confirm the watcher is no longer expired immediately
   649  	ret[0][0].markBookmarkAfterRvAsReceived(&watchCacheEvent{Type: watch.Bookmark, ResourceVersion: 10, Object: &v1.Pod{}})
   650  	if !target.addWatcherThreadUnsafe(ret[0][0]) {
   651  		t.Fatal("failed adding the watcher back to the bucket")
   652  	}
   653  	clock.Step(30 * time.Second)
   654  	ret = target.popExpiredWatchersThreadUnsafe()
   655  	if len(ret) != 0 {
   656  		t.Fatalf("didn't expect any watchers to be expired")
   657  	}
   658  
   659  	clock.Step(30 * time.Second)
   660  	ret = target.popExpiredWatchersThreadUnsafe()
   661  	if len(ret) != 1 || len(ret[0]) != 1 {
   662  		t.Fatalf("expected only one watcher to be expired")
   663  	}
   664  }