k8s.io/apiserver@v0.29.3/pkg/storage/cacher/cache_watcher_test.go

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cacher

import (
	"context"
	"fmt"
	"reflect"
	"sync"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/apiserver/pkg/storage"
	utilflowcontrol "k8s.io/apiserver/pkg/util/flowcontrol"
	"k8s.io/client-go/tools/cache"
	testingclock "k8s.io/utils/clock/testing"
)

// verifies that the cacheWatcher.process goroutine is properly cleaned up even
// if writes to the cacheWatcher.result channel are blocked.
func TestCacheWatcherCleanupNotBlockedByResult(t *testing.T) {
	var lock sync.RWMutex
	var w *cacheWatcher
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
		// forget() has to stop the watcher, as only stopping the watcher
		// triggers stopping the process() goroutine, which is what this test
		// ultimately waits for.
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
	}
	initEvents := []*watchCacheEvent{
		{Object: &v1.Pod{}},
		{Object: &v1.Pod{}},
	}
	// set the size of the buffer of w.result to 0, so that writes to
	// w.result block.
	w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 0)
	w.Stop()
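	// Stop() is expected to invoke forget() once directly; the second call
	// should come from the process() goroutine stopping itself on exit. The
	// poll below waits for both.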
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, because sendWatchCacheEvent should not be blocked by the result channel: %v", err)
	}
}

func TestCacheWatcherHandlesFiltering(t *testing.T) {
	filter := func(_ string, _ labels.Set, field fields.Set) bool {
		return field["spec.nodeName"] == "host"
	}
	forget := func(bool) {}

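	// A Modified event whose object crosses the filter boundary is rewritten:
	// an object entering the filter surfaces as Added, one leaving it as
	// Deleted, and events entirely outside the filter are dropped.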
	testCases := []struct {
		events   []*watchCacheEvent
		expected []watch.Event
	}{
		// properly handle starting with the filter, then being deleted, then re-added
		{
			events: []*watchCacheEvent{
				{
					Type:            watch.Added,
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
					ObjFields:       fields.Set{"spec.nodeName": "host"},
					ResourceVersion: 1,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
					ObjFields:       fields.Set{"spec.nodeName": ""},
					ResourceVersion: 2,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
					PrevObjFields:   fields.Set{"spec.nodeName": ""},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
					ObjFields:       fields.Set{"spec.nodeName": "host"},
					ResourceVersion: 3,
				},
			},
			expected: []watch.Event{
				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}}},
				{Type: watch.Deleted, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}}},
				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}}},
			},
		},
		// properly handle ignoring changes prior to the filter, then getting added, then deleted
		{
			events: []*watchCacheEvent{
				{
					Type:            watch.Added,
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
					ObjFields:       fields.Set{"spec.nodeName": ""},
					ResourceVersion: 1,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "1"}},
					PrevObjFields:   fields.Set{"spec.nodeName": ""},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
					ObjFields:       fields.Set{"spec.nodeName": ""},
					ResourceVersion: 2,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "2"}},
					PrevObjFields:   fields.Set{"spec.nodeName": ""},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
					ObjFields:       fields.Set{"spec.nodeName": "host"},
					ResourceVersion: 3,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}},
					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}},
					ObjFields:       fields.Set{"spec.nodeName": "host"},
					ResourceVersion: 4,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}},
					PrevObjFields:   fields.Set{"spec.nodeName": "host"},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}},
					ObjFields:       fields.Set{"spec.nodeName": ""},
					ResourceVersion: 5,
				},
				{
					Type:            watch.Modified,
					PrevObject:      &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}},
					PrevObjFields:   fields.Set{"spec.nodeName": ""},
					Object:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "6"}},
					ObjFields:       fields.Set{"spec.nodeName": ""},
					ResourceVersion: 6,
				},
			},
			expected: []watch.Event{
				{Type: watch.Added, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "3"}}},
				{Type: watch.Modified, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "4"}}},
				{Type: watch.Deleted, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "5"}}},
			},
		},
	}

TestCase:
	for i, testCase := range testCases {
		for j := range testCase.events {
			testCase.events[j].ResourceVersion = uint64(j) + 1
		}

		// set the size of the buffer of w.result to 0, so that writes to
		// w.result block.
		w := newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
		go w.processInterval(context.Background(), intervalFromEvents(testCase.events), 0)

		ch := w.ResultChan()
		for j, event := range testCase.expected {
			e := <-ch
			if !reflect.DeepEqual(event, e) {
				t.Errorf("%d: unexpected event %d: %s", i, j, cmp.Diff(event, e))
				break TestCase
			}
		}
		select {
		case obj, ok := <-ch:
			t.Errorf("%d: unexpected excess event: %#v %t", i, obj, ok)
			break TestCase
		default:
		}
		w.setDrainInputBufferLocked(false)
		w.stopLocked()
	}
}

func TestCacheWatcherStoppedInAnotherGoroutine(t *testing.T) {
	var w *cacheWatcher
	done := make(chan struct{})
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
		done <- struct{}{}
	}

	maxRetriesToProduceTheRaceCondition := 1000
	// Simulate the timer firing and the watcher being stopped concurrently by
	// setting the timeout to zero and running Stop in a separate goroutine.
	// Make sure that the watcher does not block on Stop.
	for i := 0; i < maxRetriesToProduceTheRaceCondition; i++ {
		w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), false, schema.GroupResource{Resource: "pods"}, "")
		go w.Stop()
		select {
		case <-done:
		case <-time.After(time.Second):
			t.Fatal("stop is blocked when the timer is fired concurrently")
		}
	}

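	// A deadline an hour in the future keeps the watcher's own timer from
	// firing while the normal event-delivery path is exercised below.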
	deadline := time.Now().Add(time.Hour)
	// After that, verify the cacheWatcher.process goroutine works correctly.
	for i := 0; i < maxRetriesToProduceTheRaceCondition; i++ {
		w = newCacheWatcher(2, filter, emptyFunc, storage.APIObjectVersioner{}, deadline, false, schema.GroupResource{Resource: "pods"}, "")
		w.input <- &watchCacheEvent{Object: &v1.Pod{}, ResourceVersion: uint64(i + 1)}
		ctx, cancel := context.WithDeadline(context.Background(), deadline)
		defer cancel()
		go w.processInterval(ctx, intervalFromEvents(nil), 0)
		select {
		case <-w.ResultChan():
		case <-time.After(time.Second):
			t.Fatal("expected to receive an event on ResultChan")
		}
		w.setDrainInputBufferLocked(false)
		w.stopLocked()
	}
}

func TestCacheWatcherStoppedOnDestroy(t *testing.T) {
	backingStorage := &dummyStorage{}
	cacher, _, err := newTestCacher(backingStorage)
	if err != nil {
		t.Fatalf("Couldn't create cacher: %v", err)
	}
	defer cacher.Stop()

	// Wait until cacher is initialized.
	if err := cacher.ready.wait(context.Background()); err != nil {
		t.Fatalf("unexpected error waiting for the cache to be ready: %v", err)
	}

	w, err := cacher.Watch(context.Background(), "pods/ns", storage.ListOptions{ResourceVersion: "0", Predicate: storage.Everything})
	if err != nil {
		t.Fatalf("Failed to create watch: %v", err)
	}

	watchClosed := make(chan struct{})
	go func() {
		defer close(watchClosed)
		for event := range w.ResultChan() {
			switch event.Type {
			case watch.Added, watch.Modified, watch.Deleted:
				// ok
			default:
				t.Errorf("unexpected event %#v", event)
			}
		}
	}()

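	// Destroying the cacher should terminate every open watch, closing the
	// result channel and ending the range loop above.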
	cacher.Stop()

	select {
	case <-watchClosed:
	case <-time.After(wait.ForeverTestTimeout):
		t.Errorf("timed out waiting for watch to close")
	}
}

func TestResourceVersionAfterInitEvents(t *testing.T) {
	getAttrsFunc := func(obj runtime.Object) (labels.Set, fields.Set, error) {
		return nil, nil, nil
	}

	const numObjects = 10
	store := cache.NewIndexer(storeElementKey, storeElementIndexers(nil))

	for i := 0; i < numObjects; i++ {
		elem := makeTestStoreElement(makeTestPod(fmt.Sprintf("pod-%d", i), uint64(i)))
		store.Add(elem)
	}

	wci, err := newCacheIntervalFromStore(numObjects, store, getAttrsFunc)
	if err != nil {
		t.Fatal(err)
	}

	filter := func(_ string, _ labels.Set, _ fields.Set) bool { return true }
	forget := func(_ bool) {}
	deadline := time.Now().Add(time.Minute)
	w := newCacheWatcher(numObjects+1, filter, forget, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, "")

	// Simulate a situation where the last event, which was already part of the
	// initial state, hasn't yet been processed by the cacher and is delivered
	// via the channel again.
	event := &watchCacheEvent{
		Type:            watch.Added,
		Object:          makeTestPod(fmt.Sprintf("pod-%d", numObjects-1), uint64(numObjects-1)),
		ResourceVersion: uint64(numObjects - 1),
	}
	if !w.add(event, time.NewTimer(time.Second)) {
		t.Fatalf("failed to add event")
	}
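	// Stopping the watcher up front is fine here: processInterval is still
	// expected to deliver the init events from the interval before honoring
	// the stop and closing the result channel.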
	w.stopLocked()

	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		w.processInterval(context.Background(), wci, uint64(numObjects-1))
	}()

	// We expect all init events to be delivered.
	for i := 0; i < numObjects; i++ {
		<-w.ResultChan()
	}
	// We don't expect any other event to be delivered, and thus we expect
	// the ResultChan to be closed.
	result, ok := <-w.ResultChan()
	if ok {
		t.Errorf("unexpected event: %#v", result)
	}

	wg.Wait()
}

func TestTimeBucketWatchersBasic(t *testing.T) {
	filter := func(_ string, _ labels.Set, _ fields.Set) bool {
		return true
	}
	forget := func(bool) {}

	newWatcher := func(deadline time.Time) *cacheWatcher {
		w := newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, "")
		w.setBookmarkAfterResourceVersion(0)
		return w
	}

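	// Watchers are bucketed by deadline, so the two watchers sharing the
	// 20-second deadline below should land in the same bucket, giving two
	// buckets in total.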
	clock := testingclock.NewFakeClock(time.Now())
	watchers := newTimeBucketWatchers(clock, defaultBookmarkFrequency)
	now := clock.Now()
	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(10 * time.Second)))
	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(20 * time.Second)))
	watchers.addWatcherThreadUnsafe(newWatcher(now.Add(20 * time.Second)))

	if len(watchers.watchersBuckets) != 2 {
		t.Errorf("unexpected bucket size: %#v", watchers.watchersBuckets)
	}
	watchers0 := watchers.popExpiredWatchersThreadUnsafe()
	if len(watchers0) != 0 {
		t.Errorf("unexpected bucket size: %#v", watchers0)
	}

	clock.Step(10 * time.Second)
	watchers1 := watchers.popExpiredWatchersThreadUnsafe()
	if len(watchers1) != 1 || len(watchers1[0]) != 1 {
		t.Errorf("unexpected bucket size: %v", watchers1)
	}
	watchers1 = watchers.popExpiredWatchersThreadUnsafe()
	if len(watchers1) != 0 {
		t.Errorf("unexpected bucket size: %#v", watchers1)
	}

	clock.Step(12 * time.Second)
	watchers2 := watchers.popExpiredWatchersThreadUnsafe()
	if len(watchers2) != 1 || len(watchers2[0]) != 2 {
		t.Errorf("unexpected bucket size: %#v", watchers2)
	}
}

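// makeWatchCacheEvent returns an Added event for a pod whose name and
// resource version are derived from rv.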
func makeWatchCacheEvent(rv uint64) *watchCacheEvent {
	return &watchCacheEvent{
		Type: watch.Added,
		Object: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:            fmt.Sprintf("pod-%d", rv),
				ResourceVersion: fmt.Sprintf("%d", rv),
			},
		},
		ResourceVersion: rv,
	}
}

// TestCacheWatcherDraining verifies the cacheWatcher.process goroutine is
// properly cleaned up when draining was requested.
func TestCacheWatcherDraining(t *testing.T) {
	var lock sync.RWMutex
	var w *cacheWatcher
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
	}
	initEvents := []*watchCacheEvent{
		makeWatchCacheEvent(5),
		makeWatchCacheEvent(6),
	}
	w = newCacheWatcher(1, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 1)
	if !w.add(makeWatchCacheEvent(7), time.NewTimer(1*time.Second)) {
		t.Fatal("failed to add an event to the watcher")
	}
	forget(true) // drain the watcher

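	// With draining enabled, the watcher should flush everything it has
	// before closing the result channel: the two init events plus the one
	// added above.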
	eventCount := 0
	for range w.ResultChan() {
		eventCount++
	}
	if eventCount != 3 {
		t.Errorf("Unexpected number of objects received: %d, expected: 3", eventCount)
	}
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, because processInterval should call Stop(): %v", err)
	}
}

// TestCacheWatcherDrainingRequestedButNotDrained verifies the
// cacheWatcher.process goroutine is properly cleaned up when draining was
// requested but the client never actually gets any data.
func TestCacheWatcherDrainingRequestedButNotDrained(t *testing.T) {
	var lock sync.RWMutex
	var w *cacheWatcher
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
	}
	initEvents := []*watchCacheEvent{
		makeWatchCacheEvent(5),
		makeWatchCacheEvent(6),
	}
	w = newCacheWatcher(1, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 1)
	if !w.add(makeWatchCacheEvent(7), time.NewTimer(1*time.Second)) {
		t.Fatal("failed to add an event to the watcher")
	}
	forget(true) // drain the watcher
	w.Stop()     // client disconnected, timeout expired or ctx was actually closed
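	// Three forget() calls are expected: the explicit forget(true) above, one
	// from w.Stop(), and one more when the process() goroutine stops itself
	// on exit.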
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 3, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called three times, because processInterval should call Stop(): %v", err)
	}
}

// TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived verifies
// that the watcher is stopped when adding an item times out and the
// bookmarkAfterResourceVersion hasn't been received.
func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived(t *testing.T) {
	var lock sync.RWMutex
	var w *cacheWatcher
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		lock.Lock()
		defer lock.Unlock()
		if drainWatcher {
			t.Fatalf("didn't expect drainWatcher to be set to true")
		}
		count++
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
	}
	initEvents := []*watchCacheEvent{
		{Object: &v1.Pod{}},
		{Object: &v1.Pod{}},
	}
	w = newCacheWatcher(0, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
	w.setBookmarkAfterResourceVersion(10)
	go w.processInterval(context.Background(), intervalFromEvents(initEvents), 0)
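	// With a zero-length input channel the add below should time out; since
	// the bookmark at RV 10 hasn't been received, the watcher is expected to
	// be stopped without draining (forget called with drainWatcher=false).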
	if w.add(&watchCacheEvent{Object: &v1.Pod{}}, time.NewTimer(1*time.Second)) {
		t.Fatal("expected the add method to fail")
	}
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, first call from w.add() and then from w.Stop() called from w.processInterval(): %v", err)
	}

	if !w.stopped {
		t.Fatal("expected the watcher to be stopped but it wasn't")
	}
}

// TestCacheWatcherDrainingNoBookmarkAfterResourceVersionSent checks that the
// watcher's input channel is drained if the bookmarkAfterResourceVersion was
// received but not yet sent.
func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionSent(t *testing.T) {
	makePod := func(rv uint64) *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:            fmt.Sprintf("pod-%d", rv),
				Namespace:       "ns",
				ResourceVersion: fmt.Sprintf("%d", rv),
				Annotations:     map[string]string{},
			},
		}
	}
	var lock sync.RWMutex
	var w *cacheWatcher
	watchInitializationSignal := utilflowcontrol.NewInitializationSignal()
	ctx := utilflowcontrol.WithInitializationSignal(context.Background(), watchInitializationSignal)
	count := 0
	filter := func(string, labels.Set, fields.Set) bool { return true }
	forget := func(drainWatcher bool) {
		lock.Lock()
		defer lock.Unlock()
		count++
		w.setDrainInputBufferLocked(drainWatcher)
		w.stopLocked()
	}
	initEvents := []*watchCacheEvent{{Object: makePod(1)}, {Object: makePod(2)}}
	w = newCacheWatcher(2, filter, forget, storage.APIObjectVersioner{}, time.Now(), true, schema.GroupResource{Resource: "pods"}, "")
	w.setBookmarkAfterResourceVersion(10)
	go w.processInterval(ctx, intervalFromEvents(initEvents), 0)
	watchInitializationSignal.Wait()

	// note that we can add three events even though the chanSize is two
	// because one event has been popped off the input channel
	if !w.add(&watchCacheEvent{Object: makePod(5), ResourceVersion: 5}, time.NewTimer(1*time.Second)) {
		t.Fatal("failed to add an event to the watcher")
	}
	if !w.nonblockingAdd(&watchCacheEvent{Type: watch.Bookmark, ResourceVersion: 10, Object: &v1.Pod{ObjectMeta: metav1.ObjectMeta{ResourceVersion: "10"}}}) {
		t.Fatal("failed to add an event to the watcher")
	}
	if !w.add(&watchCacheEvent{Object: makePod(15), ResourceVersion: 15}, time.NewTimer(1*time.Second)) {
		t.Fatal("failed to add an event to the watcher")
	}
	if w.add(&watchCacheEvent{Object: makePod(20), ResourceVersion: 20}, time.NewTimer(1*time.Second)) {
		t.Fatal("expected the add method to fail")
	}
	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 1, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called once, just from the w.add() method: %v", err)
	}

	if !w.stopped {
		t.Fatal("expected the watcher to be stopped but it wasn't")
	}
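	// Since the bookmark at RV 10 was already queued when the last add timed
	// out, the input buffer should be drained: all buffered events, including
	// the bookmark annotated as the end of the initial events, are still
	// expected to be delivered.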
	verifyEvents(t, w, []watch.Event{
		{Type: watch.Added, Object: makePod(1)},
		{Type: watch.Added, Object: makePod(2)},
		{Type: watch.Added, Object: makePod(5)},
		{Type: watch.Bookmark, Object: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				ResourceVersion: "10",
				Annotations:     map[string]string{"k8s.io/initial-events-end": "true"},
			},
		}},
		{Type: watch.Added, Object: makePod(15)},
	}, true)

	if err := wait.PollImmediate(1*time.Second, 5*time.Second, func() (bool, error) {
		lock.RLock()
		defer lock.RUnlock()
		return count == 2, nil
	}); err != nil {
		t.Fatalf("expected forget() to be called twice, the second call is from the w.Stop() method called from w.processInterval(): %v", err)
	}
}

func TestBookmarkAfterResourceVersionWatchers(t *testing.T) {
	newWatcher := func(id string, deadline time.Time) *cacheWatcher {
		w := newCacheWatcher(0, func(_ string, _ labels.Set, _ fields.Set) bool { return true }, func(bool) {}, storage.APIObjectVersioner{}, deadline, true, schema.GroupResource{Resource: "pods"}, id)
		w.setBookmarkAfterResourceVersion(10)
		return w
	}

	clock := testingclock.NewFakeClock(time.Now())
	target := newTimeBucketWatchers(clock, defaultBookmarkFrequency)
	if !target.addWatcherThreadUnsafe(newWatcher("1", clock.Now().Add(2*time.Minute))) {
		t.Fatal("failed to add the watcher")
	}

	// the watcher is immediately expired (it's waiting for a bookmark, so it is scheduled immediately)
	ret := target.popExpiredWatchersThreadUnsafe()
	if len(ret) != 1 || len(ret[0]) != 1 {
		t.Fatalf("expected only one watcher to be expired")
	}
	if !target.addWatcherThreadUnsafe(ret[0][0]) {
		t.Fatal("failed to add the watcher")
	}

	// after one second the watcher is still expired
	clock.Step(1 * time.Second)
	ret = target.popExpiredWatchersThreadUnsafe()
	if len(ret) != 1 || len(ret[0]) != 1 {
		t.Fatalf("expected only one watcher to be expired")
	}
	if !target.addWatcherThreadUnsafe(ret[0][0]) {
		t.Fatal("failed to add the watcher")
	}

	// after 29 seconds the watcher is still expired
	clock.Step(29 * time.Second)
	ret = target.popExpiredWatchersThreadUnsafe()
	if len(ret) != 1 || len(ret[0]) != 1 {
		t.Fatalf("expected only one watcher to be expired")
	}

	// once the bookmark has been received, the watcher is no longer scheduled immediately
	ret[0][0].markBookmarkAfterRvAsReceived(&watchCacheEvent{Type: watch.Bookmark, ResourceVersion: 10, Object: &v1.Pod{}})
	if !target.addWatcherThreadUnsafe(ret[0][0]) {
		t.Fatal("failed to add the watcher")
	}
	clock.Step(30 * time.Second)
	ret = target.popExpiredWatchersThreadUnsafe()
	if len(ret) != 0 {
		t.Fatalf("didn't expect any watchers to be expired")
	}

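	// Once the bookmark has been received, the watcher is rescheduled at the
	// regular bookmark frequency, so it should only expire after that full
	// interval has elapsed.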
	clock.Step(30 * time.Second)
	ret = target.popExpiredWatchersThreadUnsafe()
	if len(ret) != 1 || len(ret[0]) != 1 {
		t.Fatalf("expected only one watcher to be expired")
	}
}