sigs.k8s.io/prow@v0.0.0-20240503223140-c5e374dc7eb1/pkg/cache/cache_test.go

     1  /*
     2  Copyright 2021 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package cache
    18  
    19  import (
    20  	"fmt"
    21  	"sync"
    22  	"testing"
    23  )
    24  
    25  // TestGetOrAddSimple is a basic check that the underlying LRU cache
    26  // implementation powering LRUCache behaves as expected. It exercises
    27  // cache eviction as well as the behavior we want when value construction
    28  // fails.
    29  func TestGetOrAddSimple(t *testing.T) {
    30  	valConstructorCalls := 0
    31  	goodValConstructor := func(val string) func() (interface{}, error) {
    32  		return func() (interface{}, error) {
    33  			valConstructorCalls++
    34  			return "(val)" + val, nil
    35  		}
    36  	}
    37  	badValConstructor := func(key string) func() (interface{}, error) {
    38  		return func() (interface{}, error) {
    39  			valConstructorCalls++
    40  			return "", fmt.Errorf("could not construct val")
    41  		}
    42  	}
    43  
    44  	goodValConstructorForInitialState := func(val string) func() (interface{}, error) {
    45  		return func() (interface{}, error) {
    46  			return val, nil
    47  		}
    48  	}
    49  
    50  	simpleCache, err := NewLRUCache(2, Callbacks{})
    51  	if err != nil {
    52  		t.Fatalf("could not initialize simpleCache: %v", err)
    53  	}
    54  
    55  	type expected struct {
    56  		val                 string
    57  		err                 string
    58  		valConstructorCalls int
    59  		cachedValues        int
    60  	}
    61  
    62  	for _, tc := range []struct {
    63  		name              string
    64  		cache             *LRUCache
    65  		cacheInitialState map[string]string
    66  		key               string
    67  		valConstructor    ValConstructor
    68  		expected          expected
    69  	}{
    70  		{
    71  			name:              "EmptyCache",
    72  			cache:             simpleCache,
    73  			cacheInitialState: nil,
    74  			key:               "foo",
    75  			valConstructor:    goodValConstructor("foo"),
    76  			expected: expected{
    77  				val:                 "(val)foo",
    78  				err:                 "",
    79  				valConstructorCalls: 1,
    80  				cachedValues:        1,
    81  			},
    82  		},
    83  		{
    84  			name:  "CacheMissWithoutValueEviction",
    85  			cache: simpleCache,
    86  			cacheInitialState: map[string]string{
    87  				"(key)foo": "(val)foo",
    88  			},
    89  			key:            "bar",
    90  			valConstructor: goodValConstructor("bar"),
    91  			expected: expected{
    92  				val:                 "(val)bar",
    93  				err:                 "",
    94  				valConstructorCalls: 1,
    95  				cachedValues:        2,
    96  			},
    97  		},
    98  		{
    99  			name:  "CacheMissWithValueEviction",
   100  			cache: simpleCache,
   101  			cacheInitialState: map[string]string{
   102  				"(key)foo": "(val)foo",
   103  				"(key)bar": "(val)bar",
   104  			},
   105  			key:            "cat",
   106  			valConstructor: goodValConstructor("cat"),
   107  			expected: expected{
   108  				val:                 "(val)cat",
   109  				err:                 "",
   110  				valConstructorCalls: 1,
   111  				// There are still only 2 values in the cache, even though we
   112  				// tried to add a 3rd item ("cat").
   113  				cachedValues: 2,
   114  			},
   115  		},
   116  		{
   117  			name:  "CacheHit",
   118  			cache: simpleCache,
   119  			cacheInitialState: map[string]string{
   120  				"(key)foo": "(val)foo",
   121  				"(key)bar": "(val)bar",
   122  			},
   123  			key:            "(key)bar",
   124  			valConstructor: goodValConstructor("bar"),
   125  			expected: expected{
   126  				val: "(val)bar",
   127  				err: "",
   128  				// If the constructed value is already in the cache, we do not
   129  				// need to construct it from scratch.
   130  				valConstructorCalls: 0,
   131  				cachedValues:        2,
   132  			},
   133  		},
   134  		{
   135  			// Constructing the value resulted in an error. We evict this entry
   136  			// from the cache.
   137  			name:              "BadValConstructor",
   138  			cache:             simpleCache,
   139  			cacheInitialState: nil,
   140  			key:               "bar",
   141  			valConstructor:    badValConstructor("bar"),
   142  			expected: expected{
   143  				val:                 "",
   144  				err:                 "could not construct val",
   145  				valConstructorCalls: 1,
   146  				cachedValues:        0,
   147  			},
   148  		},
   149  	} {
   150  		t.Run(tc.name, func(t *testing.T) {
   151  			// Reset test state.
   152  			valConstructorCalls = 0
   153  			simpleCache.Purge()
   154  
   155  			for k, v := range tc.cacheInitialState {
   156  				if tc.cache != nil {
   157  					_, _, _ = tc.cache.GetOrAdd(k, goodValConstructorForInitialState(v))
   158  				}
   159  			}
   160  
   161  			val, _, err := tc.cache.GetOrAdd(tc.key, tc.valConstructor)
   162  
   163  			if tc.expected.err == "" {
   164  				if err != nil {
   165  					t.Errorf("Expected error 'nil', got '%v'", err.Error())
   166  				}
   167  			} else {
   168  				if err == nil {
   169  					t.Fatal("Expected non-nil error, got nil")
   170  				}
   171  
   172  				if tc.expected.err != err.Error() {
   173  					t.Errorf("Expected error '%v', got '%v'", tc.expected.err, err.Error())
   174  				}
   175  			}
   176  
   177  			if tc.expected.val == "<nil>" {
   178  				if val != nil {
   179  					t.Errorf("Expected val to be nil, got '%v'", val)
   180  				}
   181  			} else {
   182  				if tc.expected.val != val {
   183  					t.Errorf("Expected val '%v', got '%v'", tc.expected.val, val)
   184  				}
   185  			}
   186  
   187  			if tc.expected.valConstructorCalls != valConstructorCalls {
   188  				t.Errorf("Expected '%d' calls to valConstructor(), got '%d'", tc.expected.valConstructorCalls, valConstructorCalls)
   189  			}
   190  
   191  			if tc.cache != nil && tc.expected.cachedValues != tc.cache.Len() {
   192  				t.Errorf("Expected cachedValues to be '%d', got '%d'", tc.expected.cachedValues, tc.cache.Len())
   193  			}
   194  		})
   195  	}
   196  }
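
        // As a hedged recap of the GetOrAdd behavior exercised above (using only
        // the NewLRUCache, Callbacks, and GetOrAdd signatures already seen in this
        // file), a caller would typically use the cache like this. The helper name
        // is illustrative and the function is not referenced by any test.
        func exampleGetOrAddUsage() (interface{}, error) {
        	cache, err := NewLRUCache(2, Callbacks{})
        	if err != nil {
        		return nil, err
        	}
        	// A miss runs the constructor and caches its value; a later hit returns
        	// the cached value without calling the constructor again. If the
        	// constructor fails, the error is returned and the entry is not kept.
        	val, _, err := cache.GetOrAdd("foo", func() (interface{}, error) {
        		return "(val)foo", nil
        	})
        	return val, err
        }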
   197  
   198  // TestGetOrAddBurst tests a sudden burst of 500 concurrent requests for the
   199  // same cache key. Because our cache can handle this situation (known as a
   200  // "cache stampede") with a mitigation strategy called "duplicate
   201  // suppression", we expect only a __single__ cached entry to be created, with
   202  // the remaining 499 "get" calls against the cache reusing that entry. The
   203  // HTTP analogue of duplicate suppression is known as request coalescing,
   204  // which uses the same principle. For more discussion about duplicate
   205  // suppression, see Alan Donovan and Brian Kernighan, "The Go Programming
   206  // Language" (Addison-Wesley, 2016), p. 277.
   207  func TestGetOrAddBurst(t *testing.T) {
   208  	// testLock is used for guarding valConstructorCalls for purposes of
   209  	// testing.
   210  	testLock := sync.Mutex{}
   211  
   212  	valConstructorCalls := 0
   213  	// goodValConstructor simulates an "expensive" call by iterating the
   214  	// Collatz sequence for a small input. The point is that the value
   215  	// generated here cannot be optimized away by the compiler, because it
   216  	// cannot be precomputed at compile time. This guarantees that some
   217  	// CPU cycles are spent between the time we unlock the testLock and
   218  	// the time we retrieve the computed value (all within the same
   219  	// thread).
   220  	goodValConstructor := func(input int) func() (interface{}, error) {
   221  		return func() (interface{}, error) {
   222  			testLock.Lock()
   223  			valConstructorCalls++
   224  			testLock.Unlock()
   225  			steps := 0
   226  			n := input
   227  			max := input
   228  			for n > 1 {
   229  				if n > max {
   230  					max = n
   231  				}
   232  				if n&1 == 0 {
   233  					n >>= 1
   234  				} else {
   235  					n *= 3
   236  					n++
   237  				}
   238  				steps++
   239  			}
   240  			return fmt.Sprintf("(val)input=%d,steps=%d,max=%d", input, steps, max), nil
   241  		}
   242  	}
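        	// As a worked example of the arithmetic above (done by hand, for the
        	// reader): an input of 3 walks the sequence 3 -> 10 -> 5 -> 16 -> 8 ->
        	// 4 -> 2 -> 1, i.e. steps=7 and max=16, which is where the expected
        	// value "(val)input=3,steps=7,max=16" below comes from.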
   243  
   244  	lruCache, err := NewLRUCache(1000, Callbacks{})
   245  	if err != nil {
   246  		t.Fatalf("could not initialize lruCache: %v", err)
   247  	}
   248  
   249  	valConstructorCalls = 0
   250  	const maxConcurrentRequests = 500
   251  	wg := sync.WaitGroup{}
   252  
   253  	// Consider the case where all threads perform the same cache lookup.
   254  	expectedVal := "(val)input=3,steps=7,max=16"
   255  	wg.Add(maxConcurrentRequests)
   256  	for i := 0; i < maxConcurrentRequests; i++ {
   257  		go func() {
   258  			// Input of 3 for goodValConstructor will take 7 steps and reach a
   259  			// maximum value of 16. We check this below.
   260  			constructedVal, _, err := lruCache.GetOrAdd(3, goodValConstructor(3))
   261  			if err != nil {
   262  				t.Errorf("could not fetch or construct value: %v", err)
   263  			}
   264  			if constructedVal != expectedVal {
   265  				t.Errorf("expected constructed value '%v', got '%v'", expectedVal, constructedVal)
   266  			}
   267  			wg.Done()
   268  		}()
   269  	}
   270  	wg.Wait()
   271  
   272  	// Expect that we only invoked the goodValConstructor once. Notice how the
   273  	// user of lruCache does not need to worry about locking. The cache is smart
   274  	// enough to perform duplicate suppression on its own, so that the value is
   275  	// constructed and written into the cache only once, no matter how many
   276  	// concurrent threads attempt to access it.
   277  	if valConstructorCalls != 1 {
   278  		t.Errorf("Expected valConstructorCalls '1', got '%v'", valConstructorCalls)
   279  	}
   280  	if lruCache.Len() != 1 {
   281  		t.Errorf("Expected single cached element, got '%v'", lruCache.Len())
   282  	}
   283  
   284  	valConstructorCalls = 0
   285  	lruCache.Purge()
   286  
   287  	// Consider the case where all threads perform one of 5 different cache lookups.
   288  	wg.Add(maxConcurrentRequests)
   289  	for i := 0; i < maxConcurrentRequests; i++ {
   290  		j := (i % 5) + 1
   291  		expectedVal := ""
   292  		go func() {
   293  			constructedVal, _, err := lruCache.GetOrAdd(j, goodValConstructor(j))
   294  			if err != nil {
   295  				t.Errorf("could not fetch or construct value: %v", err)
   296  			}
   297  			switch j {
   298  			case 1:
   299  				expectedVal = "(val)input=1,steps=0,max=1"
   300  			case 2:
   301  				expectedVal = "(val)input=2,steps=1,max=2"
   302  			case 3:
   303  				expectedVal = "(val)input=3,steps=7,max=16"
   304  			case 4:
   305  				expectedVal = "(val)input=4,steps=2,max=4"
   306  			default:
   307  				expectedVal = "(val)input=5,steps=5,max=16"
   308  			}
   309  			if constructedVal != expectedVal {
   310  				t.Errorf("expected constructed value '%v', got '%v'", expectedVal, constructedVal)
   311  			}
   312  			wg.Done()
   313  		}()
   314  	}
   315  	wg.Wait()
   316  
   317  	// Only expect 5 valConstructor calls, because there are only 5 unique key lookups.
   318  	if valConstructorCalls != 5 {
   319  		t.Errorf("Expected valConstructorCalls '5', got '%v'", valConstructorCalls)
   320  	}
   321  	if lruCache.Len() != 5 {
   322  		t.Errorf("Expected 5 cached entries, got '%v'", lruCache.Len())
   323  	}
   324  }
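
        // The doc comment on TestGetOrAddBurst refers to duplicate suppression. The
        // following is a hedged, self-contained sketch of that idea in the spirit of
        // the concurrent non-blocking cache from "The Go Programming Language" ch. 9.
        // It is NOT the LRUCache implementation (whose internals are not shown in
        // this file), and the type and function names here are illustrative only:
        // the first caller for a key constructs the value, while concurrent callers
        // for the same key block on a ready channel and reuse that single result.
        type suppressedEntry struct {
        	val   interface{}
        	err   error
        	ready chan struct{} // closed once val and err are set
        }

        type duplicateSuppressor struct {
        	mu      sync.Mutex
        	entries map[interface{}]*suppressedEntry
        }

        func newDuplicateSuppressor() *duplicateSuppressor {
        	return &duplicateSuppressor{entries: map[interface{}]*suppressedEntry{}}
        }

        func (d *duplicateSuppressor) getOrConstruct(key interface{}, construct func() (interface{}, error)) (interface{}, error) {
        	d.mu.Lock()
        	e := d.entries[key]
        	if e == nil {
        		// First request for this key: reserve the slot, then run the
        		// (possibly expensive) constructor outside the lock.
        		e = &suppressedEntry{ready: make(chan struct{})}
        		d.entries[key] = e
        		d.mu.Unlock()
        		e.val, e.err = construct()
        		close(e.ready)
        	} else {
        		// Duplicate request: wait for the first caller to finish and reuse
        		// its result instead of calling construct again.
        		d.mu.Unlock()
        		<-e.ready
        	}
        	return e.val, e.err
        }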
   325  
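        // TestCallbacks checks that the optional event callbacks (lookups, hits,
        // misses, forced evictions, and manual evictions) are invoked the expected
        // number of times for various lookup patterns, and that an empty Callbacks
        // struct is a no-op.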
   326  func TestCallbacks(t *testing.T) {
   327  	goodValConstructor := func(val string) func() (interface{}, error) {
   328  		return func() (interface{}, error) {
   329  			return val, nil
   330  		}
   331  	}
   332  	badValConstructor := func(val string) func() (interface{}, error) {
   333  		return func() (interface{}, error) {
   334  			return "", fmt.Errorf("could not construct val")
   335  		}
   336  	}
   337  
   338  	lookupsCounter := 0
   339  	hitsCounter := 0
   340  	missesCounter := 0
   341  	forcedEvictionsCounter := 0
   342  	manualEvictionsCounter := 0
   343  
   344  	counterLock := &sync.Mutex{}
   345  	mkCallback := func(counter *int) EventCallback {
   346  		callback := func(key interface{}) {
   347  			counterLock.Lock()
   348  			(*counter)++
   349  			counterLock.Unlock()
   350  		}
   351  		return callback
   352  	}
   353  
   354  	lookupsCallback := mkCallback(&lookupsCounter)
   355  	hitsCallback := mkCallback(&hitsCounter)
   356  	missesCallback := mkCallback(&missesCounter)
   357  	forcedEvictionsCallback := func(key interface{}, _ interface{}) {
   358  		forcedEvictionsCounter++
   359  	}
   360  	manualEvictionsCallback := mkCallback(&manualEvictionsCounter)
   361  
   362  	defaultCallbacks := Callbacks{
   363  		LookupsCallback:         lookupsCallback,
   364  		HitsCallback:            hitsCallback,
   365  		MissesCallback:          missesCallback,
   366  		ForcedEvictionsCallback: forcedEvictionsCallback,
   367  		ManualEvictionsCallback: manualEvictionsCallback,
   368  	}
   369  
   370  	type expected struct {
   371  		lookups         int
   372  		hits            int
   373  		misses          int
   374  		forcedEvictions int
   375  		manualEvictions int
   376  		// If the value constructor is flaky, then it can result in a (mostly
   377  		// harmless) race in which events occur. For example, the key may be
   378  		// evicted by either the underlying LRU cache (if it gets to it first),
   379  		// or by us when we manually try to evict it. This can result in an
   380  		// unpredictable number of forced versus manual evictions.
   381  		//
   382  		// This flakiness can have cascading effects to the other metrics like
   383  		// lookups/hits/misses. So, if our test case has bad constructors in it,
   384  		// we need to be less strict about how we compare these expected results
   385  		// versus what we get.
   386  		racyEvictions bool
   387  	}
   388  
   389  	type lookup struct {
   390  		key            string
   391  		valConstructor func(val string) func() (interface{}, error)
   392  	}
   393  
   394  	for _, tc := range []struct {
   395  		name              string
   396  		cacheSize         int
   397  		cacheInitialState map[string]string
   398  		cacheCallbacks    Callbacks
   399  		// Perform lookups for each key here. It could result in a hit or miss.
   400  		lookups  []lookup
   401  		expected expected
   402  	}{
   403  		{
   404  			name:      "NoDefinedCallbacksResultsInNOP",
   405  			cacheSize: 2,
   406  			cacheInitialState: map[string]string{
   407  				"(key)foo": "(val)bar",
   408  			},
   409  			cacheCallbacks: Callbacks{},
   410  			lookups: []lookup{
   411  				{"(key)foo", goodValConstructor},
   412  			},
   413  			expected: expected{
   414  				lookups:         0,
   415  				hits:            0,
   416  				misses:          0,
   417  				forcedEvictions: 0,
   418  				manualEvictions: 0,
   419  			},
   420  		},
   421  		{
   422  			name:              "OneHitOneMiss",
   423  			cacheSize:         2,
   424  			cacheInitialState: map[string]string{},
   425  			cacheCallbacks:    defaultCallbacks,
   426  			lookups: []lookup{
   427  				{"(key)foo", goodValConstructor},
   428  				{"(key)foo", goodValConstructor},
   429  			},
   430  			expected: expected{
   431  				lookups: 2,
   432  				// One hit for a subsequent successful lookup.
   433  				hits: 1,
   434  				// One miss for whichever lookup runs first and must construct the value.
   435  				misses:          1,
   436  				forcedEvictions: 0,
   437  				manualEvictions: 0,
   438  			},
   439  		},
   440  		{
   441  			name:              "ManyMissesAndSomeForcedEvictions",
   442  			cacheSize:         2,
   443  			cacheInitialState: map[string]string{},
   444  			cacheCallbacks:    defaultCallbacks,
   445  			lookups: []lookup{
   446  				{"(key)1", goodValConstructor},
   447  				{"(key)2", goodValConstructor},
   448  				{"(key)3", goodValConstructor},
   449  				{"(key)4", goodValConstructor},
   450  				{"(key)5", goodValConstructor},
   451  			},
   452  			expected: expected{
   453  				lookups: 5,
   454  				hits:    0,
   455  				misses:  5,
   456  				// 3 forced evictions, because 5 unique keys are inserted into a cache of size 2.
   457  				forcedEvictions: 3,
   458  				manualEvictions: 0,
   459  			},
   460  		},
   461  		{
   462  			name:              "ManualEvictions",
   463  			cacheSize:         2,
   464  			cacheInitialState: map[string]string{},
   465  			cacheCallbacks:    defaultCallbacks,
   466  			lookups: []lookup{
   467  				{"(key)1", goodValConstructor},
   468  				{"(key)2", goodValConstructor},
   469  				{"(key)3", goodValConstructor},
   470  				{"(key)1", badValConstructor},
   471  				{"(key)2", badValConstructor},
   472  				{"(key)3", badValConstructor},
   473  			},
   474  			expected: expected{
   475  				lookups:         6,
   476  				hits:            0,
   477  				misses:          0,
   478  				forcedEvictions: 0,
   479  				manualEvictions: 0,
   480  				// If racyEvictions is true, then we expect some positive number of evictions to occur.
   481  				racyEvictions: true,
   482  			},
   483  		},
   484  	} {
   485  		t.Run(tc.name, func(t *testing.T) {
   486  			cache, err := NewLRUCache(tc.cacheSize, tc.cacheCallbacks)
   487  			if err != nil {
   488  				t.Fatalf("could not initialize cache: %v", err)
   489  			}
   490  			// Reset test state.
   491  			lookupsCounter = 0
   492  			hitsCounter = 0
   493  			missesCounter = 0
   494  			forcedEvictionsCounter = 0
   495  			manualEvictionsCounter = 0
   496  
   497  			var wg sync.WaitGroup
   498  
   499  			// For the sake of realism, perform all lookups concurrently. The
   500  			// concurrency should have no effect on the operation of the
   501  			// callbacks.
   502  			for k, v := range tc.cacheInitialState {
   503  				k := k
   504  				v := v
   505  				wg.Add(1)
   506  				go func() {
   507  					cache.GetOrAdd(k, goodValConstructor(v))
   508  					wg.Done()
   509  				}()
   510  			}
   511  
   512  			for _, lookup := range tc.lookups {
   513  				lookup := lookup
   514  				wg.Add(1)
   515  				go func() {
   516  					cache.GetOrAdd(lookup.key, lookup.valConstructor("(val)"+lookup.key))
   517  					wg.Done()
   518  				}()
   519  			}
   520  			wg.Wait()
   521  
   522  			if tc.expected.lookups != lookupsCounter {
   523  				t.Errorf("Expected lookupsCounter to be '%d', got '%d'", tc.expected.lookups, lookupsCounter)
   524  			}
   525  
   526  			// If we expect racy evictions, then we expect *some* evictions to occur.
   527  			if tc.expected.racyEvictions {
   528  				totalEvictions := forcedEvictionsCounter + manualEvictionsCounter
   529  				if totalEvictions == 0 {
   530  					t.Errorf("Expected total evictions to be greater than 0, got '%d'", totalEvictions)
   531  				}
   532  			} else {
   533  				if tc.expected.hits != hitsCounter {
   534  					t.Errorf("Expected hitsCounter to be '%d', got '%d'", tc.expected.hits, hitsCounter)
   535  				}
   536  				if tc.expected.misses != missesCounter {
   537  					t.Errorf("Expected missesCounter to be '%d', got '%d'", tc.expected.misses, missesCounter)
   538  				}
   539  				if tc.expected.forcedEvictions != forcedEvictionsCounter {
   540  					t.Errorf("Expected forcedEvictionsCounter to be '%d', got '%d'", tc.expected.forcedEvictions, forcedEvictionsCounter)
   541  				}
   542  				if tc.expected.manualEvictions != manualEvictionsCounter {
   543  					t.Errorf("Expected manualEvictionsCounter to be '%d', got '%d'", tc.expected.manualEvictions, manualEvictionsCounter)
   544  				}
   545  			}
   546  		})
   547  	}
   548  }
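
        // As a hedged usage sketch of the Callbacks hooks exercised above (the field
        // names and callback signatures are taken from their use in this test; the
        // helper and counter names are illustrative only), a caller could wire up
        // simple instrumentation along these lines:
        func exampleInstrumentedCache() (*LRUCache, error) {
        	var mu sync.Mutex
        	counts := map[string]int{}
        	count := func(event string) EventCallback {
        		return func(key interface{}) {
        			mu.Lock()
        			counts[event]++
        			mu.Unlock()
        		}
        	}
        	return NewLRUCache(16, Callbacks{
        		LookupsCallback: count("lookups"),
        		HitsCallback:    count("hits"),
        		MissesCallback:  count("misses"),
        		// Forced evictions also receive the evicted value, ignored here.
        		ForcedEvictionsCallback: func(key interface{}, _ interface{}) {
        			mu.Lock()
        			counts["forcedEvictions"]++
        			mu.Unlock()
        		},
        		ManualEvictionsCallback: count("manualEvictions"),
        	})
        }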