google.golang.org/grpc@v1.72.2/balancer/rls/cache_test.go

/*
 *
 * Copyright 2021 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package rls

import (
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"google.golang.org/grpc/internal/backoff"
	"google.golang.org/grpc/internal/testutils/stats"
)

var (
	cacheKeys = []cacheKey{
		{path: "0", keys: "a"},
		{path: "1", keys: "b"},
		{path: "2", keys: "c"},
		{path: "3", keys: "d"},
		{path: "4", keys: "e"},
	}

	longDuration  = 10 * time.Minute
	shortDuration = 1 * time.Millisecond
	cacheEntries  []*cacheEntry
)

func initCacheEntries() {
	// All entries have a dummy size of 1 to simplify resize operations.
	cacheEntries = []*cacheEntry{
		{
			// Entry is valid and minimum expiry time has not expired.
			expiryTime:        time.Now().Add(longDuration),
			earliestEvictTime: time.Now().Add(longDuration),
			size:              1,
		},
		{
			// Entry is valid and is in backoff.
			expiryTime:   time.Now().Add(longDuration),
			backoffTime:  time.Now().Add(longDuration),
			backoffState: &backoffState{timer: time.NewTimer(longDuration)},
			size:         1,
		},
		{
			// Entry is valid, and not in backoff.
			expiryTime: time.Now().Add(longDuration),
			size:       1,
		},
		{
			// Entry is invalid.
			expiryTime: time.Time{}.Add(shortDuration),
			size:       1,
		},
		{
			// Entry is invalid and backoff has expired.
			expiryTime:        time.Time{}.Add(shortDuration),
			backoffExpiryTime: time.Time{}.Add(shortDuration),
			size:              1,
		},
	}
}

func (s) TestLRU_BasicOperations(t *testing.T) {
	initCacheEntries()
	// Create an LRU and add some entries to it.
	lru := newLRU()
	for _, k := range cacheKeys {
		lru.addEntry(k)
	}

	// Get the least recent entry. This should be the first entry we added.
	if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want {
		t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
	}

	// Iterate through the slice of keys we added earlier, making them the
	// most recent entry, one at a time. The least recent entry at that point
	// should be the next entry from our slice of keys.
	for i, k := range cacheKeys {
		lru.makeRecent(k)

		lruIndex := (i + 1) % len(cacheKeys)
		if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want {
			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
		}
	}

	// Iterate through the slice of keys we added earlier, removing them one
	// at a time. The least recent entry at that point should be the next
	// entry from our slice of keys, except for the last one, because the LRU
	// will be empty by then.
	for i, k := range cacheKeys {
		lru.removeEntry(k)

		var want cacheKey
		if i < len(cacheKeys)-1 {
			want = cacheKeys[i+1]
		}
		if got := lru.getLeastRecentlyUsed(); got != want {
			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
		}
	}
}
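
// The test above exercises the lru type's contract rather than its
// internals. Below is a minimal, hypothetical sketch of that contract (the
// real lru lives in cache.go and is not necessarily implemented this way):
// a slice-backed list ordered from least to most recently used, returning
// the zero cacheKey when empty.
type sketchLRU struct {
	keys []cacheKey // ordered least recently used first
}

// addEntry appends the key as the most recently used.
func (l *sketchLRU) addEntry(key cacheKey) { l.keys = append(l.keys, key) }

// makeRecent moves an existing key to the most recently used position.
func (l *sketchLRU) makeRecent(key cacheKey) {
	l.removeEntry(key)
	l.addEntry(key)
}

// removeEntry deletes the key, preserving the order of the remaining keys.
func (l *sketchLRU) removeEntry(key cacheKey) {
	for i, k := range l.keys {
		if k == key {
			l.keys = append(l.keys[:i], l.keys[i+1:]...)
			return
		}
	}
}

// getLeastRecentlyUsed returns the oldest key, or a zero cacheKey if empty.
func (l *sketchLRU) getLeastRecentlyUsed() cacheKey {
	if len(l.keys) == 0 {
		return cacheKey{}
	}
	return l.keys[0]
}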

func (s) TestDataCache_BasicOperations(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil, &stats.NoopMetricsRecorder{}, "")
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}
	for i, k := range cacheKeys {
		entry := dc.getEntry(k)
		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i])
		}
	}
}

func (s) TestDataCache_AddForcesResize(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(1, nil, &stats.NoopMetricsRecorder{}, "")

	// The first entry in cacheEntries has a minimum expiry time in the future.
	// This entry would stop the resize operation since we do not evict entries
	// whose minimum expiration time is in the future. So, we do not use that
	// entry in this test. The entry being added has a running backoff timer.
	evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1])
	if evicted || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok)
	}

	// Add another entry leading to the eviction of the above entry which has a
	// running backoff timer. The first return value is expected to be true.
	backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2])
	if !backoffCancelled || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok)
	}

	// Add another entry, evicting the above entry, which does not have a
	// running backoff timer. The first return value is expected to be false.
	backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3])
	if backoffCancelled || !ok {
		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok)
	}
}

func (s) TestDataCache_Resize(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil, &stats.NoopMetricsRecorder{}, "")
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	// The first cache entry (with a key of cacheKeys[0]) that we added has an
	// earliestEvictTime in the future. As part of the resize operation, we
	// traverse the cache in least recently used order, so this will be the
	// first entry we encounter. And since its earliestEvictTime is in the
	// future, the resize operation will stop there, leaving the cache bigger
	// than what was asked for.
	if dc.resize(1) {
		t.Fatalf("dataCache.resize() returned true, want false")
	}
	if dc.currentSize != 5 {
		t.Fatalf("dataCache.size is %d, want 5", dc.currentSize)
	}

	// Remove the entry with the earliestEvictTime in the future and retry the
	// resize operation.
	dc.removeEntryForTesting(cacheKeys[0])
	if !dc.resize(1) {
		t.Fatalf("dataCache.resize() returned false, want true")
	}
	if dc.currentSize != 1 {
		t.Fatalf("dataCache.size is %d, want 1", dc.currentSize)
	}
}
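
// The two tests above (TestDataCache_AddForcesResize and
// TestDataCache_Resize) rely on the same eviction policy. The function below
// is a hypothetical sketch of that policy, not the dataCache's actual code:
// entries are visited in least recently used order, an entry whose
// earliestEvictTime is still in the future halts the shrink, and the boolean
// result (assumed here to mirror addEntry's first return value) reports
// whether evicting an entry cancelled a running backoff timer.
func resizePolicySketch(lruOrder []*cacheEntry, currentSize, targetSize int64) (backoffCancelled bool, newSize int64) {
	now := time.Now()
	newSize = currentSize
	for _, entry := range lruOrder {
		if newSize <= targetSize {
			break // cache now fits the requested size
		}
		if entry.earliestEvictTime.After(now) {
			break // minimum lifetime not reached; stop shrinking here
		}
		if entry.backoffState != nil && entry.backoffState.timer != nil {
			entry.backoffState.timer.Stop()
			backoffCancelled = true
		}
		newSize -= entry.size
	}
	return backoffCancelled, newSize
}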

func (s) TestDataCache_EvictExpiredEntries(t *testing.T) {
	initCacheEntries()
	dc := newDataCache(5, nil, &stats.NoopMetricsRecorder{}, "")
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	// The last two entries in the cacheEntries list have expired, and will be
	// evicted. The first three should still remain in the cache.
	if !dc.evictExpiredEntries() {
		t.Fatal("dataCache.evictExpiredEntries() returned false, want true")
	}
	if dc.currentSize != 3 {
		t.Fatalf("dataCache.size is %d, want 3", dc.currentSize)
	}
	for i := 0; i < 3; i++ {
		entry := dc.getEntry(cacheKeys[i])
		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i])
		}
	}
}
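
// A hypothetical sketch of the expiry rule the test above exercises (the
// real logic lives in dataCache.evictExpiredEntries): an entry is evicted
// once its expiryTime has passed, and the method reports whether at least
// one entry was evicted.
func evictExpiredSketch(entries map[cacheKey]*cacheEntry, now time.Time) bool {
	evicted := false
	for key, entry := range entries {
		if entry.expiryTime.After(now) {
			continue // still valid; keep it
		}
		delete(entries, key)
		evicted = true
	}
	return evicted
}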

func (s) TestDataCache_ResetBackoffState(t *testing.T) {
	type fakeBackoff struct {
		backoff.Strategy
	}

	initCacheEntries()
	dc := newDataCache(5, nil, &stats.NoopMetricsRecorder{}, "")
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntries[i])
	}

	newBackoffState := &backoffState{bs: &fakeBackoff{}}
	if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker {
		t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true")
	}

	// Make sure that the entry with no backoff state was not touched.
	if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) {
		t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState")
	}

	// Make sure that the entry with a valid backoff state was reset.
	entry := dc.getEntry(cacheKeys[1])
	if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" {
		t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff)
	}
}
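
// A hypothetical sketch of the reset semantics verified above (not the
// dataCache's actual code): entries without a backoffState are left
// untouched, entries with one receive a fresh copy of the new state, and the
// return value reports whether any entry changed, i.e. whether the picker
// needs updating.
func resetBackoffSketch(entries []*cacheEntry, newState backoffState) (updatePicker bool) {
	for _, entry := range entries {
		if entry.backoffState == nil {
			continue // never had a backoff; nothing to reset
		}
		stateCopy := newState // each entry gets its own copy
		entry.backoffState = &stateCopy
		updatePicker = true
	}
	return updatePicker
}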

func (s) TestDataCache_Metrics(t *testing.T) {
	cacheEntriesMetricsTests := []*cacheEntry{
		{size: 1},
		{size: 2},
		{size: 3},
		{size: 4},
		{size: 5},
	}
	tmr := stats.NewTestMetricsRecorder()
	dc := newDataCache(50, nil, tmr, "")

	dc.updateRLSServerTarget("rls-server-target")
	for i, k := range cacheKeys {
		dc.addEntry(k, cacheEntriesMetricsTests[i])
	}

	const cacheEntriesKey = "grpc.lb.rls.cache_entries"
	const cacheSizeKey = "grpc.lb.rls.cache_size"
	// Five entries with sizes summing to 15 should be recorded.
	if got, _ := tmr.Metric(cacheEntriesKey); got != 5 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheEntriesKey, got, 5)
	}
	if got, _ := tmr.Metric(cacheSizeKey); got != 15 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheSizeKey, got, 15)
	}

	// Resize the cache down to 2 entries (deterministic, since eviction is
	// based on LRU order).
	dc.resize(9)
	if got, _ := tmr.Metric(cacheEntriesKey); got != 2 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheEntriesKey, got, 2)
	}
	if got, _ := tmr.Metric(cacheSizeKey); got != 9 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheSizeKey, got, 9)
	}

	// Update the last entry to have size 6. The size metric should increase
	// by 1 to 10, while the number of cache entries should stay the same.
	// This write is deterministic: it targets the last entry added.
	dc.updateEntrySize(cacheEntriesMetricsTests[4], 6)

	if got, _ := tmr.Metric(cacheEntriesKey); got != 2 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheEntriesKey, got, 2)
	}
	if got, _ := tmr.Metric(cacheSizeKey); got != 10 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheSizeKey, got, 10)
	}

	// Delete the scaled-up cache entry. This should shrink the cache to 1
	// entry and subtract 6 from the size, leaving a cache size of 4.
	dc.deleteAndCleanup(cacheKeys[4], cacheEntriesMetricsTests[4])
	if got, _ := tmr.Metric(cacheEntriesKey); got != 1 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheEntriesKey, got, 1)
	}
	if got, _ := tmr.Metric(cacheSizeKey); got != 4 {
		t.Fatalf("Unexpected data for metric %v, got: %v, want: %v", cacheSizeKey, got, 4)
	}
}
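
// A hypothetical sketch of the gauge bookkeeping this test asserts: after
// every mutation the cache re-reports the number of entries and their summed
// sizes under the two metric names checked above. The recorder interface is
// a stand-in for illustration, not the recorder type the real cache uses.
type gaugeRecorderSketch interface {
	recordGauge(name string, value int64)
}

func reportCacheGaugesSketch(r gaugeRecorderSketch, numEntries, totalSize int64) {
	r.recordGauge("grpc.lb.rls.cache_entries", numEntries)
	r.recordGauge("grpc.lb.rls.cache_size", totalSize)
}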