github.com/hxx258456/ccgo@v0.0.5-0.20230213014102-48b35f46f66f/grpc/balancer/rls/internal/cache_test.go (about)

     1  /*
     2   *
     3   * Copyright 2021 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package rls
    20  
    21  import (
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/google/go-cmp/cmp"
    26  	"github.com/google/go-cmp/cmp/cmpopts"
    27  	"github.com/hxx258456/ccgo/grpc/internal/backoff"
    28  )
    29  
var (
	// cacheKeys is a fixed set of distinct keys used to populate the LRU
	// and the data cache in the tests below.
	cacheKeys = []cacheKey{
		{path: "0", keys: "a"},
		{path: "1", keys: "b"},
		{path: "2", keys: "c"},
		{path: "3", keys: "d"},
		{path: "4", keys: "e"},
	}

	// longDuration is used for expiry/backoff times that must stay in the
	// future for the duration of a test; shortDuration is added to the zero
	// time.Time in initCacheEntries to produce times that are already past.
	longDuration  = 10 * time.Minute
	shortDuration = 1 * time.Millisecond
	// cacheEntries is rebuilt by initCacheEntries, which tests must call
	// first since entries contain timestamps relative to time.Now.
	cacheEntries []*cacheEntry
)
    43  
// initCacheEntries rebuilds the package-level cacheEntries slice with one
// entry per interesting state: valid with a future minimum eviction time,
// valid and in backoff, valid and not in backoff, expired, and expired with
// an expired backoff. Tests must call this before reading cacheEntries
// because the entries hold absolute timestamps computed from time.Now.
func initCacheEntries() {
	// All entries have a dummy size of 1 to simplify resize operations.
	cacheEntries = []*cacheEntry{
		{
			// Entry is valid and minimum expiry time has not expired.
			expiryTime:        time.Now().Add(longDuration),
			earliestEvictTime: time.Now().Add(longDuration),
			size:              1,
		},
		{
			// Entry is valid and is in backoff.
			expiryTime:   time.Now().Add(longDuration),
			backoffTime:  time.Now().Add(longDuration),
			backoffState: &backoffState{timer: time.NewTimer(longDuration)},
			size:         1,
		},
		{
			// Entry is valid, and not in backoff.
			expiryTime: time.Now().Add(longDuration),
			size:       1,
		},
		{
			// Entry is invalid.
			expiryTime: time.Time{}.Add(shortDuration),
			size:       1,
		},
		{
			// Entry is invalid and backoff has expired.
			expiryTime:        time.Time{}.Add(shortDuration),
			backoffExpiryTime: time.Time{}.Add(shortDuration),
			size:              1,
		},
	}
}
    78  
    79  func (s) TestLRU_BasicOperations(t *testing.T) {
    80  	initCacheEntries()
    81  	// Create an LRU and add some entries to it.
    82  	lru := newLRU()
    83  	for _, k := range cacheKeys {
    84  		lru.addEntry(k)
    85  	}
    86  
    87  	// Get the least recent entry. This should be the first entry we added.
    88  	if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want {
    89  		t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
    90  	}
    91  
    92  	// Iterate through the slice of keys we added earlier, making them the most
    93  	// recent entry, one at a time. The least recent entry at that point should
    94  	// be the next entry from our slice of keys.
    95  	for i, k := range cacheKeys {
    96  		lru.makeRecent(k)
    97  
    98  		lruIndex := (i + 1) % len(cacheKeys)
    99  		if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want {
   100  			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
   101  		}
   102  	}
   103  
   104  	// Iterate through the slice of keys we added earlier, removing them one at
   105  	// a time The least recent entry at that point should be the next entry from
   106  	// our slice of keys, except for the last one because the lru will be empty.
   107  	for i, k := range cacheKeys {
   108  		lru.removeEntry(k)
   109  
   110  		var want cacheKey
   111  		if i < len(cacheKeys)-1 {
   112  			want = cacheKeys[i+1]
   113  		}
   114  		if got := lru.getLeastRecentlyUsed(); got != want {
   115  			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
   116  		}
   117  	}
   118  }
   119  
   120  func (s) TestLRU_IterateAndRun(t *testing.T) {
   121  	initCacheEntries()
   122  	// Create an LRU and add some entries to it.
   123  	lru := newLRU()
   124  	for _, k := range cacheKeys {
   125  		lru.addEntry(k)
   126  	}
   127  
   128  	// Iterate through the lru to make sure that entries are returned in the
   129  	// least recently used order.
   130  	var gotKeys []cacheKey
   131  	lru.iterateAndRun(func(key cacheKey) {
   132  		gotKeys = append(gotKeys, key)
   133  	})
   134  	if !cmp.Equal(gotKeys, cacheKeys, cmp.AllowUnexported(cacheKey{})) {
   135  		t.Fatalf("lru.iterateAndRun returned %v, want %v", gotKeys, cacheKeys)
   136  	}
   137  
   138  	// Make sure that removing entries from the lru while iterating through it
   139  	// is a safe operation.
   140  	lru.iterateAndRun(func(key cacheKey) {
   141  		lru.removeEntry(key)
   142  	})
   143  
   144  	// Check the lru internals to make sure we freed up all the memory.
   145  	if len := lru.ll.Len(); len != 0 {
   146  		t.Fatalf("Number of entries in the lru's underlying list is %d, want 0", len)
   147  	}
   148  	if len := len(lru.m); len != 0 {
   149  		t.Fatalf("Number of entries in the lru's underlying map is %d, want 0", len)
   150  	}
   151  }
   152  
   153  func (s) TestDataCache_BasicOperations(t *testing.T) {
   154  	initCacheEntries()
   155  	dc := newDataCache(5, nil)
   156  	for i, k := range cacheKeys {
   157  		dc.addEntry(k, cacheEntries[i])
   158  	}
   159  	for i, k := range cacheKeys {
   160  		entry := dc.getEntry(k)
   161  		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
   162  			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i])
   163  		}
   164  	}
   165  }
   166  
   167  func (s) TestDataCache_AddForcesResize(t *testing.T) {
   168  	initCacheEntries()
   169  	dc := newDataCache(1, nil)
   170  
   171  	// The first entry in cacheEntries has a minimum expiry time in the future.
   172  	// This entry would stop the resize operation since we do not evict entries
   173  	// whose minimum expiration time is in the future. So, we do not use that
   174  	// entry in this test. The entry being added has a running backoff timer.
   175  	evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1])
   176  	if evicted || !ok {
   177  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok)
   178  	}
   179  
   180  	// Add another entry leading to the eviction of the above entry which has a
   181  	// running backoff timer. The first return value is expected to be true.
   182  	backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2])
   183  	if !backoffCancelled || !ok {
   184  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok)
   185  	}
   186  
   187  	// Add another entry leading to the eviction of the above entry which does not
   188  	// have a running backoff timer. This should evict the above entry, but the
   189  	// first return value is expected to be false.
   190  	backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3])
   191  	if backoffCancelled || !ok {
   192  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok)
   193  	}
   194  }
   195  
   196  func (s) TestDataCache_Resize(t *testing.T) {
   197  	initCacheEntries()
   198  	dc := newDataCache(5, nil)
   199  	for i, k := range cacheKeys {
   200  		dc.addEntry(k, cacheEntries[i])
   201  	}
   202  
   203  	// The first cache entry (with a key of cacheKeys[0]) that we added has an
   204  	// earliestEvictTime in the future. As part of the resize operation, we
   205  	// traverse the cache in least recently used order, and this will be first
   206  	// entry that we will encounter. And since the earliestEvictTime is in the
   207  	// future, the resize operation will stop, leaving the cache bigger than
   208  	// what was asked for.
   209  	if dc.resize(1) {
   210  		t.Fatalf("dataCache.resize() returned true, want false")
   211  	}
   212  	if dc.currentSize != 5 {
   213  		t.Fatalf("dataCache.size is %d, want 5", dc.currentSize)
   214  	}
   215  
   216  	// Remove the entry with earliestEvictTime in the future and retry the
   217  	// resize operation.
   218  	dc.removeEntryForTesting(cacheKeys[0])
   219  	if !dc.resize(1) {
   220  		t.Fatalf("dataCache.resize() returned false, want true")
   221  	}
   222  	if dc.currentSize != 1 {
   223  		t.Fatalf("dataCache.size is %d, want 1", dc.currentSize)
   224  	}
   225  }
   226  
   227  func (s) TestDataCache_EvictExpiredEntries(t *testing.T) {
   228  	initCacheEntries()
   229  	dc := newDataCache(5, nil)
   230  	for i, k := range cacheKeys {
   231  		dc.addEntry(k, cacheEntries[i])
   232  	}
   233  
   234  	// The last two entries in the cacheEntries list have expired, and will be
   235  	// evicted. The first three should still remain in the cache.
   236  	if !dc.evictExpiredEntries() {
   237  		t.Fatal("dataCache.evictExpiredEntries() returned false, want true")
   238  	}
   239  	if dc.currentSize != 3 {
   240  		t.Fatalf("dataCache.size is %d, want 3", dc.currentSize)
   241  	}
   242  	for i := 0; i < 3; i++ {
   243  		entry := dc.getEntry(cacheKeys[i])
   244  		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
   245  			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i])
   246  		}
   247  	}
   248  }
   249  
   250  func (s) TestDataCache_ResetBackoffState(t *testing.T) {
   251  	type fakeBackoff struct {
   252  		backoff.Strategy
   253  	}
   254  
   255  	initCacheEntries()
   256  	dc := newDataCache(5, nil)
   257  	for i, k := range cacheKeys {
   258  		dc.addEntry(k, cacheEntries[i])
   259  	}
   260  
   261  	newBackoffState := &backoffState{bs: &fakeBackoff{}}
   262  	if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker {
   263  		t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true")
   264  	}
   265  
   266  	// Make sure that the entry with no backoff state was not touched.
   267  	if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) {
   268  		t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState")
   269  	}
   270  
   271  	// Make sure that the entry with a valid backoff state was reset.
   272  	entry := dc.getEntry(cacheKeys[1])
   273  	if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" {
   274  		t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff)
   275  	}
   276  }