github.com/m3db/m3@v1.5.1-0.20231129193456-75a402aa583b/src/x/cache/lru_cache_test.go

// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

package cache

import (
	"context"
	"errors"
	"fmt"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/m3db/m3/src/x/tallytest"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/uber-go/tally"
)

func metric(key string) string {
	return "lru-cache." + key
}

func TestLRU_Get_SingleLoadPerKey(t *testing.T) {
	tt := newLRUTester(3, 0)

	// Spin up a bunch of goroutines to access the cache simultaneously, and
	// only release once they are all ready.
	var (
		wgDone  sync.WaitGroup
		wgReady sync.WaitGroup

		releaseCh = make(chan struct{})
	)

	keys := []string{"key-0", "key-1"}

	for i := 0; i < 10; i++ {
		key := keys[i%len(keys)]
		wgReady.Add(1)
		wgDone.Add(1)
		go func() {
			defer wgDone.Done()

			// Unblock the triggering goroutine
			wgReady.Done()

			// Wait for the triggering goroutine to unblock us
			<-releaseCh

			// Sleep a bit to let other goroutines wake up
			time.Sleep(time.Millisecond * 100)

			// Fetch and tell the triggering goroutine that we're done
			value, err := tt.c.Get(context.Background(), key, tt.defaultLoad)
			require.NoError(t, err)
			require.Equal(t, fmt.Sprintf("%s-00001", key), value)
		}()
	}

	wgReady.Wait()
	close(releaseCh)
	wgDone.Wait()

	// We should only have entered the loader once for each key, even though
	// multiple goroutines were active simultaneously.
	assert.Equal(t, int64(1), *tt.callsToLoad["key-0"])
	assert.Equal(t, int64(1), *tt.callsToLoad["key-1"])

	// Make sure we're reporting proper metrics
	snapshot := tt.metrics.Snapshot()
	tallytest.AssertCounterValue(t, 2, snapshot, metric(loadAttemptsCounter), nil)
	tallytest.AssertCounterValue(t, 2, snapshot, metric(loadsCounter), successTags)
	tallytest.AssertCounterValue(t, 0, snapshot, metric(loadsCounter), failureTags)
	tallytest.AssertCounterValue(t, 2, snapshot, metric(accessCounter), missesTags)
	tallytest.AssertCounterValue(t, 8, snapshot, metric(accessCounter), hitsTags)
	tallytest.AssertGaugeValue(t, 2, snapshot, metric(entriesGauge), nil)
}
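// TestLRU_Get_HonorsContext verifies that a Get blocked behind another
// goroutine's in-flight load for the same key returns as soon as its own
// context is canceled, rather than waiting for that load to finish.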
func TestLRU_Get_HonorsContext(t *testing.T) {
	tt := newLRUTester(3, 0)

	// Spin up a background goroutine that loads a key.
	var (
		blockerCh = make(chan struct{})
		doneCh    = make(chan struct{})
	)

	blockedLoad, waitForStartCh := blockingLoad(blockerCh, tt.defaultLoad)
	go func() {
		// NB(mmihic): Does not use the cancellation context
		defer close(doneCh)
		val, err := tt.c.Get(context.Background(), "key-0", blockedLoad)
		require.NoError(t, err)
		require.Equal(t, "key-0-00001", val)
	}()

	<-waitForStartCh

	// Spin up several more background goroutines that access the same key.
	// These will block until the first goroutine completes or the context is done.
	var wg sync.WaitGroup
	ctx, cancel := context.WithTimeout(context.Background(), time.Hour*24)
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, err := tt.c.Get(ctx, "key-0", tt.defaultLoad)
			require.Equal(t, context.Canceled, err)
		}()
	}

	// Cancel the context; the background goroutines should fail with
	// context.Canceled. Wait for them to complete.
	cancel()
	wg.Wait()

	// Now let the first goroutine complete.
	close(blockerCh)
	<-doneCh
}
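// TestLRU_Get_LimitsTotalConcurrentLoad verifies that the cache enforces
// MaxConcurrency: once all concurrency leases are held by in-flight loads,
// further loads block until a lease frees up or their context expires.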
func TestLRU_Get_LimitsTotalConcurrentLoad(t *testing.T) {
	tt := newLRUTester(10, 5)

	// Spin up 5 blocked goroutines, each for a different key
	var (
		blockedChs = make([]chan struct{}, 5)
		doneChs    = make([]chan struct{}, 5)
	)
	for i := 0; i < len(blockedChs); i++ {
		key := fmt.Sprintf("key-%d", i)
		doneCh := make(chan struct{})

		blockedChs[i] = make(chan struct{})
		doneChs[i] = doneCh

		blockingLoadFn, waitForStartCh := blockingLoad(blockedChs[i], tt.defaultLoad)
		go func() {
			defer close(doneCh)
			val, err := tt.c.Get(context.Background(), key, blockingLoadFn)
			require.NoError(t, err)
			require.Equal(t, fmt.Sprintf("%s-00001", key), val.(string))
		}()
		<-waitForStartCh
	}

	// Try to acquire a 6th key - this will block since there are no concurrency
	// leases available. Let it time out.
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
	defer cancel()

	_, err := tt.c.Get(ctx, "key-9", tt.defaultLoad)
	require.Error(t, err)
	assert.Equal(t, context.DeadlineExceeded, err)

	// Release one of the 5 blocked goroutines and wait for it to complete
	close(blockedChs[0])
	<-doneChs[0]

	// Try to acquire a 6th key again - this should proceed since we've freed up a lease
	val, err := tt.c.Get(context.Background(), "key-9", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-9-00001", val)

	// Release the remaining 4 blocked goroutines
	for i := 1; i < len(blockedChs); i++ {
		close(blockedChs[i])
		<-doneChs[i]
	}
}
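// TestLRU_Get_EvictsExpiredEntriesPriorToLoading verifies that expired
// entries are swept out when a load needs room, that entries in the middle
// of loading are exempt from the sweep, and that a successful (re)load
// resets an entry's expiration.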
func TestLRU_Get_EvictsExpiredEntriesPriorToLoading(t *testing.T) {
	tt := newLRUTester(3, 0)
	ctx := context.Background()

	// Load 3 entries with enough time between them that we can expire two
	// without expiring the third
	val, err := tt.c.Get(ctx, "key-0", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-0-00001", val)
	tt.now = tt.now.Add(time.Minute * 5)

	val, err = tt.c.Get(ctx, "key-1", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-1-00001", val)
	tt.now = tt.now.Add(time.Minute * 5)

	val, err = tt.c.Get(ctx, "key-2", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-2-00001", val)
	tt.now = tt.now.Add(time.Minute * 5)

	// Access the oldest expiring entry to make sure that access does not affect expiration
	for i := 0; i < 10; i++ {
		val, err = tt.c.Get(ctx, "key-0", tt.defaultLoad)
		require.NoError(t, err)
		require.Equal(t, "key-0-00001", val)
	}

	// Advance time far enough to expire the first two entries
	tt.now = tt.now.Add(tt.ttl - (time.Minute * 5) - time.Second) // just before the last entry's expiration

	// Access a (non-expired) cached entry - should not expire anything
	val, err = tt.c.Get(ctx, "key-2", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-2-00001", val)
	snapshot := tt.metrics.Snapshot()
	tallytest.AssertGaugeValue(t, 3, snapshot, metric(entriesGauge), nil)
	assert.True(t, tt.c.has("key-0", false))
	assert.True(t, tt.c.has("key-1", false))
	assert.True(t, tt.c.has("key-2", false))

	// Access a new entry - should remove the two expired entries
	val, err = tt.c.Get(ctx, "key-3", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-3-00001", val)
	snapshot = tt.metrics.Snapshot()
	tallytest.AssertGaugeValue(t, 2, snapshot, metric(entriesGauge), nil)
	assert.False(t, tt.c.has("key-0", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-1", false)) // removed due to expiry
	assert.True(t, tt.c.has("key-2", false))  // not expired
	assert.True(t, tt.c.has("key-3", false))  // not expired

	// Spin up a goroutine to load another entry, but let it block in the loading function
	var (
		blockerCh = make(chan struct{})
		doneCh    = make(chan struct{})
	)

	blockedLoadFn, waitForStartCh := blockingLoad(blockerCh, tt.defaultLoad)
	go func() {
		// nolint: govet
		val, err := tt.c.Get(ctx, "key-4", blockedLoadFn)
		require.NoError(t, err)
		require.Equal(t, "key-4-00001", val)
		close(doneCh)
	}()
	<-waitForStartCh

	// Advance time enough that all entries are expired, including the one
	// that's being actively loaded
	tt.now = tt.now.Add(tt.ttl + time.Second)

	// Access a new entry - this will remove all of the expired entries except
	// the one that is currently loading
	val, err = tt.c.Get(ctx, "key-5", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-5-00001", val)
	snapshot = tt.metrics.Snapshot()
	tallytest.AssertGaugeValue(t, 2, snapshot, metric(entriesGauge), nil)
	assert.False(t, tt.c.has("key-0", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-1", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-2", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-3", false)) // removed due to expiry
	assert.True(t, tt.c.has("key-4", false))  // technically expired, but not removed since it is still loading
	assert.True(t, tt.c.has("key-5", true))   // newly loaded key

	// Allow the load to complete - the newly loaded entry should no longer be expired
	close(blockerCh)
	<-doneCh
	assert.False(t, tt.c.has("key-0", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-1", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-2", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-3", false)) // removed due to expiry
	assert.True(t, tt.c.has("key-4", true))   // not expired
	assert.True(t, tt.c.has("key-5", true))   // not expired

	// Advance time so that all entries are expired
	tt.now = tt.now.Add(tt.ttl + time.Second)

	// Access one of the previously cached entries - since it is expired it should be loaded again
	val, err = tt.c.Get(ctx, "key-3", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-3-00002", val)

	// And ensure that it is not expired after that load
	assert.False(t, tt.c.has("key-0", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-1", false)) // removed due to expiry
	assert.False(t, tt.c.has("key-2", false)) // removed due to expiry
	assert.True(t, tt.c.has("key-3", true))   // no longer expired
	assert.False(t, tt.c.has("key-4", true))  // has now expired
	assert.False(t, tt.c.has("key-5", true))  // has now expired
}
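// TestLRU_Get_EvictsLRUEntriesToReserveCapacity verifies that when the cache
// is full, the least recently used entries are evicted to make room for new
// loads, that entries which are mid-load cannot be evicted, and that Get
// fails with ErrCacheFull when every remaining entry is loading.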
func TestLRU_Get_EvictsLRUEntriesToReserveCapacity(t *testing.T) {
	tt := newLRUTester(3, 0)
	ctx := context.Background()

	// Load three entries.
	val, err := tt.c.Get(ctx, "key-0", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-0-00001", val)

	val, err = tt.c.Get(ctx, "key-1", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-1-00001", val)

	val, err = tt.c.Get(ctx, "key-2", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-2-00001", val)

	// Revisit the second entry to move it to the front of the LRU.
	val, err = tt.c.Get(ctx, "key-1", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-1-00001", val)

	// Load a fourth and fifth entry - should evict the first and third entry.
	val, err = tt.c.Get(ctx, "key-3", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-3-00001", val)

	val, err = tt.c.Get(ctx, "key-4", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-4-00001", val)

	assert.False(t, tt.c.has("key-0", false)) // removed due to LRU
	assert.True(t, tt.c.has("key-1", false))  // was MRU so not removed
	assert.False(t, tt.c.has("key-2", false)) // removed due to LRU
	assert.True(t, tt.c.has("key-3", false))  // newly loaded
	assert.True(t, tt.c.has("key-4", false))  // newly loaded

	// Spin up a blocked background goroutine to load a 6th entry - this will evict the second entry.
	var (
		blockerCh = make(chan struct{})
		wg        sync.WaitGroup
	)

	wg.Add(1)
	blockedLoadFn, waitForStartCh := blockingLoad(blockerCh, tt.defaultLoad)
	go func() {
		defer wg.Done()

		// nolint: govet
		val, err := tt.c.Get(ctx, "key-5", blockedLoadFn)
		require.NoError(t, err)
		require.Equal(t, "key-5-00001", val)
	}()
	<-waitForStartCh

	val, err = tt.c.Get(ctx, "key-3", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-3-00001", val)

	val, err = tt.c.Get(ctx, "key-4", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-4-00001", val)

	assert.False(t, tt.c.has("key-0", false)) // removed due to LRU
	assert.False(t, tt.c.has("key-1", false)) // removed due to LRU
	assert.False(t, tt.c.has("key-2", false)) // removed due to LRU
	assert.True(t, tt.c.has("key-3", false))  // newly loaded
	assert.True(t, tt.c.has("key-4", false))  // newly loaded
	assert.True(t, tt.c.has("key-5", false))  // loading

	// Access the 4th key to move it in front of the actively loading key in the LRU
	val, err = tt.c.Get(ctx, "key-3", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-3-00001", val)

	// Load a 7th and 8th entry - this will evict the fourth and fifth entries. Technically
	// we've accessed the fourth entry after the 6th entry, but we can't evict the 6th
	// entry because it is in the process of loading
	val, err = tt.c.Get(ctx, "key-6", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-6-00001", val)

	val, err = tt.c.Get(ctx, "key-7", tt.defaultLoad)
	require.NoError(t, err)
	require.Equal(t, "key-7-00001", val)

	// Spin up other blocked goroutines to reload the first and second entry
	wg.Add(1)
	blockedLoadFn, waitForStartCh = blockingLoad(blockerCh, tt.defaultLoad)
	go func() {
		defer wg.Done()

		// nolint: govet
		val, err := tt.c.Get(ctx, "key-0", blockedLoadFn)
		require.NoError(t, err)
		require.Equal(t, "key-0-00002", val)
	}()
	<-waitForStartCh

	wg.Add(1)
	blockedLoadFn, waitForStartCh = blockingLoad(blockerCh, tt.defaultLoad)
	go func() {
		defer wg.Done()

		// nolint: govet
		val, err := tt.c.Get(ctx, "key-1", blockedLoadFn)
		require.NoError(t, err)
		require.Equal(t, "key-1-00002", val)
	}()
	<-waitForStartCh

	// Try to load a 9th entry - this will fail because we cannot evict any of the
	// entries that are being loaded.
	_, err = tt.c.Get(ctx, "key-9", tt.defaultLoad)
	require.Error(t, err)
	assert.Equal(t, ErrCacheFull, err)

	// Let the background loads complete before the test exits.
	close(blockerCh)
	wg.Wait()
}
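// TestLRU_Get_CacheLoadErrors verifies error caching when
// CacheErrorsByDefault is enabled: failed loads are cached until the TTL
// passes, while errors wrapped in UncachedError bypass the cache and are
// retried on every access.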
func TestLRU_Get_CacheLoadErrors(t *testing.T) {
	loadAttempts := map[string]int{}

	now := time.Date(2020, time.August, 22, 14, 56, 17, 100, time.UTC)

	c := NewLRU(&LRUOptions{
		TTL:                  time.Second * 30,
		CacheErrorsByDefault: true,
		Now:                  func() time.Time { return now },
	})

	loader := func(_ context.Context, key string) (interface{}, error) {
		loadAttempts[key]++

		switch key {
		case "key-1":
			return nil, errors.New("this failed")
		case "key-2":
			return "foo", nil
		case "key-3":
			return nil, UncachedError{errors.New("this also failed")}
		default:
			return nil, ErrEntryNotFound
		}
	}

	// Load a key which generates an error
	_, err := c.Get(context.Background(), "key-1", loader)
	require.EqualError(t, err, "this failed")

	// Access it a few more times - the error should be cached
	for i := 0; i < 10; i++ {
		_, err = c.Get(context.Background(), "key-1", loader)
		require.EqualError(t, err, "this failed")
	}

	// Should only have been loaded once despite resulting in an error
	assert.Equal(t, 1, loadAttempts["key-1"])

	// Access the erroring key once more - the cached error should still be returned
	_, err = c.Get(context.Background(), "key-1", loader)
	require.EqualError(t, err, "this failed")

	// Load a key which doesn't exist - this should be triggered and the result cached
	_, err = c.Get(context.Background(), "non-existent", loader)
	require.Equal(t, ErrEntryNotFound, err)

	for i := 0; i < 10; i++ {
		_, err = c.Get(context.Background(), "non-existent", loader)
		require.Equal(t, ErrEntryNotFound, err)
	}

	assert.Equal(t, 1, loadAttempts["non-existent"])

	// Advance past the TTL and re-access the key that generated an error - should reload that key
	now = now.Add(time.Hour * 10)
	_, err = c.Get(context.Background(), "key-1", loader)
	require.EqualError(t, err, "this failed")
	assert.Equal(t, 2, loadAttempts["key-1"])

	// Load a key that results in an error that we are explicitly not caching - should constantly
	// attempt to reload that key
	for i := 0; i < 10; i++ {
		_, err = c.Get(context.Background(), "key-3", loader)
		require.EqualError(t, err, "this also failed")
		require.False(t, errors.As(err, &UncachedError{})) // should have been unwrapped
	}
	assert.Equal(t, 10, loadAttempts["key-3"])
}

// TestLRU_Get_DontCacheLoadErrors verifies that with CacheErrorsByDefault
// disabled, failed loads are retried on every access unless the error is
// explicitly wrapped in CachedError, and that both wrappers are unwrapped
// before being returned to the caller.
func TestLRU_Get_DontCacheLoadErrors(t *testing.T) {
	loadAttempts := map[string]int{}
	c := NewLRU(&LRUOptions{
		TTL:                  time.Second * 30,
		CacheErrorsByDefault: false,
	})

	loader := func(_ context.Context, key string) (interface{}, error) {
		loadAttempts[key]++

		if key == "always-cached" {
			return nil, &CachedError{errors.New("this failed")}
		}

		if key == "always-uncached" {
			return nil, &UncachedError{errors.New("this failed")}
		}

		return nil, errors.New("this failed")
	}

	// No matter how many times we access the erroring key, we'll keep going back to the loader
	for i := 0; i < 10; i++ {
		_, err := c.Get(context.Background(), "key-1", loader)
		require.EqualError(t, err, "this failed")
		require.False(t, errors.As(err, &UncachedError{}))
		require.False(t, errors.As(err, &CachedError{}))
	}
	assert.Equal(t, 10, loadAttempts["key-1"])

	// Allow explicit caching even when caching is disabled by default
	for i := 0; i < 10; i++ {
		_, err := c.Get(context.Background(), "always-cached", loader)
		require.EqualError(t, err, "this failed")
		require.False(t, errors.As(err, &UncachedError{}))
		require.False(t, errors.As(err, &CachedError{}))
	}
	assert.Equal(t, 1, loadAttempts["always-cached"])

	// Still unwrap uncached errors even when caching is disabled
	for i := 0; i < 10; i++ {
		_, err := c.Get(context.Background(), "always-uncached", loader)
		require.EqualError(t, err, "this failed")
		require.False(t, errors.As(err, &UncachedError{}))
		require.False(t, errors.As(err, &CachedError{}))
	}
	assert.Equal(t, 10, loadAttempts["always-uncached"])
}
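// TestLRU_GetWithTTL_AllowEntrySpecificTTLs verifies that a TTL returned by
// the loader overrides the cache-wide default TTL for that entry.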
func TestLRU_GetWithTTL_AllowEntrySpecificTTLs(t *testing.T) {
	var (
		loadAttempts = 0
		now          = time.Date(2020, time.August, 22, 14, 56, 17, 100, time.UTC)
		loader       = func(_ context.Context, key string) (interface{}, time.Time, error) {
			loadAttempts++
			return fmt.Sprintf("%s-%05d", key, loadAttempts), now.Add(time.Hour * 24), nil
		}
	)

	c := NewLRU(&LRUOptions{
		TTL: time.Second * 30,
		Now: func() time.Time {
			return now
		},
	})

	// Repeatedly load, returning a custom TTL, advancing time past the "default" TTL but
	// still within the TTL returned from the load function - should not reload
	for i := 0; i < 10; i++ {
		val, err := c.GetWithTTL(context.Background(), "my-key", loader)
		require.NoError(t, err)
		assert.Equal(t, "my-key-00001", val)
		assert.Equal(t, 1, loadAttempts)
		now = now.Add(time.Minute)
	}

	// Advance past the TTL returned from the loader and try again - should reload
	now = now.Add(time.Hour * 72)
	val, err := c.GetWithTTL(context.Background(), "my-key", loader)
	require.NoError(t, err)
	assert.Equal(t, "my-key-00002", val)
	assert.Equal(t, 2, loadAttempts)
}

func TestLRU_GetWithTTL_DoubleGetNoExistingEntryNoLoader(t *testing.T) {
	lru := NewLRU(nil)

	_, err := lru.GetWithTTL(context.Background(), "foo", nil)
	require.Error(t, err)
	assert.Equal(t, ErrEntryNotFound, err)

	_, err = lru.GetWithTTL(context.Background(), "foo", nil)
	require.Error(t, err)
	assert.Equal(t, ErrEntryNotFound, err)
}

func TestLRU_PutWithTTL_NoExistingEntry(t *testing.T) {
	lru := NewLRU(nil)

	lru.PutWithTTL("foo", "bar", 0)

	value, err := lru.GetWithTTL(context.Background(), "foo", nil)
	require.NoError(t, err)
	assert.Equal(t, "bar", value.(string))
}
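// TestLRU_GetNonExisting_FromFullCache_AfterDoublePut is a regression test
// for an infinite loop in LRU.reserveCapacity; see the inline comment below.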
func TestLRU_GetNonExisting_FromFullCache_AfterDoublePut(t *testing.T) {
	lru := NewLRU(&LRUOptions{MaxEntries: 2})

	// Insert same key twice:
	lru.Put("foo", "1")
	lru.Put("foo", "1")

	// Insert another key to make cache full:
	lru.Put("bar", "2")

	// Try to get an entry that does not exist - this was getting LRU.reserveCapacity into
	// an infinite loop because the second Put above was inserting a copy of the original entry
	// into the double-linked lists and mutating its state (loadTimeElt and accessTimeElt fields),
	// making its removal impossible:
	_, err := lru.Get(context.Background(), "new", nil)
	require.Error(t, err, ErrEntryNotFound.Error())
}

// TestLRU_TryGetExpired is a regression test for a bug that would create a
// loadingCh for expired entries in the cache, even if the loader was nil.
func TestLRU_TryGetExpired(t *testing.T) {
	now := time.Now()
	lru := NewLRU(&LRUOptions{MaxEntries: 2, TTL: time.Second, Now: func() time.Time {
		return now
	}})

	// Create an entry in the cache and expire it.
	lru.Put("foo", "1")
	now = now.Add(time.Second * 2)

	// First load is not found since it's expired.
	_, ok := lru.TryGet("foo")
	require.False(t, ok)

	// Second load is still not found since it's expired. Previously the bug would attempt
	// to wait on the loadingCh and fail with a panic because the ctx is nil.
	_, ok = lru.TryGet("foo")
	require.False(t, ok)
}

func TestLRU_EnforceMaxEntries(t *testing.T) {
	var (
		maxEntries = 2
		lru        = NewLRU(&LRUOptions{MaxEntries: maxEntries, TTL: time.Second, Now: time.Now})
	)

	for i := 0; i <= maxEntries; i++ {
		lru.Put(strconv.Itoa(i), "foo")
	}

	assert.Len(t, lru.entries, maxEntries)
}

func TestLRU_PutAboveLimit(t *testing.T) {
	var (
		maxEntries = 2
		lru        = NewLRU(&LRUOptions{MaxEntries: maxEntries, TTL: time.Second, Now: time.Now})
	)

	for i := 0; i < 3*maxEntries; i++ {
		key, value := strconv.Itoa(i), fmt.Sprintf("value for %d", i)
		lru.Put(key, value)

		res, ok := lru.TryGet(key)
		require.True(t, ok)
		require.Equal(t, value, res.(string))
	}

	assert.Len(t, lru.entries, maxEntries)
}

var defaultKeys = []string{
	"key-0", "key-1", "key-2", "key-3", "key-4", "key-5", "key-6", "key-7", "key-8", "key-9", "key10",
}

// lruTester bundles an LRU with a deterministic clock, per-key load counters,
// and a test metrics scope.
type lruTester struct {
	c           *LRU
	callsToLoad map[string]*int64
	now         time.Time
	ttl         time.Duration
	metrics     tally.TestScope
}

// newLRUTester creates a new tester for covering LRU cache functionality
func newLRUTester(maxEntries, maxConcurrency int) *lruTester {
	tt := &lruTester{
		ttl:         time.Minute * 30,
		now:         time.Date(2020, time.April, 13, 22, 15, 35, 200, time.UTC),
		callsToLoad: make(map[string]*int64, len(defaultKeys)),
		metrics:     tally.NewTestScope("", nil),
	}

	for _, key := range defaultKeys {
		var i int64
		tt.callsToLoad[key] = &i
	}

	cacheOpts := &LRUOptions{
		MaxEntries:     maxEntries,
		TTL:            tt.ttl,
		Metrics:        tt.metrics,
		MaxConcurrency: maxConcurrency,
		Now:            func() time.Time { return tt.now }, // use the test time
	}

	tt.c = NewLRU(cacheOpts)
	return tt
}

// defaultLoad is the default implementation of a loader for a cache
func (tt *lruTester) defaultLoad(_ context.Context, key string) (interface{}, error) {
	callPtr := tt.callsToLoad[key]
	if callPtr == nil {
		return nil, ErrEntryNotFound
	}

	calls := atomic.AddInt64(callPtr, 1)
	return fmt.Sprintf("%s-%05d", key, calls), nil
}
// blockingLoad wraps a load function with one that blocks until the
// provided channel is closed. Returns a channel that the caller can wait on
// to ensure that the load function has been entered.
func blockingLoad(blockerCh chan struct{}, loader LoaderFunc) (LoaderFunc, chan struct{}) {
	// Channel to block the caller until the loader has been called
	loadFnEnteredCh := make(chan struct{})
	return func(ctx context.Context, key string) (interface{}, error) {
		close(loadFnEnteredCh)
		select {
		case <-ctx.Done():
			return nil, UncachedError{ctx.Err()}
		case <-blockerCh:
		}

		return loader(ctx, key)
	}, loadFnEnteredCh
}