github.com/fiatjaf/generic-ristretto@v0.0.1/cache_test.go

package ristretto

import (
	"fmt"
	"math/rand"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/fiatjaf/generic-ristretto/z"
	"github.com/stretchr/testify/require"
)

var wait = time.Millisecond * 10

func TestCacheKeyToHash(t *testing.T) {
	keyToHashCount := 0
	c, err := NewCache(&Config[int, int]{
		NumCounters: 10,
		MaxCost: 1000,
		BufferItems: 64,
		IgnoreInternalCost: true,
		KeyToHash: func(key int) (uint64, uint64) {
			keyToHashCount++
			return z.KeyToHash(key)
		},
	})
	require.NoError(t, err)
	if c.Set(1, 1, 1) {
		time.Sleep(wait)
		val, ok := c.Get(1)
		require.True(t, ok)
		require.NotNil(t, val)
		c.Del(1)
	}
	require.Equal(t, 3, keyToHashCount)
}

func TestUpdateMaxCost(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 10,
		MaxCost: 10,
		BufferItems: 64,
	})
	require.NoError(t, err)
	require.Equal(t, int64(10), c.MaxCost())
	require.True(t, c.Set(1, 1, 1))
	time.Sleep(wait)
	_, ok := c.Get(1)
	// Set is rejected because the cost of the entry is too high
	// when accounting for the internal cost of storing the entry.
	require.False(t, ok)

	// Update the max cost of the cache and retry.
	c.UpdateMaxCost(1000)
	require.Equal(t, int64(1000), c.MaxCost())
	require.True(t, c.Set(1, 1, 1))
	time.Sleep(wait)
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	c.Del(1)
}

func TestNewCache(t *testing.T) {
	_, err := NewCache(&Config[int, int]{
		NumCounters: 0,
	})
	require.Error(t, err)

	_, err = NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 0,
	})
	require.Error(t, err)

	_, err = NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 0,
	})
	require.Error(t, err)

	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)
	require.NotNil(t, c)
}

func TestNilCache(t *testing.T) {
	var c *Cache[int, int]
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Zero(t, val)

	require.False(t, c.Set(1, 1, 1))
	c.Del(1)
	c.Clear()
	c.Close()
}

func TestMultipleClose(t *testing.T) {
	var c *Cache[int, int]
	c.Close()

	var err error
	c, err = NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)
	c.Close()
	c.Close()
}

func TestSetAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	c.Close()
	require.False(t, c.Set(1, 1, 1))
}

func TestClearAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	c.Close()
	c.Clear()
}

func TestGetAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	require.True(t, c.Set(1, 1, 1))
	c.Close()

	_, ok := c.Get(1)
	require.False(t, ok)
}

func TestDelAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	require.True(t, c.Set(1, 1, 1))
	c.Close()

	c.Del(1)
}

func TestCacheProcessItems(t *testing.T) {
	m := &sync.Mutex{}
	evicted := make(map[uint64]struct{})
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		IgnoreInternalCost: true,
		Cost: func(value int) int64 {
			return int64(value)
		},
		OnEvict: func(item *Item[int]) {
			m.Lock()
			defer m.Unlock()
			evicted[item.Key] = struct{}{}
		},
	})
	require.NoError(t, err)

	var key uint64
	var conflict uint64

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item[int]{
		flag: itemNew,
		Key: key,
		Conflict: conflict,
		Value: 1,
		Cost: 0,
	}
	time.Sleep(wait)
	require.True(t, c.cachePolicy.Has(1))
	require.Equal(t, int64(1), c.cachePolicy.Cost(1))

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item[int]{
		flag: itemUpdate,
		Key: key,
		Conflict: conflict,
		Value: 2,
		Cost: 0,
	}
	time.Sleep(wait)
	require.Equal(t, int64(2), c.cachePolicy.Cost(1))

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item[int]{
		flag: itemDelete,
		Key: key,
		Conflict: conflict,
	}
	time.Sleep(wait)
	key, conflict = z.KeyToHash(1)
	val, ok := c.storedItems.Get(key, conflict)
	require.False(t, ok)
	require.Zero(t, val)
	require.False(t, c.cachePolicy.Has(1))

	key, conflict = z.KeyToHash(2)
	c.setBuf <- &Item[int]{
		flag: itemNew,
		Key: key,
		Conflict: conflict,
		Value: 2,
		Cost: 3,
	}
	key, conflict = z.KeyToHash(3)
	c.setBuf <- &Item[int]{
		flag: itemNew,
		Key: key,
		Conflict: conflict,
		Value: 3,
		Cost: 3,
	}
	key, conflict = z.KeyToHash(4)
	c.setBuf <- &Item[int]{
		flag: itemNew,
		Key: key,
		Conflict: conflict,
		Value: 3,
		Cost: 3,
	}
	key, conflict = z.KeyToHash(5)
	c.setBuf <- &Item[int]{
		flag: itemNew,
		Key: key,
		Conflict: conflict,
		Value: 3,
		Cost: 5,
	}
	time.Sleep(wait)
	m.Lock()
	require.NotEqual(t, 0, len(evicted))
	m.Unlock()

	defer func() {
		require.NotNil(t, recover())
	}()
	c.Close()
	c.setBuf <- &Item[int]{flag: itemNew}
}

func TestCacheGet(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		IgnoreInternalCost: true,
		Metrics: true,
	})
	require.NoError(t, err)

	key, conflict := z.KeyToHash(1)
	i := Item[int]{
		Key: key,
		Conflict: conflict,
		Value: 1,
	}
	c.storedItems.Set(&i)
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)

	val, ok = c.Get(2)
	require.False(t, ok)
	require.Zero(t, val)

	// 0.5 and not 1.0 because the Get for key 1 was a hit and the Get for
	// key 2 was a miss.
	require.Equal(t, 0.5, c.Metrics.Ratio())

	c = nil
	val, ok = c.Get(0)
	require.False(t, ok)
	require.Zero(t, val)
}

// retrySet calls SetWithTTL until the item is accepted by the cache.
func retrySet(t *testing.T, c *Cache[int, int], key, value int, cost int64, ttl time.Duration) {
	for {
		if set := c.SetWithTTL(key, value, cost, ttl); !set {
			time.Sleep(wait)
			continue
		}

		time.Sleep(wait)
		val, ok := c.Get(key)
		require.True(t, ok)
		require.NotNil(t, val)
		require.Equal(t, value, val)
		return
	}
}

func TestCacheSet(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	retrySet(t, c, 1, 1, 1, 0)

	c.Set(1, 2, 2)
	val, ok := c.storedItems.Get(z.KeyToHash(1))
	require.True(t, ok)
	require.Equal(t, 2, val)

	c.stop <- struct{}{}
	for i := 0; i < setBufSize; i++ {
		key, conflict := z.KeyToHash(1)
		c.setBuf <- &Item[int]{
			flag: itemUpdate,
			Key: key,
			Conflict: conflict,
			Value: 1,
			Cost: 1,
		}
	}
	require.False(t, c.Set(2, 2, 1))
	require.Equal(t, uint64(1), c.Metrics.SetsDropped())
	close(c.setBuf)
	close(c.stop)

	c = nil
	require.False(t, c.Set(1, 1, 1))
}

func TestCacheInternalCost(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	// Get should return false because the cache's max cost is too small to
	// store the item when accounting for the internal cost.
	c.SetWithTTL(1, 1, 1, 0)
	time.Sleep(wait)
	_, ok := c.Get(1)
	require.False(t, ok)
}

func TestRecacheWithTTL(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
	})

	require.NoError(t, err)

	// Set initial value for key = 1
	insert := c.SetWithTTL(1, 1, 1, 5*time.Second)
	require.True(t, insert)
	time.Sleep(2 * time.Second)

	// Get value from cache for key = 1
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	require.Equal(t, 1, val)

	// Wait for expiration
	time.Sleep(5 * time.Second)

	// The cached value for key = 1 should be gone
	val, ok = c.Get(1)
	require.False(t, ok)
	require.Zero(t, val)

	// Set new value for key = 1
	insert = c.SetWithTTL(1, 2, 1, 5*time.Second)
	require.True(t, insert)
	time.Sleep(2 * time.Second)

	// Get value from cache for key = 1
	val, ok = c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	require.Equal(t, 2, val)
}

func TestCacheSetWithTTL(t *testing.T) {
	m := &sync.Mutex{}
	evicted := make(map[uint64]struct{})
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
		OnEvict: func(item *Item[int]) {
			m.Lock()
			defer m.Unlock()
			evicted[item.Key] = struct{}{}
		},
	})
	require.NoError(t, err)

	retrySet(t, c, 1, 1, 1, time.Second)

	// Sleep to make sure the item has expired after execution resumes.
	time.Sleep(2 * time.Second)
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Zero(t, val)

	// Sleep to ensure that the bucket where the item was stored has been cleared
	// from the expiration map.
	time.Sleep(5 * time.Second)
	m.Lock()
	require.Equal(t, 1, len(evicted))
	_, ok = evicted[1]
	require.True(t, ok)
	m.Unlock()

	// Verify that expiration times are overwritten.
	retrySet(t, c, 2, 1, 1, time.Second)
	retrySet(t, c, 2, 2, 1, 100*time.Second)
	time.Sleep(3 * time.Second)
	val, ok = c.Get(2)
	require.True(t, ok)
	require.Equal(t, 2, val)

	// Verify that entries with no expiration are overwritten.
	retrySet(t, c, 3, 1, 1, 0)
	retrySet(t, c, 3, 2, 1, time.Second)
	time.Sleep(3 * time.Second)
	val, ok = c.Get(3)
	require.False(t, ok)
	require.Zero(t, val)
}

func TestCacheDel(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
	})
	require.NoError(t, err)

	c.Set(1, 1, 1)
	c.Del(1)
	// Deletes and sets are pushed through the setBuf, so the delete may not
	// be processed before the following Get is called. Wait a millisecond
	// for things to be processed.
	time.Sleep(time.Millisecond)
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Zero(t, val)

	c = nil
	defer func() {
		require.Nil(t, recover())
	}()
	c.Del(1)
}

func TestCacheDelWithTTL(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
	})
	require.NoError(t, err)
	retrySet(t, c, 3, 1, 1, 10*time.Second)
	time.Sleep(1 * time.Second)
	// Delete the item
	c.Del(3)
	// Ensure the key is deleted.
	val, ok := c.Get(3)
	require.False(t, ok)
	require.Zero(t, val)
}

func TestCacheGetTTL(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	// try expiration with valid ttl item
	{
		expiration := time.Second * 5
		retrySet(t, c, 1, 1, 1, expiration)

		val, ok := c.Get(1)
		require.True(t, ok)
		require.Equal(t, 1, val)

		ttl, ok := c.GetTTL(1)
		require.True(t, ok)
		require.WithinDuration(t,
			time.Now().Add(expiration), time.Now().Add(ttl), 1*time.Second)

		c.Del(1)

		ttl, ok = c.GetTTL(1)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with no ttl
	{
		retrySet(t, c, 2, 2, 1, time.Duration(0))

		val, ok := c.Get(2)
		require.True(t, ok)
		require.Equal(t, 2, val)

		ttl, ok := c.GetTTL(2)
		require.True(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with missing item
	{
		ttl, ok := c.GetTTL(3)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with expired item
	{
		expiration := time.Second
		retrySet(t, c, 3, 3, 1, expiration)

		val, ok := c.Get(3)
		require.True(t, ok)
		require.Equal(t, 3, val)

		time.Sleep(time.Second)

		ttl, ok := c.GetTTL(3)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
}

func TestCacheClear(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	for i := 0; i < 10; i++ {
		c.Set(i, i, 1)
	}
	time.Sleep(wait)
	require.Equal(t, uint64(10), c.Metrics.KeysAdded())

	c.Clear()
	require.Equal(t, uint64(0), c.Metrics.KeysAdded())

	for i := 0; i < 10; i++ {
		val, ok := c.Get(i)
		require.False(t, ok)
		require.Zero(t, val)
	}
}

func TestCacheMetrics(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		IgnoreInternalCost: true,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	for i := 0; i < 10; i++ {
		c.Set(i, i, 1)
	}
	time.Sleep(wait)
	m := c.Metrics
	require.Equal(t, uint64(10), m.KeysAdded())
}

func TestMetrics(t *testing.T) {
	newMetrics()
}

func TestNilMetrics(t *testing.T) {
	var m *Metrics
	for _, f := range []func() uint64{
		m.Hits,
		m.Misses,
		m.KeysAdded,
		m.KeysEvicted,
		m.CostEvicted,
		m.SetsDropped,
		m.SetsRejected,
		m.GetsDropped,
		m.GetsKept,
	} {
		require.Equal(t, uint64(0), f())
	}
}

func TestMetricsAddGet(t *testing.T) {
	m := newMetrics()
	m.add(hit, 1, 1)
	m.add(hit, 2, 2)
	m.add(hit, 3, 3)
	require.Equal(t, uint64(6), m.Hits())

	m = nil
	m.add(hit, 1, 1)
	require.Equal(t, uint64(0), m.Hits())
}

func TestMetricsRatio(t *testing.T) {
	m := newMetrics()
	require.Equal(t, float64(0), m.Ratio())

	m.add(hit, 1, 1)
	m.add(hit, 2, 2)
	m.add(miss, 1, 1)
	m.add(miss, 2, 2)
	require.Equal(t, 0.5, m.Ratio())

	m = nil
	require.Equal(t, float64(0), m.Ratio())
}

func TestMetricsString(t *testing.T) {
	m := newMetrics()
	m.add(hit, 1, 1)
	m.add(miss, 1, 1)
	m.add(keyAdd, 1, 1)
	m.add(keyUpdate, 1, 1)
	m.add(keyEvict, 1, 1)
	m.add(costAdd, 1, 1)
	m.add(costEvict, 1, 1)
	m.add(dropSets, 1, 1)
	m.add(rejectSets, 1, 1)
	m.add(dropGets, 1, 1)
	m.add(keepGets, 1, 1)
	require.Equal(t, uint64(1), m.Hits())
	require.Equal(t, uint64(1), m.Misses())
	require.Equal(t, 0.5, m.Ratio())
	require.Equal(t, uint64(1), m.KeysAdded())
	require.Equal(t, uint64(1), m.KeysUpdated())
	require.Equal(t, uint64(1), m.KeysEvicted())
	require.Equal(t, uint64(1), m.CostAdded())
	require.Equal(t, uint64(1), m.CostEvicted())
	require.Equal(t, uint64(1), m.SetsDropped())
	require.Equal(t, uint64(1), m.SetsRejected())
	require.Equal(t, uint64(1), m.GetsDropped())
	require.Equal(t, uint64(1), m.GetsKept())

	require.NotEqual(t, 0, len(m.String()))

	m = nil
	require.Equal(t, 0, len(m.String()))

	require.Equal(t, "unidentified", stringFor(doNotUse))
}

func TestCacheMetricsClear(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)

	c.Set(1, 1, 1)
	stop := make(chan struct{})
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				c.Get(1)
			}
		}
	}()
	time.Sleep(wait)
	c.Clear()
	stop <- struct{}{}
	c.Metrics = nil
	c.Metrics.Clear()
}

func init() {
	// Set bucketDurationSecs to 1 to avoid waiting too long during the tests.
	bucketDurationSecs = 1
}

func TestBlockOnClear(t *testing.T) {
	c, err := NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: false,
	})
	require.NoError(t, err)
	defer c.Close()

	done := make(chan struct{})

	go func() {
		for i := 0; i < 10; i++ {
			c.Wait()
		}
		close(done)
	}()

	for i := 0; i < 10; i++ {
		c.Clear()
	}

	select {
	case <-done:
		// We're OK
	case <-time.After(1 * time.Second):
		t.Fatalf("timed out while waiting on cache")
	}
}

// Regression test for bug https://github.com/fiatjaf/generic-ristretto/issues/167
func TestDropUpdates(t *testing.T) {
	originalSetBufSize := setBufSize
	defer func() { setBufSize = originalSetBufSize }()

	test := func() {
		// droppedMap stores the items dropped from the cache.
		droppedMap := make(map[int]struct{})
		lastEvictedSet := int64(-1)

		var err error
		handler := func(_ interface{}, value interface{}) {
			v := value.(string)
			lastEvictedSet, err = strconv.ParseInt(v, 10, 32)
			require.NoError(t, err)

			_, ok := droppedMap[int(lastEvictedSet)]
			if ok {
				panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n",
					lastEvictedSet, droppedMap))
			}
		}

		// This is important. The race condition shows up only when the setBuf
		// is full, which is why we reduce the buffer size here. The test
		// fills the setBuf to its capacity and then performs an update on a
		// key.
		setBufSize = 10

		c, err := NewCache(&Config[int, string]{
			NumCounters: 100,
			MaxCost: 10,
			BufferItems: 64,
			Metrics: true,
			OnEvict: func(item *Item[string]) {
				handler(nil, item.Value)
			},
		})
		require.NoError(t, err)

		for i := 0; i < 5*setBufSize; i++ {
			v := fmt.Sprintf("%0100d", i)
			// We're updating the same key.
			if !c.Set(0, v, 1) {
				// The race condition doesn't show up without this sleep.
				time.Sleep(time.Microsecond)
				droppedMap[i] = struct{}{}
			}
		}
		// Wait for all the items to be processed.
		time.Sleep(time.Millisecond)
		// This will cause eviction from the cache.
		require.True(t, c.Set(1, "", 10))
		c.Close()
	}

	// Run the test 100 times since the race does not show up reliably in a
	// single run.
	for i := 0; i < 100; i++ {
		test()
	}
}

func TestRistrettoCalloc(t *testing.T) {
	maxCacheSize := 1 << 20
	config := &Config[int, []byte]{
		// Use 5% of cache memory for storing counters.
		NumCounters: int64(float64(maxCacheSize) * 0.05 * 2),
		MaxCost: int64(float64(maxCacheSize) * 0.95),
		BufferItems: 64,
		Metrics: true,
		OnExit: func(val []byte) {
			z.Free(val)
		},
	}
	r, err := NewCache(config)
	require.NoError(t, err)
	defer r.Close()

	var wg sync.WaitGroup
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rd := rand.New(rand.NewSource(time.Now().UnixNano()))
			for i := 0; i < 10000; i++ {
				k := rd.Intn(10000)
				v := z.Calloc(256, "test")
				rd.Read(v)
				if !r.Set(k, v, 256) {
					z.Free(v)
				}
				if rd.Intn(10) == 0 {
					r.Del(k)
				}
			}
		}()
	}
	wg.Wait()
	r.Clear()
	require.Zero(t, z.NumAllocBytes())
}

func TestRistrettoCallocTTL(t *testing.T) {
	maxCacheSize := 1 << 20
	config := &Config[int, []byte]{
		// Use 5% of cache memory for storing counters.
		NumCounters: int64(float64(maxCacheSize) * 0.05 * 2),
		MaxCost: int64(float64(maxCacheSize) * 0.95),
		BufferItems: 64,
		Metrics: true,
		OnExit: func(val []byte) {
			z.Free(val)
		},
	}
	r, err := NewCache(config)
	require.NoError(t, err)
	defer r.Close()

	var wg sync.WaitGroup
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rd := rand.New(rand.NewSource(time.Now().UnixNano()))
			for i := 0; i < 10000; i++ {
				k := rd.Intn(10000)
				v := z.Calloc(256, "test")
				rd.Read(v)
				if !r.SetWithTTL(k, v, 256, time.Second) {
					z.Free(v)
				}
				if rd.Intn(10) == 0 {
					r.Del(k)
				}
			}
		}()
	}
	wg.Wait()
	time.Sleep(5 * time.Second)
	require.Zero(t, z.NumAllocBytes())
}

func newTestCache() (*Cache[int, int], error) {
	return NewCache(&Config[int, int]{
		NumCounters: 100,
		MaxCost: 10,
		BufferItems: 64,
		Metrics: true,
	})
}

func TestCacheWithTTL(t *testing.T) {
	// There may be a race condition, so run the test multiple times.
	const try = 10

	for i := 0; i < try; i++ {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			c, err := NewCache(&Config[int, int]{
				NumCounters: 100,
				MaxCost: 1000,
				BufferItems: 64,
				Metrics: true,
			})

			require.NoError(t, err)

			// Set initial value for key = 1
			insert := c.SetWithTTL(1, 1, 1, 800*time.Millisecond)
			require.True(t, insert)

			time.Sleep(100 * time.Millisecond)

			// Get value from cache for key = 1
			val, ok := c.Get(1)
			require.True(t, ok)
			require.NotNil(t, val)
			require.Equal(t, 1, val)

			time.Sleep(1200 * time.Millisecond)

			val, ok = c.Get(1)
			require.False(t, ok)
			require.Zero(t, val)
		})
	}
}

func TestCacheMaxCost(t *testing.T) {
	charset := "abcdefghijklmnopqrstuvwxyz0123456789"
	key := func() []byte {
		k := make([]byte, 2)
		for i := range k {
			k[i] = charset[rand.Intn(len(charset))]
		}
		return k
	}
	c, err := NewCache(&Config[[]byte, string]{
		NumCounters: 12960, // 36^2 * 10
		MaxCost: 1e6, // 1MB
		BufferItems: 64,
		Metrics: true,
	})
	require.NoError(t, err)
	stop := make(chan struct{}, 8)
	for i := 0; i < 8; i++ {
		go func() {
			for {
				select {
				case <-stop:
					return
				default:
					time.Sleep(time.Millisecond)

					k := key()
					if _, ok := c.Get(k); !ok {
						val := ""
						if rand.Intn(100) < 10 {
							val = "test"
						} else {
							val = strings.Repeat("a", 1000)
						}
						c.Set(key(), val, int64(2+len(val)))
					}
				}
			}
		}()
	}
	for i := 0; i < 20; i++ {
		time.Sleep(time.Second)
		cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted()
		t.Logf("total cache cost: %d\n", cacheCost)
		require.True(t, float64(cacheCost) <= float64(1e6*1.05))
	}
	for i := 0; i < 8; i++ {
		stop <- struct{}{}
	}
}
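// ExampleCache_basicUsage is not part of the original test suite; it is a
// minimal illustrative sketch of the Cache API that the tests above exercise
// (NewCache, Set, Wait, Get, Del, Close). The configuration values and the
// key/value pair are arbitrary assumptions, not recommendations.
func ExampleCache_basicUsage() {
	c, err := NewCache(&Config[int, string]{
		NumCounters: 1e4, // number of keys to track frequency of
		MaxCost: 1e6, // maximum total cost of the cache
		BufferItems: 64, // number of keys per Get buffer
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// Set is asynchronous: items travel through setBuf, so call Wait (or
	// sleep briefly, as the tests above do) before expecting Get to see them.
	c.Set(1, "forty-two", 1)
	c.Wait()

	if val, ok := c.Get(1); ok {
		fmt.Println(val)
	}
	c.Del(1)
}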