github.com/outcaste-io/ristretto@v0.2.3/cache_test.go

package ristretto

import (
	"fmt"
	"math/rand"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/outcaste-io/ristretto/z"
	"github.com/stretchr/testify/require"
)

var wait = time.Millisecond * 10

func TestCacheKeyToHash(t *testing.T) {
	keyToHashCount := 0
	c, err := NewCache(&Config{
		NumCounters:        10,
		MaxCost:            1000,
		BufferItems:        64,
		IgnoreInternalCost: true,
		KeyToHash: func(key interface{}) (uint64, uint64) {
			keyToHashCount++
			return z.KeyToHash(key)
		},
	})
	require.NoError(t, err)
	if c.Set(1, 1, 1) {
		time.Sleep(wait)
		val, ok := c.Get(1)
		require.True(t, ok)
		require.NotNil(t, val)
		c.Del(1)
	}
	require.Equal(t, 3, keyToHashCount)
}

func TestCacheMaxCost(t *testing.T) {
	charset := "abcdefghijklmnopqrstuvwxyz0123456789"
	key := func() []byte {
		k := make([]byte, 2)
		for i := range k {
			k[i] = charset[rand.Intn(len(charset))]
		}
		return k
	}
	c, err := NewCache(&Config{
		NumCounters: 12960, // 36^2 * 10
		MaxCost:     1e6,   // 1mb
		BufferItems: 64,
		Metrics:     true,
	})
	require.NoError(t, err)
	stop := make(chan struct{}, 8)
	for i := 0; i < 8; i++ {
		go func() {
			for {
				select {
				case <-stop:
					return
				default:
					time.Sleep(time.Millisecond)

					k := key()
					if _, ok := c.Get(k); !ok {
						val := ""
						if rand.Intn(100) < 10 {
							val = "test"
						} else {
							val = strings.Repeat("a", 1000)
						}
						c.Set(key(), val, int64(2+len(val)))
					}
				}
			}
		}()
	}
	for i := 0; i < 20; i++ {
		time.Sleep(time.Second)
		cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted()
		t.Logf("total cache cost: %d\n", cacheCost)
		require.True(t, float64(cacheCost) <= float64(1e6*1.05))
	}
	for i := 0; i < 8; i++ {
		stop <- struct{}{}
	}
}

func TestUpdateMaxCost(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters: 10,
		MaxCost:     10,
		BufferItems: 64,
	})
	require.NoError(t, err)
	require.Equal(t, int64(10), c.MaxCost())
	require.True(t, c.Set(1, 1, 1))
	time.Sleep(wait)
	_, ok := c.Get(1)
	// Set is rejected because the cost of the entry is too high
	// when accounting for the internal cost of storing the entry.
	require.False(t, ok)

	// Update the max cost of the cache and retry.
	c.UpdateMaxCost(1000)
	require.Equal(t, int64(1000), c.MaxCost())
	require.True(t, c.Set(1, 1, 1))
	time.Sleep(wait)
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	c.Del(1)
}

func TestNewCache(t *testing.T) {
	_, err := NewCache(&Config{
		NumCounters: 0,
	})
	require.Error(t, err)

	_, err = NewCache(&Config{
		NumCounters: 100,
		MaxCost:     0,
	})
	require.Error(t, err)

	_, err = NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 0,
	})
	require.Error(t, err)

	c, err := NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     true,
	})
	require.NoError(t, err)
	require.NotNil(t, c)
}

func TestNilCache(t *testing.T) {
	var c *Cache
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Nil(t, val)

	require.False(t, c.Set(1, 1, 1))
	c.Del(1)
	c.Clear()
	c.Close()
}

func TestMultipleClose(t *testing.T) {
	var c *Cache
	c.Close()

	var err error
	c, err = NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     true,
	})
	require.NoError(t, err)
	c.Close()
	c.Close()
}

func TestSetAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	c.Close()
	require.False(t, c.Set(1, 1, 1))
}

func TestClearAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	c.Close()
	c.Clear()
}

func TestGetAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	require.True(t, c.Set(1, 1, 1))
	c.Close()

	_, ok := c.Get(1)
	require.False(t, ok)
}

func TestDelAfterClose(t *testing.T) {
	c, err := newTestCache()
	require.NoError(t, err)
	require.NotNil(t, c)

	require.True(t, c.Set(1, 1, 1))
	c.Close()

	c.Del(1)
}

func TestCacheProcessItems(t *testing.T) {
	m := &sync.Mutex{}
	evicted := make(map[uint64]struct{})
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		BufferItems:        64,
		IgnoreInternalCost: true,
		Cost: func(value interface{}) int64 {
			return int64(value.(int))
		},
		OnEvict: func(item *Item) {
			m.Lock()
			defer m.Unlock()
			evicted[item.Key] = struct{}{}
		},
	})
	require.NoError(t, err)

	var key uint64
	var conflict uint64

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item{
		flag:     itemNew,
		Key:      key,
		Conflict: conflict,
		Value:    1,
		Cost:     0,
	}
	time.Sleep(wait)
	require.True(t, c.policy.Has(1))
	require.Equal(t, int64(1), c.policy.Cost(1))

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item{
		flag:     itemUpdate,
		Key:      key,
		Conflict: conflict,
		Value:    2,
		Cost:     0,
	}
	time.Sleep(wait)
	require.Equal(t, int64(2), c.policy.Cost(1))

	key, conflict = z.KeyToHash(1)
	c.setBuf <- &Item{
		flag:     itemDelete,
		Key:      key,
		Conflict: conflict,
	}
	time.Sleep(wait)
	key, conflict = z.KeyToHash(1)
	val, ok := c.store.Get(key, conflict)
	require.False(t, ok)
	require.Nil(t, val)
	require.False(t, c.policy.Has(1))

	key, conflict = z.KeyToHash(2)
	c.setBuf <- &Item{
		flag:     itemNew,
		Key:      key,
		Conflict: conflict,
		Value:    2,
		Cost:     3,
	}
	key, conflict = z.KeyToHash(3)
	c.setBuf <- &Item{
		flag:     itemNew,
		Key:      key,
		Conflict: conflict,
		Value:    3,
		Cost:     3,
	}
	key, conflict = z.KeyToHash(4)
	c.setBuf <- &Item{
		flag:     itemNew,
		Key:      key,
		Conflict: conflict,
		Value:    3,
		Cost:     3,
	}
	key, conflict = z.KeyToHash(5)
	c.setBuf <- &Item{
		flag:     itemNew,
		Key:      key,
		Conflict: conflict,
		Value:    3,
		Cost:     5,
	}
	time.Sleep(wait)
	m.Lock()
	require.NotEqual(t, 0, len(evicted))
	m.Unlock()

	defer func() {
		require.NotNil(t, recover())
	}()
	c.Close()
	c.setBuf <- &Item{flag: itemNew}
}

func TestCacheGet(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		BufferItems:        64,
		IgnoreInternalCost: true,
		Metrics:            true,
	})
	require.NoError(t, err)

	key, conflict := z.KeyToHash(1)
	i := Item{
		Key:      key,
		Conflict: conflict,
		Value:    1,
	}
	c.store.Set(&i)
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)

	val, ok = c.Get(2)
	require.False(t, ok)
	require.Nil(t, val)

	// The ratio is 0.5 and not 1.0 because of the two Gets above, one was a
	// hit and one was a miss.
	require.Equal(t, 0.5, c.Metrics.Ratio())

	c = nil
	val, ok = c.Get(0)
	require.False(t, ok)
	require.Nil(t, val)
}

// retrySet calls SetWithTTL until the item is accepted by the cache.
func retrySet(t *testing.T, c *Cache, key, value int, cost int64, ttl time.Duration) {
	for {
		if set := c.SetWithTTL(key, value, cost, ttl); !set {
			time.Sleep(wait)
			continue
		}

		time.Sleep(wait)
		val, ok := c.Get(key)
		require.True(t, ok)
		require.NotNil(t, val)
		require.Equal(t, value, val.(int))
		return
	}
}

func TestCacheSet(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	retrySet(t, c, 1, 1, 1, 0)

	c.Set(1, 2, 2)
	val, ok := c.store.Get(z.KeyToHash(1))
	require.True(t, ok)
	require.Equal(t, 2, val.(int))

	// Stop the processItems goroutine and fill the setBuf to capacity so that
	// the next Set is dropped.
	c.stop <- struct{}{}
	for i := 0; i < setBufSize; i++ {
		key, conflict := z.KeyToHash(1)
		c.setBuf <- &Item{
			flag:     itemUpdate,
			Key:      key,
			Conflict: conflict,
			Value:    1,
			Cost:     1,
		}
	}
	require.False(t, c.Set(2, 2, 1))
	require.Equal(t, uint64(1), c.Metrics.SetsDropped())
	close(c.setBuf)
	close(c.stop)

	c = nil
	require.False(t, c.Set(1, 1, 1))
}

func TestCacheInternalCost(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     true,
	})
	require.NoError(t, err)

	// Get should return false because the cache's cost is too small to store the item
	// when accounting for the internal cost.
	c.SetWithTTL(1, 1, 1, 0)
	time.Sleep(wait)
	_, ok := c.Get(1)
	require.False(t, ok)
}

func TestRecacheWithTTL(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	// Set initial value for key = 1
	insert := c.SetWithTTL(1, 1, 1, 5*time.Second)
	require.True(t, insert)
	time.Sleep(2 * time.Second)

	// Get value from cache for key = 1
	val, ok := c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	require.Equal(t, 1, val)

	// Wait for expiration
	time.Sleep(5 * time.Second)

	// The cached value for key = 1 should be gone
	val, ok = c.Get(1)
	require.False(t, ok)
	require.Nil(t, val)

	// Set new value for key = 1
	insert = c.SetWithTTL(1, 2, 1, 5*time.Second)
	require.True(t, insert)
	time.Sleep(2 * time.Second)

	// Get value from cache for key = 1
	val, ok = c.Get(1)
	require.True(t, ok)
	require.NotNil(t, val)
	require.Equal(t, 2, val)
}

func TestCacheSetWithTTL(t *testing.T) {
	m := &sync.Mutex{}
	evicted := make(map[uint64]struct{})
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
		OnEvict: func(item *Item) {
			m.Lock()
			defer m.Unlock()
			evicted[item.Key] = struct{}{}
		},
	})
	require.NoError(t, err)

	retrySet(t, c, 1, 1, 1, time.Second)

	// Sleep to make sure the item has expired after execution resumes.
	time.Sleep(2 * time.Second)
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Nil(t, val)

	// Sleep to ensure that the bucket where the item was stored has been cleared
	// from the expiration map.
	time.Sleep(5 * time.Second)
	m.Lock()
	require.Equal(t, 1, len(evicted))
	_, ok = evicted[1]
	require.True(t, ok)
	m.Unlock()

	// Verify that expiration times are overwritten.
	retrySet(t, c, 2, 1, 1, time.Second)
	retrySet(t, c, 2, 2, 1, 100*time.Second)
	time.Sleep(3 * time.Second)
	val, ok = c.Get(2)
	require.True(t, ok)
	require.Equal(t, 2, val.(int))

	// Verify that entries with no expiration are overwritten.
	retrySet(t, c, 3, 1, 1, 0)
	retrySet(t, c, 3, 2, 1, time.Second)
	time.Sleep(3 * time.Second)
	val, ok = c.Get(3)
	require.False(t, ok)
	require.Nil(t, val)
}

func TestCacheDel(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
	})
	require.NoError(t, err)

	c.Set(1, 1, 1)
	c.Del(1)
	// The deletes and sets are pushed through the setBuf. It is possible that
	// the delete is not processed before the following Get is called, so wait
	// a millisecond for things to be processed.
	time.Sleep(time.Millisecond)
	val, ok := c.Get(1)
	require.False(t, ok)
	require.Nil(t, val)

	c = nil
	defer func() {
		require.Nil(t, recover())
	}()
	c.Del(1)
}

func TestCacheDelWithTTL(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
	})
	require.NoError(t, err)
	retrySet(t, c, 3, 1, 1, 10*time.Second)
	time.Sleep(1 * time.Second)
	// Delete the item
	c.Del(3)
	// Ensure the key is deleted.
	val, ok := c.Get(3)
	require.False(t, ok)
	require.Nil(t, val)
}

func TestCacheGetTTL(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	// try expiration with valid ttl item
	{
		expiration := time.Second * 5
		retrySet(t, c, 1, 1, 1, expiration)

		val, ok := c.Get(1)
		require.True(t, ok)
		require.Equal(t, 1, val.(int))

		ttl, ok := c.GetTTL(1)
		require.True(t, ok)
		require.WithinDuration(t,
			time.Now().Add(expiration), time.Now().Add(ttl), 1*time.Second)

		c.Del(1)

		ttl, ok = c.GetTTL(1)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with no ttl
	{
		retrySet(t, c, 2, 2, 1, time.Duration(0))

		val, ok := c.Get(2)
		require.True(t, ok)
		require.Equal(t, 2, val.(int))

		ttl, ok := c.GetTTL(2)
		require.True(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with missing item
	{
		ttl, ok := c.GetTTL(3)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
	// try expiration with expired item
	{
		expiration := time.Second
		retrySet(t, c, 3, 3, 1, expiration)

		val, ok := c.Get(3)
		require.True(t, ok)
		require.Equal(t, 3, val.(int))

		time.Sleep(time.Second)

		ttl, ok := c.GetTTL(3)
		require.False(t, ok)
		require.Equal(t, ttl, time.Duration(0))
	}
}

func TestCacheClear(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	for i := 0; i < 10; i++ {
		c.Set(i, i, 1)
	}
	time.Sleep(wait)
	require.Equal(t, uint64(10), c.Metrics.KeysAdded())

	c.Clear()
	require.Equal(t, uint64(0), c.Metrics.KeysAdded())

	for i := 0; i < 10; i++ {
		val, ok := c.Get(i)
		require.False(t, ok)
		require.Nil(t, val)
	}
}

func TestCacheMetrics(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	for i := 0; i < 10; i++ {
		c.Set(i, i, 1)
	}
	time.Sleep(wait)
	m := c.Metrics
	require.Equal(t, uint64(10), m.KeysAdded())
}

func TestMetrics(t *testing.T) {
	newMetrics()
}

func TestNilMetrics(t *testing.T) {
	var m *Metrics
	for _, f := range []func() uint64{
		m.Hits,
		m.Misses,
		m.KeysAdded,
		m.KeysEvicted,
		m.CostEvicted,
		m.SetsDropped,
		m.SetsRejected,
		m.GetsDropped,
		m.GetsKept,
	} {
		require.Equal(t, uint64(0), f())
	}
}

func TestMetricsAddGet(t *testing.T) {
	m := newMetrics()
	m.add(hit, 1, 1)
	m.add(hit, 2, 2)
	m.add(hit, 3, 3)
	require.Equal(t, uint64(6), m.Hits())

	m = nil
	m.add(hit, 1, 1)
	require.Equal(t, uint64(0), m.Hits())
}

func TestMetricsRatio(t *testing.T) {
	m := newMetrics()
	require.Equal(t, float64(0), m.Ratio())

	m.add(hit, 1, 1)
	m.add(hit, 2, 2)
	m.add(miss, 1, 1)
	m.add(miss, 2, 2)
	require.Equal(t, 0.5, m.Ratio())

	m = nil
	require.Equal(t, float64(0), m.Ratio())
}

func TestMetricsString(t *testing.T) {
	m := newMetrics()
	m.add(hit, 1, 1)
	m.add(miss, 1, 1)
	m.add(keyAdd, 1, 1)
	m.add(keyUpdate, 1, 1)
	m.add(keyEvict, 1, 1)
	m.add(costAdd, 1, 1)
	m.add(costEvict, 1, 1)
	m.add(dropSets, 1, 1)
	m.add(rejectSets, 1, 1)
	m.add(dropGets, 1, 1)
	m.add(keepGets, 1, 1)
	require.Equal(t, uint64(1), m.Hits())
	require.Equal(t, uint64(1), m.Misses())
	require.Equal(t, 0.5, m.Ratio())
	require.Equal(t, uint64(1), m.KeysAdded())
	require.Equal(t, uint64(1), m.KeysUpdated())
	require.Equal(t, uint64(1), m.KeysEvicted())
	require.Equal(t, uint64(1), m.CostAdded())
	require.Equal(t, uint64(1), m.CostEvicted())
	require.Equal(t, uint64(1), m.SetsDropped())
	require.Equal(t, uint64(1), m.SetsRejected())
	require.Equal(t, uint64(1), m.GetsDropped())
	require.Equal(t, uint64(1), m.GetsKept())

	require.NotEqual(t, 0, len(m.String()))

	m = nil
	require.Equal(t, 0, len(m.String()))

	require.Equal(t, "unidentified", stringFor(doNotUse))
}

func TestCacheMetricsClear(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     true,
	})
	require.NoError(t, err)

	c.Set(1, 1, 1)
	stop := make(chan struct{})
	go func() {
		for {
			select {
			case <-stop:
				return
			default:
				c.Get(1)
			}
		}
	}()
	time.Sleep(wait)
	c.Clear()
	stop <- struct{}{}
	c.Metrics = nil
	c.Metrics.Clear()
}

func init() {
	// Set bucketDurationSecs to 1 to avoid waiting too long during the tests.
	bucketDurationSecs = 1
}

func TestBlockOnClear(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     false,
	})
	require.NoError(t, err)
	defer c.Close()

	done := make(chan struct{})

	go func() {
		for i := 0; i < 10; i++ {
			c.Wait()
		}
		close(done)
	}()

	for i := 0; i < 10; i++ {
		c.Clear()
	}

	select {
	case <-done:
		// We're OK
	case <-time.After(1 * time.Second):
		t.Fatalf("timed out while waiting on cache")
	}
}

// Regression test for bug https://github.com/dgraph-io/ristretto/issues/167
func TestDropUpdates(t *testing.T) {
	originalSetBufSize := setBufSize
	defer func() { setBufSize = originalSetBufSize }()

	test := func() {
		// droppedMap stores the items dropped from the cache.
		droppedMap := make(map[int]struct{})
		lastEvictedSet := int64(-1)

		var err error
		handler := func(_ interface{}, value interface{}) {
			v := value.(string)
			lastEvictedSet, err = strconv.ParseInt(v, 10, 32)
			require.NoError(t, err)

			_, ok := droppedMap[int(lastEvictedSet)]
			if ok {
				panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n",
					lastEvictedSet, droppedMap))
			}
		}

		// This is important. The race condition shows up only when the setBuf
		// is full, which is why we reduce the buffer size here. The test tries
		// to fill up the setBuf to its capacity and then performs an update on
		// a key.
		setBufSize = 10

		c, err := NewCache(&Config{
			NumCounters: 100,
			MaxCost:     10,
			BufferItems: 64,
			Metrics:     true,
			OnEvict: func(item *Item) {
				handler(nil, item.Value)
			},
		})
		require.NoError(t, err)

		for i := 0; i < 5*setBufSize; i++ {
			v := fmt.Sprintf("%0100d", i)
			// We're updating the same key.
			if !c.Set(0, v, 1) {
				// The race condition doesn't show up without this sleep.
				time.Sleep(time.Microsecond)
				droppedMap[i] = struct{}{}
			}
		}
		// Wait for all the items to be processed.
		time.Sleep(time.Millisecond)
		// This will cause eviction from the cache.
		require.True(t, c.Set(1, nil, 10))
		c.Close()
	}

	// Run the test 100 times since it's not reliable.
	for i := 0; i < 100; i++ {
		test()
	}
}

func TestRistrettoCalloc(t *testing.T) {
	maxCacheSize := 1 << 20
	config := &Config{
		// Use 5% of cache memory for storing counters.
		NumCounters: int64(float64(maxCacheSize) * 0.05 * 2),
		MaxCost:     int64(float64(maxCacheSize) * 0.95),
		BufferItems: 64,
		Metrics:     true,
		OnExit: func(val interface{}) {
			z.Free(val.([]byte))
		},
	}
	r, err := NewCache(config)
	require.NoError(t, err)
	defer r.Close()

	var wg sync.WaitGroup
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rd := rand.New(rand.NewSource(time.Now().UnixNano()))
			for i := 0; i < 10000; i++ {
				k := rd.Intn(10000)
				v := z.Calloc(256, "test")
				rd.Read(v)
				if !r.Set(k, v, 256) {
					z.Free(v)
				}
				if rd.Intn(10) == 0 {
					r.Del(k)
				}
			}
		}()
	}
	wg.Wait()
	r.Clear()
	require.Zero(t, z.NumAllocBytes())
}

func TestRistrettoCallocTTL(t *testing.T) {
	maxCacheSize := 1 << 20
	config := &Config{
		// Use 5% of cache memory for storing counters.
		NumCounters: int64(float64(maxCacheSize) * 0.05 * 2),
		MaxCost:     int64(float64(maxCacheSize) * 0.95),
		BufferItems: 64,
		Metrics:     true,
		OnExit: func(val interface{}) {
			z.Free(val.([]byte))
		},
	}
	r, err := NewCache(config)
	require.NoError(t, err)
	defer r.Close()

	var wg sync.WaitGroup
	for i := 0; i < runtime.NumCPU(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			rd := rand.New(rand.NewSource(time.Now().UnixNano()))
			for i := 0; i < 10000; i++ {
				k := rd.Intn(10000)
				v := z.Calloc(256, "test")
				rd.Read(v)
				if !r.SetWithTTL(k, v, 256, time.Second) {
					z.Free(v)
				}
				if rd.Intn(10) == 0 {
					r.Del(k)
				}
			}
		}()
	}
	wg.Wait()
	time.Sleep(5 * time.Second)
	require.Zero(t, z.NumAllocBytes())
}

func newTestCache() (*Cache, error) {
	return NewCache(&Config{
		NumCounters: 100,
		MaxCost:     10,
		BufferItems: 64,
		Metrics:     true,
	})
}

func TestCacheWithTTL(t *testing.T) {
	// There may be a race condition, so run the test multiple times.
	const try = 10

	for i := 0; i < try; i++ {
		t.Run(strconv.Itoa(i), func(t *testing.T) {
			c, err := NewCache(&Config{
				NumCounters: 100,
				MaxCost:     1000,
				BufferItems: 64,
				Metrics:     true,
			})
			require.NoError(t, err)

			// Set initial value for key = 1
			insert := c.SetWithTTL(1, 1, 1, 800*time.Millisecond)
			require.True(t, insert)

			time.Sleep(100 * time.Millisecond)

			// Get value from cache for key = 1
			val, ok := c.Get(1)
			require.True(t, ok)
			require.NotNil(t, val)
			require.Equal(t, 1, val)

			time.Sleep(1200 * time.Millisecond)

			val, ok = c.Get(1)
			require.False(t, ok)
			require.Nil(t, val)
		})
	}
}

func TestConcurrentSetAfterClose(t *testing.T) {
	c, err := NewCache(&Config{
		NumCounters:        100,
		MaxCost:            10,
		IgnoreInternalCost: true,
		BufferItems:        64,
		Metrics:            true,
	})
	require.NoError(t, err)

	var wg sync.WaitGroup
	done := make(chan struct{})

	// Spin up a bunch of goroutines that simply call Set over and over.
	for i := 0; i <= 200; i += 5 {
		wg.Add(1)
		go func(duration int) {
			wg.Done()

			tc := time.Tick(time.Duration(duration) * time.Millisecond)
		outer:
			for {
				select {
				case <-tc:
					c.Set("somekey", duration, 1)

				case <-done:
					break outer
				}
			}
		}(i)
	}

	wg.Wait()

	// Wait some time so Set can run a few times.
	tc := time.Tick(50 * time.Millisecond)
	<-tc

	// Run Close to ensure nothing panics.
	c.Close()

	// Wait some more time so Set can run a few additional times.
	tcd := time.Tick(250 * time.Millisecond)
	<-tcd
	close(done)
}