github.com/jxskiss/gopkg/v2@v2.14.9-0.20240514120614-899f3e7952b4/perf/lru/sharded.go

package lru

import (
	"time"

	"github.com/jxskiss/gopkg/v2/internal"
	"github.com/jxskiss/gopkg/v2/internal/rthash"
)

// NewShardedCache returns a hash-sharded lru cache instance, which is
// suitable for use cases with heavy lock contention. It keeps the same
// interface as the lru cache instance returned by NewCache.
// Generally NewCache should be used instead of this, unless you are sure
// that you are facing a lock contention problem.
func NewShardedCache[K comparable, V any](buckets, bucketCapacity int) *ShardedCache[K, V] {
	// Round the bucket count up to a power of two so that a bucket can be
	// selected with a cheap bitmask instead of a modulo operation.
	buckets = int(internal.NextPowerOfTwo(uint(buckets)))
	mask := uintptr(buckets - 1)
	mc := &ShardedCache[K, V]{
		buckets: uintptr(buckets),
		mask:    mask,
		cache:   make([]*Cache[K, V], buckets),
	}
	for i := 0; i < buckets; i++ {
		mc.cache[i] = NewCache[K, V](bucketCapacity)
	}
	mc.hashFunc = rthash.NewHashFunc[K]()
	return mc
}

// ShardedCache is a hash-sharded version of Cache; it minimizes lock
// contention for heavy read workloads. Generally Cache should be used
// instead of this, unless you are sure that you are facing a lock
// contention problem.
//
// It implements Interface in this package; see Interface for detailed
// API documentation.
type ShardedCache[K comparable, V any] struct {
	buckets uintptr
	mask    uintptr
	cache   []*Cache[K, V]

	hashFunc rthash.HashFunc[K]
}

// Len returns the total number of entries across all buckets.
func (c *ShardedCache[K, V]) Len() (n int) {
	for _, c := range c.cache {
		n += c.Len()
	}
	return
}

func (c *ShardedCache[K, V]) Has(key K) (exists, expired bool) {
	h := c.hashFunc(key)
	return c.cache[h&c.mask].Has(key)
}

func (c *ShardedCache[K, V]) Get(key K) (v V, exists, expired bool) {
	h := c.hashFunc(key)
	return c.cache[h&c.mask].Get(key)
}

func (c *ShardedCache[K, V]) GetWithTTL(key K) (v V, exists bool, ttl *time.Duration) {
	h := c.hashFunc(key)
	return c.cache[h&c.mask].GetWithTTL(key)
}

func (c *ShardedCache[K, V]) GetQuiet(key K) (v V, exists, expired bool) {
	h := c.hashFunc(key)
	return c.cache[h&c.mask].GetQuiet(key)
}

func (c *ShardedCache[K, V]) GetNotStale(key K) (v V, exists bool) {
	h := c.hashFunc(key)
	return c.cache[h&c.mask].GetNotStale(key)
}

func (c *ShardedCache[K, V]) MGet(keys ...K) map[K]V {
	return c.mget(false, keys...)
}

func (c *ShardedCache[K, V]) MGetNotStale(keys ...K) map[K]V {
	return c.mget(true, keys...)
}

// mget groups the keys by bucket, queries each bucket once, and merges
// the per-bucket results into a single map.
func (c *ShardedCache[K, V]) mget(notStale bool, keys ...K) map[K]V {
	grpKeys := c.groupKeys(keys)
	nowNano := time.Now().UnixNano()

	var res map[K]V
	for idx, keys := range grpKeys {
		grp := c.cache[idx].mget(notStale, nowNano, keys...)
		if res == nil {
			res = grp
		} else {
			for k, v := range grp {
				res[k] = v
			}
		}
	}
	return res
}

func (c *ShardedCache[K, V]) Set(key K, value V, ttl time.Duration) {
	h := c.hashFunc(key)
	c.cache[h&c.mask].Set(key, value, ttl)
}

func (c *ShardedCache[K, V]) MSet(kvmap map[K]V, ttl time.Duration) {
	for key, val := range kvmap {
		c.Set(key, val, ttl)
	}
}

func (c *ShardedCache[K, V]) Delete(key K) {
	h := c.hashFunc(key)
	c.cache[h&c.mask].Delete(key)
}

func (c *ShardedCache[K, V]) MDelete(keys ...K) {
	grpKeys := c.groupKeys(keys)

	for idx, keys := range grpKeys {
		c.cache[idx].MDelete(keys...)
	}
}

// groupKeys distributes keys to their bucket indexes using the key hash
// and the power-of-two mask.
func (c *ShardedCache[K, V]) groupKeys(keys []K) map[uintptr][]K {
	grpKeys := make(map[uintptr][]K)
	for _, key := range keys {
		idx := c.hashFunc(key) & c.mask
		grpKeys[idx] = append(grpKeys[idx], key)
	}
	return grpKeys
}
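
// Usage sketch (illustrative only, not part of the original file): the key
// and value types, bucket count, capacity, TTL, and keys below are
// assumptions chosen for demonstration. NewShardedCache rounds the bucket
// count up to a power of two, and each bucket is an independent Cache of
// bucketCapacity entries, so operations on keys that hash to different
// buckets do not contend on the same lock.
//
//	c := NewShardedCache[string, int](8, 1024) // 8 buckets, 1024 entries each
//	c.Set("answer", 42, time.Minute)           // cache the value for one minute
//	if v, ok := c.GetNotStale("answer"); ok {
//		_ = v // 42, while the entry has not expired
//	}
//	c.MSet(map[string]int{"a": 1, "b": 2}, time.Minute)
//	vals := c.MGet("a", "b") // batch lookup, grouped per bucket internally
//	_ = vals
//	c.Delete("answer")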