github.com/jxskiss/gopkg/v2@v2.14.9-0.20240514120614-899f3e7952b4/perf/lru/cache.go

package lru

import (
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

const maxCapacity = 1<<32 - 1

// NewCache returns an LRU cache instance with the given capacity; the
// underlying memory is allocated immediately. For best performance, the
// memory is reused and won't be freed for the lifetime of the cache.
//
// The capacity must be smaller than 2^32, else it panics.
func NewCache[K comparable, V any](capacity int) *Cache[K, V] {
	if capacity > maxCapacity {
		panic("lru: capacity too large")
	}
	list := newList(capacity)
	c := &Cache[K, V]{
		list: list,
		m:    make(map[K]uint32, capacity),
		buf:  unsafe.Pointer(newWalBuf()),
	}
	return c
}

// Cache is an in-memory cache using the LRU algorithm.
//
// It implements Interface in this package; see Interface for detailed
// API documentation.
type Cache[K comparable, V any] struct {
	mu   sync.RWMutex
	list *list
	m    map[K]uint32

	buf unsafe.Pointer // *walbuf
}

func (c *Cache[K, V]) Len() (n int) {
	c.mu.RLock()
	n = len(c.m)
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) Has(key K) (exists, expired bool) {
	c.mu.RLock()
	_, elem, exists := c.get(key)
	if exists {
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
	}
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) Get(key K) (v V, exists, expired bool) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		v = elem.value.(V)
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
		c.promote(idx)
	}
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) GetWithTTL(key K) (v V, exists bool, ttl *time.Duration) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		v = elem.value.(V)
		if elem.expires > 0 {
			x := time.Duration(elem.expires - time.Now().UnixNano())
			ttl = &x
		}
		c.promote(idx)
	}
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) GetQuiet(key K) (v V, exists, expired bool) {
	c.mu.RLock()
	_, elem, exists := c.get(key)
	if exists {
		v = elem.value.(V)
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
	}
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) GetNotStale(key K) (v V, exists bool) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		expired := elem.expires > 0 && elem.expires < time.Now().UnixNano()
		if !expired {
			v = elem.value.(V)
			c.promote(idx)
		} else {
			exists = false
		}
	}
	c.mu.RUnlock()
	return
}

func (c *Cache[K, V]) get(key K) (idx uint32, elem *element, exists bool) {
	idx, exists = c.m[key]
	if exists {
		elem = c.list.get(idx)
	}
	return
}

func (c *Cache[K, V]) MGet(keys ...K) map[K]V {
	nowNano := time.Now().UnixNano()
	return c.mget(false, nowNano, keys...)
}

func (c *Cache[K, V]) MGetNotStale(keys ...K) map[K]V {
	nowNano := time.Now().UnixNano()
	return c.mget(true, nowNano, keys...)
}

func (c *Cache[K, V]) mget(notStale bool, nowNano int64, keys ...K) map[K]V {
	res := make(map[K]V, len(keys))

	// Split into batches to give the LRU cache a chance to be updated
	// when the number of keys is much larger than walBufSize.
	total := len(keys)
	batch := walBufSize
	for i, j := 0, batch; i < total; i, j = i+batch, j+batch {
		if j > total {
			j = total
		}

		c.mu.RLock()
		for _, key := range keys[i:j] {
			idx, elem, exists := c.get(key)
			if exists {
				if notStale {
					expired := elem.expires > 0 && elem.expires < nowNano
					if expired {
						continue
					}
				}
				res[key] = elem.value.(V)
				c.promote(idx)
			}
		}
		c.mu.RUnlock()
	}
	return res
}
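
// exampleUsage is a minimal usage sketch of the API above; the function
// name and the values used here are hypothetical, for illustration only.
// A non-positive TTL stores the entry without expiration.
func exampleUsage() {
	c := NewCache[string, int](1000)

	// Store a value with a one-minute TTL.
	c.Set("a", 1, time.Minute)

	// Get reports expiration but still returns the stale value;
	// callers decide what to do with it.
	if v, exists, expired := c.Get("a"); exists && !expired {
		_ = v // use the fresh value
	}

	// GetNotStale treats expired entries as missing.
	if v, exists := c.GetNotStale("a"); exists {
		_ = v
	}

	// MGet looks up many keys, batching by walBufSize under a read lock.
	_ = c.MGet("a", "b", "c")
}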

func (c *Cache[K, V]) Set(key K, value V, ttl time.Duration) {
	var expires int64
	if ttl > 0 {
		expires = time.Now().UnixNano() + int64(ttl)
	}
	c.mu.Lock()
	c.checkAndFlushBuf()
	c.set(key, value, expires)
	c.mu.Unlock()
}

func (c *Cache[K, V]) MSet(kvmap map[K]V, ttl time.Duration) {
	var expires int64
	if ttl > 0 {
		expires = time.Now().UnixNano() + int64(ttl)
	}

	c.mu.Lock()
	c.checkAndFlushBuf()
	for key, val := range kvmap {
		c.set(key, val, expires)
	}
	c.mu.Unlock()
}

func (c *Cache[K, V]) set(k K, v V, expires int64) {
	idx, exists := c.m[k]
	if exists {
		e := c.list.get(idx)
		e.value = v
		e.expires = expires
		c.list.MoveToFront(e)
	} else {
		e := c.list.Back()
		if e.key != nil {
			delete(c.m, e.key.(K))
		}
		e.key = k
		e.value = v
		e.expires = expires
		c.m[k] = e.index
		c.list.MoveToFront(e)
	}
}

func (c *Cache[K, V]) Delete(key K) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	c.del(key)
	c.mu.Unlock()
}

func (c *Cache[K, V]) MDelete(keys ...K) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}

func (c *Cache[K, V]) del(key K) {
	idx, exists := c.m[key]
	if exists {
		delete(c.m, key)
		elem := c.list.get(idx)
		elem.key = nil
		elem.value = nil
		c.list.MoveToBack(elem)
	}
}

// promote records a read access of the element at idx in the current
// walbuf; the actual move-to-front is deferred until the buffer is
// flushed, so readers only need to share the RLock.
func (c *Cache[K, V]) promote(idx uint32) {
	buf := (*walbuf)(atomic.LoadPointer(&c.buf))
	i := atomic.AddInt32(&buf.p, 1)
	if i <= walBufSize {
		buf.b[i-1] = idx
		return
	}

	// buffer is full, swap buffer
	oldbuf := buf

	// create new buffer, and reserve the first element to use for
	// this promotion request
	newbuf := newWalBuf()
	newbuf.p = 1
	for {
		swapped := atomic.CompareAndSwapPointer(&c.buf, unsafe.Pointer(oldbuf), unsafe.Pointer(newbuf))
		if swapped {
			newbuf.b[0] = idx
			break
		}

		// try again
		oldbuf = (*walbuf)(atomic.LoadPointer(&c.buf))
		i = atomic.AddInt32(&oldbuf.p, 1)
		if i <= walBufSize {
			oldbuf.b[i-1] = idx
			newbuf.p = 0
			walbufpool.Put(newbuf)
			return
		}
	}

	// the oldbuf has been swapped, we take responsibility to flush it
	go func(c *Cache[K, V], buf *walbuf) {
		c.mu.Lock()
		c.flushBuf(buf)
		c.mu.Unlock()
		buf.reset()
		walbufpool.Put(buf)
	}(c, oldbuf)
}

// checkAndFlushBuf flushes the walbuf if it is not empty;
// the caller must hold the write lock.
func (c *Cache[K, V]) checkAndFlushBuf() {
	buf := (*walbuf)(c.buf)
	if buf.p > 0 {
		c.flushBuf(buf)
		buf.reset()
	}
}

func (c *Cache[K, V]) flushBuf(buf *walbuf) {
	if buf.p == 0 {
		return
	}

	// remove duplicate elements
	b := buf.deduplicate()

	// promote elements by their access order
	for _, idx := range b {
		elem := c.list.get(idx)
		c.list.MoveToFront(elem)
	}
}
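
// Note: identifiers used above but not defined in this file (newList, list,
// element, walbuf, newWalBuf, walbufpool, walBufSize) live elsewhere in this
// package. The sketch below is inferred from how they are used here and is
// an assumption for orientation, not the package's actual definitions:
//
//	// walbuf logs the indexes of promoted elements so that reads only
//	// need the shared RLock; p is advanced atomically by promote.
//	type walbuf struct {
//		b [walBufSize]uint32
//		p int32
//	}
//
//	// element is a node of the list backing the cache; index is its
//	// position in the list's preallocated element array.
//	type element struct {
//		key     interface{}
//		value   interface{}
//		expires int64 // nanosecond timestamp, 0 means no expiration
//		index   uint32
//	}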