github.com/jxskiss/gopkg@v0.17.3/lru/cache.go

package lru

import (
	"reflect"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

const maxCapacity = 1<<32 - 1

// NewCache returns an LRU cache instance with the given capacity; the
// underlying memory is allocated immediately. For best performance, the
// memory is reused and is not freed for the lifetime of the cache.
//
// The capacity must be smaller than 2^32, otherwise it panics.
func NewCache(capacity int) *Cache {
	if capacity > maxCapacity {
		panic("lru: capacity must be smaller than 2^32")
	}
	list := newList(capacity)
	c := &Cache{
		list: list,
		m:    make(map[interface{}]uint32, capacity),
		buf:  unsafe.Pointer(newWalBuf()),
	}
	return c
}

// Cache is an in-memory cache using the LRU algorithm.
//
// It implements Interface in this package; see Interface for detailed
// API documentation.
type Cache struct {
	mu   sync.RWMutex
	list *list
	m    map[interface{}]uint32

	buf unsafe.Pointer // *walbuf
}

func (c *Cache) Len() (n int) {
	c.mu.RLock()
	n = len(c.m)
	c.mu.RUnlock()
	return
}

func (c *Cache) Has(key interface{}) (exists, expired bool) {
	c.mu.RLock()
	_, elem, exists := c.get(key)
	if exists {
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
	}
	c.mu.RUnlock()
	return
}

func (c *Cache) Get(key interface{}) (v interface{}, exists, expired bool) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		v = elem.value
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
		c.promote(idx)
	}
	c.mu.RUnlock()
	return
}

func (c *Cache) GetWithTTL(key interface{}) (v interface{}, exists bool, ttl *time.Duration) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		v = elem.value
		if elem.expires > 0 {
			x := time.Duration(elem.expires - time.Now().UnixNano())
			ttl = &x
		}
		c.promote(idx)
	}
	c.mu.RUnlock()
	return
}

func (c *Cache) GetQuiet(key interface{}) (v interface{}, exists, expired bool) {
	c.mu.RLock()
	_, elem, exists := c.get(key)
	if exists {
		v = elem.value
		expired = elem.expires > 0 && elem.expires < time.Now().UnixNano()
	}
	c.mu.RUnlock()
	return
}

func (c *Cache) GetNotStale(key interface{}) (v interface{}, exists bool) {
	c.mu.RLock()
	idx, elem, exists := c.get(key)
	if exists {
		expired := elem.expires > 0 && elem.expires < time.Now().UnixNano()
		if !expired {
			v = elem.value
			c.promote(idx)
		} else {
			exists = false
		}
	}
	c.mu.RUnlock()
	return
}

func (c *Cache) get(key interface{}) (idx uint32, elem *element, exists bool) {
	idx, exists = c.m[key]
	if exists {
		elem = c.list.get(idx)
	}
	return
}

func (c *Cache) MGet(keys ...interface{}) map[interface{}]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mget(false, nowNano, keys...)
}

func (c *Cache) MGetNotStale(keys ...interface{}) map[interface{}]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mget(true, nowNano, keys...)
}
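
// A minimal usage sketch of the single-key read paths (hypothetical
// values, not taken from this package's tests): Get reports staleness
// but still returns the value, while GetNotStale filters stale entries.
//
//	c := NewCache(128)
//	c.Set("k", "v", 10*time.Millisecond)
//	time.Sleep(20 * time.Millisecond)
//	v, exists, expired := c.Get("k") // "v", true, true
//	_, exists = c.GetNotStale("k")   // exists == false
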
func (c *Cache) mget(notStale bool, nowNano int64, keys ...interface{}) map[interface{}]interface{} {
	res := make(map[interface{}]interface{}, len(keys))

	// Split the keys into batches so that the LRU order has a chance to
	// be updated when the number of keys is much larger than walBufSize.
	total := len(keys)
	batch := walBufSize
	for i, j := 0, batch; i < total; i, j = i+batch, j+batch {
		if j > total {
			j = total
		}

		c.mu.RLock()
		for _, key := range keys[i:j] {
			idx, elem, exists := c.get(key)
			if exists {
				if notStale {
					expired := elem.expires > 0 && elem.expires < nowNano
					if expired {
						continue
					}
				}
				res[key] = elem.value
				c.promote(idx)
			}
		}
		c.mu.RUnlock()
	}
	return res
}

func (c *Cache) MGetInt(keys ...int) map[int]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetInt(false, nowNano, keys...)
}

func (c *Cache) MGetIntNotStale(keys ...int) map[int]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetInt(true, nowNano, keys...)
}

func (c *Cache) mgetInt(notStale bool, nowNano int64, keys ...int) map[int]interface{} {
	res := make(map[int]interface{}, len(keys))
	c.mu.RLock()
	for _, key := range keys {
		idx, elem, exists := c.get(key)
		if exists {
			if notStale {
				expired := elem.expires > 0 && elem.expires < nowNano
				if expired {
					continue
				}
			}
			res[key] = elem.value
			c.promote(idx)
		}
	}
	c.mu.RUnlock()
	return res
}

func (c *Cache) MGetInt64(keys ...int64) map[int64]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetInt64(false, nowNano, keys...)
}

func (c *Cache) MGetInt64NotStale(keys ...int64) map[int64]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetInt64(true, nowNano, keys...)
}

func (c *Cache) mgetInt64(notStale bool, nowNano int64, keys ...int64) map[int64]interface{} {
	res := make(map[int64]interface{}, len(keys))
	c.mu.RLock()
	for _, key := range keys {
		idx, elem, exists := c.get(key)
		if exists {
			if notStale {
				expired := elem.expires > 0 && elem.expires < nowNano
				if expired {
					continue
				}
			}
			res[key] = elem.value
			c.promote(idx)
		}
	}
	c.mu.RUnlock()
	return res
}

func (c *Cache) MGetUint64(keys ...uint64) map[uint64]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetUint64(false, nowNano, keys...)
}

func (c *Cache) MGetUint64NotStale(keys ...uint64) map[uint64]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetUint64(true, nowNano, keys...)
}

func (c *Cache) mgetUint64(notStale bool, nowNano int64, keys ...uint64) map[uint64]interface{} {
	res := make(map[uint64]interface{}, len(keys))
	c.mu.RLock()
	for _, key := range keys {
		idx, elem, exists := c.get(key)
		if exists {
			if notStale {
				expired := elem.expires > 0 && elem.expires < nowNano
				if expired {
					continue
				}
			}
			res[key] = elem.value
			c.promote(idx)
		}
	}
	c.mu.RUnlock()
	return res
}

func (c *Cache) MGetString(keys ...string) map[string]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetString(false, nowNano, keys...)
}

func (c *Cache) MGetStringNotStale(keys ...string) map[string]interface{} {
	nowNano := time.Now().UnixNano()
	return c.mgetString(true, nowNano, keys...)
}
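
// The typed MGet variants above accept typed key slices and return typed
// result maps, so callers avoid converting through []interface{}. A small
// sketch (hypothetical values):
//
//	c := NewCache(128)
//	c.Set(1, "one", 0)
//	c.Set(2, "two", 0)
//	m := c.MGetInt(1, 2, 3) // map[int]interface{}{1: "one", 2: "two"}
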
func (c *Cache) mgetString(notStale bool, nowNano int64, keys ...string) map[string]interface{} {
	res := make(map[string]interface{}, len(keys))
	c.mu.RLock()
	for _, key := range keys {
		idx, elem, exists := c.get(key)
		if exists {
			if notStale {
				expired := elem.expires > 0 && elem.expires < nowNano
				if expired {
					continue
				}
			}
			res[key] = elem.value
			c.promote(idx)
		}
	}
	c.mu.RUnlock()
	return res
}

func (c *Cache) promote(idx uint32) {
	buf := (*walbuf)(atomic.LoadPointer(&c.buf))
	i := atomic.AddInt32(&buf.p, 1)
	if i <= walBufSize {
		buf.b[i-1] = idx
		return
	}

	// The buffer is full; swap in a new buffer.
	oldbuf := buf

	// Create a new buffer, reserving the first element for this
	// promotion request.
	newbuf := newWalBuf()
	newbuf.p = 1
	for {
		swapped := atomic.CompareAndSwapPointer(&c.buf, unsafe.Pointer(oldbuf), unsafe.Pointer(newbuf))
		if swapped {
			newbuf.b[0] = idx
			break
		}

		// The CAS failed: another goroutine swapped the buffer first.
		// Try to append to the current buffer, else loop and CAS again.
		oldbuf = (*walbuf)(atomic.LoadPointer(&c.buf))
		i = atomic.AddInt32(&oldbuf.p, 1)
		if i <= walBufSize {
			oldbuf.b[i-1] = idx
			newbuf.p = 0
			walbufpool.Put(newbuf)
			return
		}
	}

	// oldbuf has been swapped out; we take responsibility for flushing it.
	go func(c *Cache, buf *walbuf) {
		c.mu.Lock()
		c.flushBuf(buf)
		c.mu.Unlock()
		buf.reset()
		walbufpool.Put(buf)
	}(c, oldbuf)
}

func (c *Cache) Set(key, value interface{}, ttl time.Duration) {
	var expires int64
	if ttl > 0 {
		expires = time.Now().UnixNano() + int64(ttl)
	}
	c.mu.Lock()
	c.checkAndFlushBuf()
	c.set(key, value, expires)
	c.mu.Unlock()
}

func (c *Cache) MSet(kvmap interface{}, ttl time.Duration) {
	var expires int64
	if ttl > 0 {
		expires = time.Now().UnixNano() + int64(ttl)
	}
	m := reflect.ValueOf(kvmap)
	keys := m.MapKeys()

	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		value := m.MapIndex(key)
		c.set(key.Interface(), value.Interface(), expires)
	}
	c.mu.Unlock()
}

func (c *Cache) set(k, v interface{}, expires int64) {
	idx, exists := c.m[k]
	if exists {
		e := c.list.get(idx)
		e.value = v
		e.expires = expires
		c.list.MoveToFront(e)
	} else {
		// Reuse the least recently used slot at the back of the list,
		// evicting its old entry if present.
		e := c.list.Back()
		if e.key != nil {
			delete(c.m, e.key)
		}
		e.key = k
		e.value = v
		e.expires = expires
		c.m[k] = e.index
		c.list.MoveToFront(e)
	}
}

func (c *Cache) Del(key interface{}) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	c.del(key)
	c.mu.Unlock()
}

func (c *Cache) MDel(keys ...interface{}) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}

func (c *Cache) MDelInt(keys ...int) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}

func (c *Cache) MDelInt64(keys ...int64) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}

func (c *Cache) MDelUint64(keys ...uint64) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}

func (c *Cache) MDelString(keys ...string) {
	c.mu.Lock()
	c.checkAndFlushBuf()
	for _, key := range keys {
		c.del(key)
	}
	c.mu.Unlock()
}
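
// Deletion sketch (hypothetical values): Del and the MDel* variants flush
// any buffered promotions first, then release the deleted slots to the
// back of the list for reuse; deleting an absent key is a no-op.
//
//	c := NewCache(128)
//	c.Set("a", 1, 0)
//	c.MDelString("a", "b") // "a" removed, "b" ignored
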
func (c *Cache) del(key interface{}) {
	idx, exists := c.m[key]
	if exists {
		delete(c.m, key)
		elem := c.list.get(idx)
		elem.key = nil
		elem.value = nil
		c.list.MoveToBack(elem)
	}
}

func (c *Cache) checkAndFlushBuf() {
	buf := (*walbuf)(c.buf)
	if buf.p > 0 {
		c.flushBuf(buf)
		buf.reset()
	}
}

func (c *Cache) flushBuf(buf *walbuf) {
	if buf.p == 0 {
		return
	}

	// Remove duplicate elements.
	b := buf.deduplicate()

	// Promote elements by their access order.
	for _, idx := range b {
		elem := c.list.get(idx)
		c.list.MoveToFront(elem)
	}
}
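
// The promotion path batches LRU reorderings through a write-ahead buffer
// (walbuf): readers append element indices under the shared read lock
// using atomic operations, and a full buffer is flushed under the
// exclusive lock, either by the goroutine spawned in promote or lazily by
// the next writer via checkAndFlushBuf. A rough sequence sketch, assuming
// reads fill one walbuf:
//
//	c.Get("k")       // records k's index in the current walbuf
//	// ... more reads fill the buffer; promote swaps in a fresh one
//	c.Set("x", 1, 0) // write lock: flushes buffered promotions in
//	                 // access order before storing "x"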