github.com/mailgun/holster/v4@v4.20.0/collections/lru_cache.go

/*
Modifications Copyright 2017 Mailgun Technologies Inc

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This work is derived from github.com/golang/groupcache/lru
*/
package collections

import (
	"container/list"
	"sync"

	"github.com/mailgun/holster/v4/clock"
	"github.com/mailgun/holster/v4/syncutil"
)

// LRUCacheStats holds stats collected about the cache.
type LRUCacheStats struct {
	Size int64
	Miss int64
	Hit  int64
}

// LRUCache is a thread safe LRU cache that also supports optional TTL expiration.
// A non thread safe version of this cache is available in github.com/golang/groupcache/lru.
type LRUCache struct {
	// MaxEntries is the maximum number of cache entries before
	// an item is evicted. Zero means no limit.
	MaxEntries int

	// OnEvicted optionally specifies a callback function to be
	// executed when an entry is purged from the cache.
	OnEvicted func(key Key, value interface{})

	mutex sync.Mutex
	stats LRUCacheStats
	ll    *list.List
	cache map[interface{}]*list.Element
}

// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}

type CacheItem struct {
	Key      Key
	Value    interface{}
	ExpireAt *clock.Time
}

// NewLRUCache creates a new LRUCache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func NewLRUCache(maxEntries int) *LRUCache {
	return &LRUCache{
		MaxEntries: maxEntries,
		ll:         list.New(),
		cache:      make(map[interface{}]*list.Element),
	}
}

// Add adds or updates a value in the cache, returning true if the key already existed.
func (c *LRUCache) Add(key Key, value interface{}) bool {
	return c.addRecord(&CacheItem{Key: key, Value: value})
}

// AddWithTTL adds a value to the cache with a TTL, returning true if the key already existed.
func (c *LRUCache) AddWithTTL(key Key, value interface{}, ttl clock.Duration) bool {
	expireAt := clock.Now().UTC().Add(ttl)
	return c.addRecord(&CacheItem{
		Key:      key,
		Value:    value,
		ExpireAt: &expireAt,
	})
}

// addRecord adds a value to the cache, returning true if the key already existed.
func (c *LRUCache) addRecord(record *CacheItem) bool {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	// If the key already exists, set the new value
	if ee, ok := c.cache[record.Key]; ok {
		c.ll.MoveToFront(ee)
		temp := ee.Value.(*CacheItem)
		*temp = *record
		return true
	}

	ele := c.ll.PushFront(record)
	c.cache[record.Key] = ele
	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
		c.removeOldest()
	}
	return false
}
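// The sketch below is illustrative and not part of the original file: it shows
// how a caller might wire up NewLRUCache, MaxEntries eviction, OnEvicted, Add
// and AddWithTTL. The function name exampleLRUCacheAdd is hypothetical, and
// 10*clock.Second assumes the holster clock package re-exports the standard
// time duration constants.
func exampleLRUCacheAdd() {
	// Cap the cache at two entries; the least recently used entry is
	// evicted once a third entry is added.
	cache := NewLRUCache(2)
	cache.OnEvicted = func(key Key, value interface{}) {
		// Called for every entry removed from the cache, whether it was
		// displaced by MaxEntries, found expired, or removed explicitly.
	}

	cache.Add("a", 1)                         // no TTL, evicted only by LRU pressure
	cache.AddWithTTL("b", 2, 10*clock.Second) // expires ten seconds from now
	cache.Add("c", 3)                         // pushes "a" out and triggers OnEvicted

	if value, ok := cache.Get("b"); ok {
		_ = value // still present and not yet expired
	}
}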
// Get looks up a key's value from the cache.
func (c *LRUCache) Get(key Key) (value interface{}, ok bool) {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	if ele, hit := c.cache[key]; hit {
		entry := ele.Value.(*CacheItem)

		// If the entry has expired, remove it from the cache
		if entry.ExpireAt != nil && entry.ExpireAt.Before(clock.Now().UTC()) {
			c.removeElement(ele)
			c.stats.Miss++
			return
		}
		c.stats.Hit++
		c.ll.MoveToFront(ele)
		return entry.Value, true
	}
	c.stats.Miss++
	return
}

// Remove removes the provided key from the cache.
func (c *LRUCache) Remove(key Key) {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	if ele, hit := c.cache[key]; hit {
		c.removeElement(ele)
	}
}

// removeOldest removes the oldest item from the cache.
func (c *LRUCache) removeOldest() {
	ele := c.ll.Back()
	if ele != nil {
		c.removeElement(ele)
	}
}

func (c *LRUCache) removeElement(e *list.Element) {
	c.ll.Remove(e)
	kv := e.Value.(*CacheItem)
	delete(c.cache, kv.Key)
	if c.OnEvicted != nil {
		c.OnEvicted(kv.Key, kv.Value)
	}
}

// Size returns the number of items in the cache.
func (c *LRUCache) Size() int {
	defer c.mutex.Unlock()
	c.mutex.Lock()
	return c.ll.Len()
}

// Stats returns stats about the current state of the cache. Note that the
// hit and miss counters are reset each time Stats is called.
func (c *LRUCache) Stats() LRUCacheStats {
	defer func() {
		c.stats = LRUCacheStats{}
		c.mutex.Unlock()
	}()
	c.mutex.Lock()
	c.stats.Size = int64(len(c.cache))
	return c.stats
}

// Keys returns a list of the keys in the cache at this point in time.
func (c *LRUCache) Keys() (keys []interface{}) {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	for key := range c.cache {
		keys = append(keys, key)
	}
	return
}

// Peek returns the value without updating the expiration, last used or stats.
func (c *LRUCache) Peek(key interface{}) (value interface{}, ok bool) {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	if ele, hit := c.cache[key]; hit {
		entry := ele.Value.(*CacheItem)
		return entry.Value, true
	}
	return nil, false
}

// Each processes each item in the cache in a thread safe way, such that the
// cache can be in use while items are being processed. Processing the cache
// with Each does not update the expiration or last used.
func (c *LRUCache) Each(concurrent int, callBack func(key interface{}, value interface{}) error) []error {
	fanOut := syncutil.NewFanOut(concurrent)
	keys := c.Keys()

	for _, key := range keys {
		fanOut.Run(func(key interface{}) error {
			value, ok := c.Peek(key)
			if !ok {
				// Key disappeared during cache iteration. This can occur as
				// expiration and removal can happen during iteration
				return nil
			}

			err := callBack(key, value)
			if err != nil {
				return err
			}
			return nil
		}, key)
	}

	// Wait for all the routines to complete
	errs := fanOut.Wait()
	if errs != nil {
		return errs
	}
	return nil
}
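// The sketch below is illustrative and not part of the original file: it shows
// how Each can walk the cache with a pool of goroutines while other callers
// keep using it, and how Stats reports hit and miss counts. The function name
// exampleLRUCacheInspect is hypothetical.
func exampleLRUCacheInspect(cache *LRUCache) error {
	// Walk every key with up to 10 concurrent workers. Each uses Peek
	// internally, so iteration does not touch LRU order or TTLs.
	errs := cache.Each(10, func(key interface{}, value interface{}) error {
		// Any error returned here is collected by Each and returned in a
		// slice once all keys have been processed.
		return nil
	})
	if len(errs) != 0 {
		return errs[0]
	}

	// Stats reports the current size plus the hit/miss counts accumulated
	// by Get; the counters are reset every time Stats is called.
	stats := cache.Stats()
	_ = stats.Hit
	_ = stats.Miss
	return nil
}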
// Map modifies the cache according to the mapping function. If mapping returns
// false the item is removed from the cache and OnEvicted is called if defined.
// Map claims exclusive access to the cache; as such concurrent access will
// block until Map returns.
func (c *LRUCache) Map(mapping func(item *CacheItem) bool) {
	defer c.mutex.Unlock()
	c.mutex.Lock()

	for _, v := range c.cache {
		if !mapping(v.Value.(*CacheItem)) {
			c.removeElement(v)
		}
	}
}
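// The sketch below is illustrative and not part of the original file: it shows
// how Map can prune entries in bulk; returning false from the mapping function
// removes the item and fires OnEvicted if it is set. The function name
// exampleLRUCachePrune is hypothetical.
func exampleLRUCachePrune(cache *LRUCache) {
	cache.Map(func(item *CacheItem) bool {
		// Keep entries that never expire; drop any entry whose TTL has
		// already passed.
		if item.ExpireAt == nil {
			return true
		}
		return item.ExpireAt.After(clock.Now().UTC())
	})
}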