github.com/blend/go-sdk@v1.20220411.3/cache/local_cache.go

/*

Copyright (c) 2022 - Present. Blend Labs, Inc. All rights reserved
Use of this source code is governed by a MIT license that can be found in the LICENSE file.

*/

package cache

import (
	"context"
	"reflect"
	"sync"
	"time"
	"unsafe"

	"github.com/blend/go-sdk/async"
)

var (
	_ Cache  = (*LocalCache)(nil)
	_ Locker = (*LocalCache)(nil)
)

// New returns a new LocalCache.
// It defaults to 500ms sweep intervals and an LRU queue for invalidation.
func New(options ...LocalCacheOption) *LocalCache {
	c := LocalCache{
		Data: make(map[interface{}]*Value),
		LRU:  NewLRUQueue(),
	}
	c.Sweeper = async.NewInterval(c.Sweep, 500*time.Millisecond)
	for _, opt := range options {
		opt(&c)
	}
	return &c
}

// LocalCacheOption is a local cache option.
type LocalCacheOption func(*LocalCache)

// OptSweepInterval sets the local cache sweep interval.
func OptSweepInterval(d time.Duration) LocalCacheOption {
	return func(lc *LocalCache) {
		lc.Sweeper = async.NewInterval(lc.Sweep, d)
	}
}

// OptLRU sets the LRU implementation.
func OptLRU(lruImplementation LRU) LocalCacheOption {
	return func(lc *LocalCache) {
		lc.LRU = lruImplementation
	}
}

// LocalCache is an in-memory cache.
type LocalCache struct {
	sync.RWMutex
	Data    map[interface{}]*Value
	LRU     LRU
	Sweeper *async.Interval
}

// Start starts the sweeper.
func (lc *LocalCache) Start() error {
	return lc.Sweeper.Start()
}

// NotifyStarted returns the underlying started signal.
func (lc *LocalCache) NotifyStarted() <-chan struct{} {
	return lc.Sweeper.NotifyStarted()
}

// Stop stops the sweeper.
func (lc *LocalCache) Stop() error {
	return lc.Sweeper.Stop()
}

// NotifyStopped returns the underlying stopped signal.
func (lc *LocalCache) NotifyStopped() <-chan struct{} {
	return lc.Sweeper.NotifyStopped()
}

type removeHandler struct {
	Key     interface{}
	Handler func(interface{}, RemovalReason)
}

// Sweep checks keys for expired TTLs.
// If any expired values are configured with `OnRemove` handlers, those
// handlers are called outside the critical section.
func (lc *LocalCache) Sweep(ctx context.Context) error {
	lc.Lock()
	now := time.Now().UTC()

	var keysToRemove []interface{}
	var handlers []removeHandler
	lc.LRU.Consume(func(v *Value) bool {
		if !v.Expires.IsZero() && now.After(v.Expires) {
			keysToRemove = append(keysToRemove, v.Key)
			if v.OnRemove != nil {
				handlers = append(handlers, removeHandler{
					Key:     v.Key,
					Handler: v.OnRemove,
				})
			}
			return true
		}
		return false
	})

	for _, key := range keysToRemove {
		delete(lc.Data, key)
	}
	lc.Unlock()

	// call the handlers outside the critical section.
	for _, handler := range handlers {
		handler.Handler(handler.Key, Expired)
	}
	return nil
}
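
// The sketch below is illustrative only and is not part of the package API.
// It shows how TTL expiry interacts with Sweep: the inline ValueOption sets the
// Expires and OnRemove fields the sweeper checks. It assumes ValueOption is a
// func(*Value), as implied by Set applying options with `opt(&v)`. In normal use
// the Sweeper runs on its own interval via Start/NotifyStarted; here Sweep is
// invoked directly so the example is deterministic.
func exampleSweepExpiry() {
	c := New()

	// store a value that is already expired; the next sweep should evict it
	// and then call the OnRemove handler with reason Expired.
	c.Set("session", "data", func(v *Value) {
		v.Expires = time.Now().UTC().Add(-time.Minute)
		v.OnRemove = func(key interface{}, reason RemovalReason) {
			// reason is Expired when the eviction came from a sweep.
		}
	})

	// run one sweep by hand instead of starting the background interval.
	_ = c.Sweep(context.Background())

	_, hit := c.Get("session")
	_ = hit // expected to be false after the sweep
}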

// Set adds a LocalCache item.
func (lc *LocalCache) Set(key, value interface{}, options ...ValueOption) {
	if key == nil {
		panic("local cache: nil key")
	}

	if !reflect.TypeOf(key).Comparable() {
		panic("local cache: key is not comparable")
	}

	v := Value{
		Timestamp: time.Now().UTC(),
		Key:       key,
		Value:     value,
	}

	for _, opt := range options {
		opt(&v)
	}

	lc.Lock()
	if lc.Data == nil {
		lc.Data = make(map[interface{}]*Value)
	}
	if value, ok := lc.Data[key]; ok {
		lc.LRU.Fix(&v)
		*value = v
	} else {
		lc.Data[key] = &v
		lc.LRU.Push(&v)
	}
	lc.Unlock()
}

// Get gets a value based on a key.
func (lc *LocalCache) Get(key interface{}) (value interface{}, hit bool) {
	lc.RLock()
	valueNode, ok := lc.Data[key]
	lc.RUnlock()
	if ok {
		value = valueNode.Value
		hit = true
		return
	}
	return
}

// GetOrSet gets a value by a key and, in the case of a miss, lazily sets the value from a given value provider.
// Hit indicates that the provider was not called.
func (lc *LocalCache) GetOrSet(key interface{}, valueProvider func() (interface{}, error), options ...ValueOption) (value interface{}, hit bool, err error) {
	if key == nil {
		panic("local cache: nil key")
	}

	if !reflect.TypeOf(key).Comparable() {
		panic("local cache: key is not comparable")
	}

	// check if we already have the value
	lc.RLock()
	valueNode, ok := lc.Data[key]
	lc.RUnlock()

	if ok {
		value = valueNode.Value
		hit = true
		return
	}

	// call the value provider outside the critical section.
	// this will create a meaningful gap between releasing the
	// read lock and grabbing the write lock.
	value, err = valueProvider()
	if err != nil {
		return
	}

	// we didn't have the value, grab the write lock
	lc.Lock()
	defer lc.Unlock()

	// double-checked lock; there may have been a write
	// while we waited for the exclusive lock.
	valueNode, ok = lc.Data[key]
	if ok {
		value = valueNode.Value
		hit = true
		return
	}

	// set up the value
	v := Value{
		Timestamp: time.Now().UTC(),
		Key:       key,
		Value:     value,
	}
	// apply options
	for _, opt := range options {
		opt(&v)
	}

	// upsert
	if value, ok := lc.Data[key]; ok {
		lc.LRU.Fix(&v)
		*value = v
	} else {
		lc.Data[key] = &v
		lc.LRU.Push(&v)
	}

	return
}

// Has returns whether the key is present in the LocalCache.
func (lc *LocalCache) Has(key interface{}) (has bool) {
	lc.RLock()
	_, has = lc.Data[key]
	lc.RUnlock()
	return
}

// Remove removes a specific key.
func (lc *LocalCache) Remove(key interface{}) (value interface{}, hit bool) {
	lc.Lock()
	valueData, ok := lc.Data[key]
	if ok {
		delete(lc.Data, key)
		lc.LRU.Remove(key)
	}
	lc.Unlock()
	if !ok {
		return
	}

	value = valueData.Value
	hit = true

	if valueData.OnRemove != nil {
		valueData.OnRemove(key, Removed)
	}
	return
}
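
// The sketch below is illustrative only and is not part of the package API.
// It shows the intended GetOrSet flow: the provider is only invoked on a miss,
// and its result is cached for subsequent calls. The "config" key and the
// loadConfig provider are hypothetical placeholders.
func exampleGetOrSet() (interface{}, error) {
	c := New()

	loadConfig := func() (interface{}, error) {
		// stands in for an expensive load (disk, network, etc.); GetOrSet
		// calls this outside the cache's critical section.
		return map[string]string{"env": "prod"}, nil
	}

	// first call misses, invokes loadConfig, and caches the result.
	value, hit, err := c.GetOrSet("config", loadConfig)
	if err != nil {
		return nil, err
	}
	_ = hit // false on the first call

	// second call hits the cache; the provider is not called again.
	value, hit, err = c.GetOrSet("config", loadConfig)
	_ = hit // true on the second call

	// Remove evicts the key and returns the cached value.
	removed, found := c.Remove("config")
	_, _ = removed, found

	return value, err
}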

// Reset removes all items from the cache, leaving an empty cache.
//
// Reset will call the OnRemove handler for any elements currently in the cache
// with a removal reason `Removed`. This will be done outside the critical section.
func (lc *LocalCache) Reset() {
	lc.Lock()
	var removed []*Value
	for _, value := range lc.Data {
		if value.OnRemove != nil {
			removed = append(removed, value)
		}
	}
	lc.LRU.Reset()                         // reset the lru queue
	lc.Data = make(map[interface{}]*Value) // reset the map
	lc.Unlock()

	// call the remove handlers
	for _, value := range removed {
		value.OnRemove(value.Key, Removed)
	}
}

// Stats returns the LocalCache stats.
//
// Stats include the number of items held, the maximum age of the items,
// and the size in bytes represented by each of the items (not including
// the fields of the cache itself, like the LRU queue).
func (lc *LocalCache) Stats() (stats Stats) {
	lc.RLock()
	defer lc.RUnlock()

	stats.Count = len(lc.Data)
	now := time.Now().UTC()
	for _, item := range lc.Data {
		age := now.Sub(item.Timestamp)
		if stats.MaxAge < age {
			stats.MaxAge = age
		}
		stats.SizeBytes += int(unsafe.Sizeof(item))
	}
	return
}
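
// The sketch below is illustrative only and is not part of the package API.
// It shows Stats and Reset together: Stats summarizes what the cache currently
// holds, and Reset clears everything, calling each entry's OnRemove handler
// with reason Removed outside the critical section. The keys are placeholders,
// and the inline ValueOption closure again assumes ValueOption is a func(*Value).
func exampleStatsAndReset() Stats {
	c := New()

	c.Set("a", 1)
	c.Set("b", 2, func(v *Value) {
		v.OnRemove = func(key interface{}, reason RemovalReason) {
			// reason is Removed when the eviction came from Reset (or Remove),
			// as opposed to Expired for sweep-based eviction.
		}
	})

	// snapshot before clearing: Count should be 2 and MaxAge near zero.
	stats := c.Stats()

	// drop everything; "b"'s OnRemove handler fires after the lock is released.
	c.Reset()

	return stats
}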