github.com/shuguocloud/go-zero@v1.3.0/core/collection/cache.go

package collection

import (
	"container/list"
	"sync"
	"sync/atomic"
	"time"

	"github.com/shuguocloud/go-zero/core/logx"
	"github.com/shuguocloud/go-zero/core/mathx"
	"github.com/shuguocloud/go-zero/core/syncx"
)

const (
	defaultCacheName = "proc"
	slots            = 300
	statInterval     = time.Minute
	// make the expiry unstable to avoid lots of cached items expiring at the same time
	// the unstable expiry spreads into [0.95, 1.05] * expire
	expiryDeviation = 0.05
)

var emptyLruCache = emptyLru{}

type (
	// CacheOption defines the method to customize a Cache.
	CacheOption func(cache *Cache)

	// A Cache object is an in-memory cache.
	Cache struct {
		name           string
		lock           sync.Mutex
		data           map[string]interface{}
		expire         time.Duration
		timingWheel    *TimingWheel
		lruCache       lru
		barrier        syncx.SingleFlight
		unstableExpiry mathx.Unstable
		stats          *cacheStat
	}
)

// NewCache returns a Cache with the given expire duration.
func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
	cache := &Cache{
		data:           make(map[string]interface{}),
		expire:         expire,
		lruCache:       emptyLruCache,
		barrier:        syncx.NewSingleFlight(),
		unstableExpiry: mathx.NewUnstable(expiryDeviation),
	}

	for _, opt := range opts {
		opt(cache)
	}

	if len(cache.name) == 0 {
		cache.name = defaultCacheName
	}
	cache.stats = newCacheStat(cache.name, cache.size)

	timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
		key, ok := k.(string)
		if !ok {
			return
		}

		cache.Del(key)
	})
	if err != nil {
		return nil, err
	}

	cache.timingWheel = timingWheel
	return cache, nil
}

// Del deletes the item with the given key from c.
func (c *Cache) Del(key string) {
	c.lock.Lock()
	delete(c.data, key)
	c.lruCache.remove(key)
	c.lock.Unlock()
	c.timingWheel.RemoveTimer(key)
}

// Get returns the item with the given key from c.
func (c *Cache) Get(key string) (interface{}, bool) {
	value, ok := c.doGet(key)
	if ok {
		c.stats.IncrementHit()
	} else {
		c.stats.IncrementMiss()
	}

	return value, ok
}

// Set sets value into c with key.
func (c *Cache) Set(key string, value interface{}) {
	c.lock.Lock()
	_, ok := c.data[key]
	c.data[key] = value
	c.lruCache.add(key)
	c.lock.Unlock()

	expiry := c.unstableExpiry.AroundDuration(c.expire)
	if ok {
		c.timingWheel.MoveTimer(key, expiry)
	} else {
		c.timingWheel.SetTimer(key, value, expiry)
	}
}

// Take returns the item with the given key.
// If the item is in c, return it directly.
// If not, use the fetch method to get the item, set it into c and return it.
func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
	if val, ok := c.doGet(key); ok {
		c.stats.IncrementHit()
		return val, nil
	}

	var fresh bool
	val, err := c.barrier.Do(key, func() (interface{}, error) {
		// double check inside the barrier: the map lookup is O(1) in memory
		// while fetch is an I/O query, and another call might have already
		// populated the cache.
		if val, ok := c.doGet(key); ok {
			return val, nil
		}

		v, e := fetch()
		if e != nil {
			return nil, e
		}

		fresh = true
		c.Set(key, v)
		return v, nil
	})
	if err != nil {
		return nil, err
	}

	if fresh {
		c.stats.IncrementMiss()
		return val, nil
	}

	// got the result from a previously in-flight query
	c.stats.IncrementHit()
	return val, nil
}

func (c *Cache) doGet(key string) (interface{}, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()

	value, ok := c.data[key]
	if ok {
		c.lruCache.add(key)
	}

	return value, ok
}

func (c *Cache) onEvict(key string) {
	// already locked by the caller
	delete(c.data, key)
	c.timingWheel.RemoveTimer(key)
}

func (c *Cache) size() int {
	c.lock.Lock()
	defer c.lock.Unlock()
	return len(c.data)
}

// WithLimit customizes a Cache with items up to limit.
func WithLimit(limit int) CacheOption {
	return func(cache *Cache) {
		if limit > 0 {
			cache.lruCache = newKeyLru(limit, cache.onEvict)
		}
	}
}

// WithName customizes a Cache with the given name.
func WithName(name string) CacheOption {
	return func(cache *Cache) {
		cache.name = name
	}
}

type (
	lru interface {
		add(key string)
		remove(key string)
	}

	emptyLru struct{}

	keyLru struct {
		limit    int
		evicts   *list.List
		elements map[string]*list.Element
		onEvict  func(key string)
	}
)

func (elru emptyLru) add(string) {
}

func (elru emptyLru) remove(string) {
}

func newKeyLru(limit int, onEvict func(key string)) *keyLru {
	return &keyLru{
		limit:    limit,
		evicts:   list.New(),
		elements: make(map[string]*list.Element),
		onEvict:  onEvict,
	}
}

func (klru *keyLru) add(key string) {
	if elem, ok := klru.elements[key]; ok {
		klru.evicts.MoveToFront(elem)
		return
	}

	// add new item
	elem := klru.evicts.PushFront(key)
	klru.elements[key] = elem

	// evict the oldest entry if the size limit is exceeded
	if klru.evicts.Len() > klru.limit {
		klru.removeOldest()
	}
}

func (klru *keyLru) remove(key string) {
	if elem, ok := klru.elements[key]; ok {
		klru.removeElement(elem)
	}
}

func (klru *keyLru) removeOldest() {
	elem := klru.evicts.Back()
	if elem != nil {
		klru.removeElement(elem)
	}
}

func (klru *keyLru) removeElement(e *list.Element) {
	klru.evicts.Remove(e)
	key := e.Value.(string)
	delete(klru.elements, key)
	klru.onEvict(key)
}

type cacheStat struct {
	name         string
	hit          uint64
	miss         uint64
	sizeCallback func() int
}

func newCacheStat(name string, sizeCallback func() int) *cacheStat {
	st := &cacheStat{
		name:         name,
		sizeCallback: sizeCallback,
	}
	go st.statLoop()
	return st
}

func (cs *cacheStat) IncrementHit() {
	atomic.AddUint64(&cs.hit, 1)
}

func (cs *cacheStat) IncrementMiss() {
	atomic.AddUint64(&cs.miss, 1)
}

func (cs *cacheStat) statLoop() {
	ticker := time.NewTicker(statInterval)
	defer ticker.Stop()

	for range ticker.C {
		hit := atomic.SwapUint64(&cs.hit, 0)
		miss := atomic.SwapUint64(&cs.miss, 0)
		total := hit + miss
		if total == 0 {
			continue
		}
		percent := 100 * float32(hit) / float32(total)
		logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
			cs.name, total, percent, cs.sizeCallback(), hit, miss)
	}
}
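
// Usage sketch, not part of the original file: a minimal illustration of how
// this cache is typically wired up. Get/Set cover plain reads and writes, and
// Take deduplicates concurrent fetches for the same key through the internal
// SingleFlight barrier, so only one fetch reaches the backing store on a miss.
// The fetchUser closure below is a hypothetical stand-in for a real I/O query.
func exampleCacheUsage() (interface{}, error) {
	// one-minute expiry (randomized into [0.95, 1.05] * 1m), at most 1000 keys
	c, err := NewCache(time.Minute, WithLimit(1000), WithName("users"))
	if err != nil {
		return nil, err
	}

	// plain write and read; the entry is dropped once its expiry timer fires
	c.Set("greeting", "hello")
	if v, ok := c.Get("greeting"); ok {
		logx.Infof("greeting: %v", v)
	}

	// hypothetical loader standing in for a database or RPC call
	fetchUser := func() (interface{}, error) {
		return "user-1", nil
	}

	// executed only on a miss; concurrent callers for the same key share one fetch
	return c.Take("user:1", fetchUser)
}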