github.com/lingyao2333/mo-zero@v1.4.1/core/collection/cache.go (about)

     1  package collection
     2  
     3  import (
     4  	"container/list"
     5  	"sync"
     6  	"sync/atomic"
     7  	"time"
     8  
     9  	"github.com/lingyao2333/mo-zero/core/logx"
    10  	"github.com/lingyao2333/mo-zero/core/mathx"
    11  	"github.com/lingyao2333/mo-zero/core/syncx"
    12  )
    13  
const (
	// defaultCacheName is used when the caller does not set one via WithName.
	defaultCacheName = "proc"
	// slots is the number of buckets in the expiration timing wheel;
	// with a one-second tick this gives a 5-minute wheel rotation.
	slots            = 300
	// statInterval is how often cacheStat logs hit/miss statistics.
	statInterval     = time.Minute
	// make the expiry unstable to avoid lots of cached items expire at the same time
	// make the unstable expiry to be [0.95, 1.05] * seconds
	expiryDeviation = 0.05
)

// emptyLruCache is the shared no-op lru used when no size limit is configured.
var emptyLruCache = emptyLru{}
    24  
type (
	// CacheOption defines the method to customize a Cache.
	CacheOption func(cache *Cache)

	// A Cache object is an in-memory cache.
	Cache struct {
		// name identifies this cache in stat log lines; defaults to defaultCacheName.
		name           string
		// lock guards data and lruCache.
		lock           sync.Mutex
		// data holds the cached key/value pairs.
		data           map[string]interface{}
		// expire is the default ttl used by Set.
		expire         time.Duration
		// timingWheel schedules per-key expirations.
		timingWheel    *TimingWheel
		// lruCache enforces the optional size limit (emptyLruCache when unbounded).
		lruCache       lru
		// barrier deduplicates concurrent fetches for the same key in Take.
		barrier        syncx.SingleFlight
		// unstableExpiry jitters ttls by ±expiryDeviation to spread expirations.
		unstableExpiry mathx.Unstable
		// stats accumulates hit/miss counters and logs them periodically.
		stats          *cacheStat
	}
)
    42  
    43  // NewCache returns a Cache with given expire.
    44  func NewCache(expire time.Duration, opts ...CacheOption) (*Cache, error) {
    45  	cache := &Cache{
    46  		data:           make(map[string]interface{}),
    47  		expire:         expire,
    48  		lruCache:       emptyLruCache,
    49  		barrier:        syncx.NewSingleFlight(),
    50  		unstableExpiry: mathx.NewUnstable(expiryDeviation),
    51  	}
    52  
    53  	for _, opt := range opts {
    54  		opt(cache)
    55  	}
    56  
    57  	if len(cache.name) == 0 {
    58  		cache.name = defaultCacheName
    59  	}
    60  	cache.stats = newCacheStat(cache.name, cache.size)
    61  
    62  	timingWheel, err := NewTimingWheel(time.Second, slots, func(k, v interface{}) {
    63  		key, ok := k.(string)
    64  		if !ok {
    65  			return
    66  		}
    67  
    68  		cache.Del(key)
    69  	})
    70  	if err != nil {
    71  		return nil, err
    72  	}
    73  
    74  	cache.timingWheel = timingWheel
    75  	return cache, nil
    76  }
    77  
    78  // Del deletes the item with the given key from c.
    79  func (c *Cache) Del(key string) {
    80  	c.lock.Lock()
    81  	delete(c.data, key)
    82  	c.lruCache.remove(key)
    83  	c.lock.Unlock()
    84  	c.timingWheel.RemoveTimer(key)
    85  }
    86  
    87  // Get returns the item with the given key from c.
    88  func (c *Cache) Get(key string) (interface{}, bool) {
    89  	value, ok := c.doGet(key)
    90  	if ok {
    91  		c.stats.IncrementHit()
    92  	} else {
    93  		c.stats.IncrementMiss()
    94  	}
    95  
    96  	return value, ok
    97  }
    98  
// Set stores value under key using the cache's default expiration.
func (c *Cache) Set(key string, value interface{}) {
	c.SetWithExpire(key, value, c.expire)
}
   103  
   104  // SetWithExpire sets value into c with key and expire with the given value.
   105  func (c *Cache) SetWithExpire(key string, value interface{}, expire time.Duration) {
   106  	c.lock.Lock()
   107  	_, ok := c.data[key]
   108  	c.data[key] = value
   109  	c.lruCache.add(key)
   110  	c.lock.Unlock()
   111  
   112  	expiry := c.unstableExpiry.AroundDuration(expire)
   113  	if ok {
   114  		c.timingWheel.MoveTimer(key, expiry)
   115  	} else {
   116  		c.timingWheel.SetTimer(key, value, expiry)
   117  	}
   118  }
   119  
   120  // Take returns the item with the given key.
   121  // If the item is in c, return it directly.
   122  // If not, use fetch method to get the item, set into c and return it.
   123  func (c *Cache) Take(key string, fetch func() (interface{}, error)) (interface{}, error) {
   124  	if val, ok := c.doGet(key); ok {
   125  		c.stats.IncrementHit()
   126  		return val, nil
   127  	}
   128  
   129  	var fresh bool
   130  	val, err := c.barrier.Do(key, func() (interface{}, error) {
   131  		// because O(1) on map search in memory, and fetch is an IO query
   132  		// so we do double check, cache might be taken by another call
   133  		if val, ok := c.doGet(key); ok {
   134  			return val, nil
   135  		}
   136  
   137  		v, e := fetch()
   138  		if e != nil {
   139  			return nil, e
   140  		}
   141  
   142  		fresh = true
   143  		c.Set(key, v)
   144  		return v, nil
   145  	})
   146  	if err != nil {
   147  		return nil, err
   148  	}
   149  
   150  	if fresh {
   151  		c.stats.IncrementMiss()
   152  		return val, nil
   153  	}
   154  
   155  	// got the result from previous ongoing query
   156  	c.stats.IncrementHit()
   157  	return val, nil
   158  }
   159  
   160  func (c *Cache) doGet(key string) (interface{}, bool) {
   161  	c.lock.Lock()
   162  	defer c.lock.Unlock()
   163  
   164  	value, ok := c.data[key]
   165  	if ok {
   166  		c.lruCache.add(key)
   167  	}
   168  
   169  	return value, ok
   170  }
   171  
// onEvict is installed as the keyLru eviction callback: it drops the
// evicted entry from the data map and cancels its expiration timer.
// It runs while c.lock is already held (eviction is triggered inside
// lruCache.add, which is always called under the lock), so it must not
// attempt to lock again.
func (c *Cache) onEvict(key string) {
	// already locked
	delete(c.data, key)
	c.timingWheel.RemoveTimer(key)
}
   177  
   178  func (c *Cache) size() int {
   179  	c.lock.Lock()
   180  	defer c.lock.Unlock()
   181  	return len(c.data)
   182  }
   183  
   184  // WithLimit customizes a Cache with items up to limit.
   185  func WithLimit(limit int) CacheOption {
   186  	return func(cache *Cache) {
   187  		if limit > 0 {
   188  			cache.lruCache = newKeyLru(limit, cache.onEvict)
   189  		}
   190  	}
   191  }
   192  
   193  // WithName customizes a Cache with the given name.
   194  func WithName(name string) CacheOption {
   195  	return func(cache *Cache) {
   196  		cache.name = name
   197  	}
   198  }
   199  
type (
	// lru abstracts the eviction policy: a no-op implementation when the
	// cache is unbounded, and keyLru when WithLimit is applied.
	lru interface {
		add(key string)
		remove(key string)
	}

	// emptyLru is the no-op lru used when no size limit is configured.
	emptyLru struct{}

	// keyLru is a string-keyed LRU backed by a doubly linked list plus a
	// map from key to its list element for O(1) lookup.
	keyLru struct {
		// limit is the maximum number of tracked keys before eviction.
		limit    int
		// evicts orders keys from most (front) to least (back) recently used.
		evicts   *list.List
		// elements maps each key to its node in evicts.
		elements map[string]*list.Element
		// onEvict is invoked with each key removed due to capacity.
		onEvict  func(key string)
	}
)
   215  
   216  func (elru emptyLru) add(string) {
   217  }
   218  
   219  func (elru emptyLru) remove(string) {
   220  }
   221  
   222  func newKeyLru(limit int, onEvict func(key string)) *keyLru {
   223  	return &keyLru{
   224  		limit:    limit,
   225  		evicts:   list.New(),
   226  		elements: make(map[string]*list.Element),
   227  		onEvict:  onEvict,
   228  	}
   229  }
   230  
   231  func (klru *keyLru) add(key string) {
   232  	if elem, ok := klru.elements[key]; ok {
   233  		klru.evicts.MoveToFront(elem)
   234  		return
   235  	}
   236  
   237  	// Add new item
   238  	elem := klru.evicts.PushFront(key)
   239  	klru.elements[key] = elem
   240  
   241  	// Verify size not exceeded
   242  	if klru.evicts.Len() > klru.limit {
   243  		klru.removeOldest()
   244  	}
   245  }
   246  
   247  func (klru *keyLru) remove(key string) {
   248  	if elem, ok := klru.elements[key]; ok {
   249  		klru.removeElement(elem)
   250  	}
   251  }
   252  
   253  func (klru *keyLru) removeOldest() {
   254  	elem := klru.evicts.Back()
   255  	if elem != nil {
   256  		klru.removeElement(elem)
   257  	}
   258  }
   259  
   260  func (klru *keyLru) removeElement(e *list.Element) {
   261  	klru.evicts.Remove(e)
   262  	key := e.Value.(string)
   263  	delete(klru.elements, key)
   264  	klru.onEvict(key)
   265  }
   266  
// cacheStat tracks hit/miss counters for a named cache and logs them
// periodically from a background goroutine.
type cacheStat struct {
	// name of the owning cache, included in every stat line.
	name         string
	// hit and miss are updated atomically; statLoop swaps them back to
	// zero at each reporting interval.
	hit          uint64
	miss         uint64
	// sizeCallback reports the current number of cached elements.
	sizeCallback func() int
}
   273  
   274  func newCacheStat(name string, sizeCallback func() int) *cacheStat {
   275  	st := &cacheStat{
   276  		name:         name,
   277  		sizeCallback: sizeCallback,
   278  	}
   279  	go st.statLoop()
   280  	return st
   281  }
   282  
// IncrementHit atomically bumps the hit counter by one.
func (cs *cacheStat) IncrementHit() {
	atomic.AddUint64(&cs.hit, 1)
}
   286  
// IncrementMiss atomically bumps the miss counter by one.
func (cs *cacheStat) IncrementMiss() {
	atomic.AddUint64(&cs.miss, 1)
}
   290  
   291  func (cs *cacheStat) statLoop() {
   292  	ticker := time.NewTicker(statInterval)
   293  	defer ticker.Stop()
   294  
   295  	for range ticker.C {
   296  		hit := atomic.SwapUint64(&cs.hit, 0)
   297  		miss := atomic.SwapUint64(&cs.miss, 0)
   298  		total := hit + miss
   299  		if total == 0 {
   300  			continue
   301  		}
   302  		percent := 100 * float32(hit) / float32(total)
   303  		logx.Statf("cache(%s) - qpm: %d, hit_ratio: %.1f%%, elements: %d, hit: %d, miss: %d",
   304  			cs.name, total, percent, cs.sizeCallback(), hit, miss)
   305  	}
   306  }