// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.

package cache

import (
	"fmt"
	"math"
	"time"

	"github.com/cespare/xxhash/v2"
)

// LRUStriped keeps LRU caches in buckets in order to lower mutex contention.
// This is achieved by hashing the input key to map it to a dedicated bucket.
// Each bucket (an LRU cache) has its own lock that helps distributing the lock
// contention on multiple threads/cores, leading to less wait times.
//
// LRUStriped implements the Cache interface with the same behavior as LRU.
//
// Note that, because of its distributed nature, the fixed size cannot be strictly respected
// and you may have a tiny bit more space for keys than you defined through LRUOptions.
// Bucket size is computed as follows: (size / nbuckets) + (size % nbuckets)
//
// Because of this size limit per bucket, and because of the nature of the data, you
// may have buckets filled unevenly, and because of this, keys will be evicted from the entire
// cache where a simple LRU wouldn't have. Example:
//
// Two buckets B1 and B2, of max size 2 each, meaning, theoretically, a max size of 4:
// * Say you have a set of 3 keys, they could fill an entire LRU cache.
// * But if all those keys are assigned to a single bucket B1, the first key will be evicted from B1
// * B2 will remain empty, even though there was enough memory allocated
//
// With 4 buckets and random UUIDs as keys, the amount of false evictions is around 5%.
//
// By default, the number of buckets equals the number of cpus returned from runtime.NumCPU.
//
// This struct is lock-free and intended to be used without lock.
39 type LRUStriped struct { 40 buckets []*LRU 41 name string 42 invalidateClusterEvent string 43 } 44 45 func (L LRUStriped) hashkeyMapHash(key string) uint64 { 46 return xxhash.Sum64String(key) 47 } 48 49 func (L LRUStriped) keyBucket(key string) *LRU { 50 return L.buckets[L.hashkeyMapHash(key)%uint64(len(L.buckets))] 51 } 52 53 // Purge loops through each LRU cache for purging. Since LRUStriped doesn't use any lock, 54 // each LRU bucket is purged after another one, which means that keys could still 55 // be present after a call to Purge. 56 func (L LRUStriped) Purge() error { 57 for _, lru := range L.buckets { 58 lru.Purge() // errors from purging LRU can be ignored as they always return nil 59 } 60 return nil 61 } 62 63 // Set does the same as LRU.Set 64 func (L LRUStriped) Set(key string, value interface{}) error { 65 return L.keyBucket(key).Set(key, value) 66 } 67 68 // SetWithDefaultExpiry does the same as LRU.SetWithDefaultExpiry 69 func (L LRUStriped) SetWithDefaultExpiry(key string, value interface{}) error { 70 return L.keyBucket(key).SetWithDefaultExpiry(key, value) 71 } 72 73 // SetWithExpiry does the same as LRU.SetWithExpiry 74 func (L LRUStriped) SetWithExpiry(key string, value interface{}, ttl time.Duration) error { 75 return L.keyBucket(key).SetWithExpiry(key, value, ttl) 76 } 77 78 // Get does the same as LRU.Get 79 func (L LRUStriped) Get(key string, value interface{}) error { 80 return L.keyBucket(key).Get(key, value) 81 } 82 83 // Remove does the same as LRU.Remove 84 func (L LRUStriped) Remove(key string) error { 85 return L.keyBucket(key).Remove(key) 86 } 87 88 // Keys does the same as LRU.Keys. However, because this is lock-free, keys might be 89 // inserted or removed from a previously scanned LRU cache. 90 // This is not as precise as using a single LRU instance. 
91 func (L LRUStriped) Keys() ([]string, error) { 92 var keys []string 93 for _, lru := range L.buckets { 94 k, _ := lru.Keys() // Keys never returns any error 95 keys = append(keys, k...) 96 } 97 return keys, nil 98 } 99 100 // Len does the same as LRU.Len. As for LRUStriped.Keys, this call cannot be precise. 101 func (L LRUStriped) Len() (int, error) { 102 var size int 103 for _, lru := range L.buckets { 104 s, _ := lru.Len() // Len never returns any error 105 size += s 106 } 107 return size, nil 108 } 109 110 // GetInvalidateClusterEvent does the same as LRU.GetInvalidateClusterEvent 111 func (L LRUStriped) GetInvalidateClusterEvent() string { 112 return L.invalidateClusterEvent 113 } 114 115 // Name does the same as LRU.Name 116 func (L LRUStriped) Name() string { 117 return L.name 118 } 119 120 // NewLRUStriped creates a striped LRU cache using the special LRUOptions.StripedBuckets value. 121 // See LRUStriped and LRUOptions for more details. 122 // 123 // Not that in order to prevent false eviction, this LRU cache adds 10% (computation is rounded up) of the 124 // requested size to the total cache size. 125 func NewLRUStriped(opts LRUOptions) (Cache, error) { 126 if opts.StripedBuckets == 0 { 127 return nil, fmt.Errorf("number of buckets is mandatory") 128 } 129 130 if opts.Size < opts.StripedBuckets { 131 return nil, fmt.Errorf("cache size must at least be equal to the number of buckets") 132 } 133 134 // add 10% to the total size, before splitting 135 opts.Size += int(math.Ceil(float64(opts.Size) * 10.0 / 100.0)) 136 // now this is the size for each bucket 137 opts.Size = (opts.Size / opts.StripedBuckets) + (opts.Size % opts.StripedBuckets) 138 139 buckets := make([]*LRU, opts.StripedBuckets) 140 for i := 0; i < opts.StripedBuckets; i++ { 141 buckets[i] = NewLRU(opts).(*LRU) 142 } 143 144 return LRUStriped{ 145 buckets: buckets, 146 invalidateClusterEvent: opts.InvalidateClusterEvent, 147 name: opts.Name, 148 }, nil 149 }