github.com/Azareal/Gosora@v0.0.0-20210729070923-553e66b59003/common/user_cache.go

package common

import (
	"sync"
	"sync/atomic"
)

// UserCache is an interface which spits out users from a fast cache rather than the database, whether from memory or from an application like Redis. Users may not be present in the cache, but may still be in the database.
type UserCache interface {
	DeallocOverflow(evictPriority bool) (evicted int) // May cause thread contention, looks for items to evict
	Get(id int) (*User, error)
	Getn(id int) *User
	GetUnsafe(id int) (*User, error)
	BulkGet(ids []int) (list []*User)
	Set(item *User) error
	Add(item *User) error
	AddUnsafe(item *User) error
	Remove(id int) error
	RemoveUnsafe(id int) error
	Flush()
	Length() int
	SetCapacity(cap int)
	GetCapacity() int
}

// MemoryUserCache stores and pulls users out of the current process' memory
type MemoryUserCache struct {
	items    map[int]*User // TODO: Shard this into two?
	length   int64
	capacity int

	sync.RWMutex
}

// NewMemoryUserCache gives you a new instance of MemoryUserCache
func NewMemoryUserCache(cap int) *MemoryUserCache {
	return &MemoryUserCache{
		items:    make(map[int]*User),
		capacity: cap,
	}
}

// TODO: Avoid deallocating topic list users
func (s *MemoryUserCache) DeallocOverflow(evictPriority bool) (evicted int) {
	toEvict := make([]int, 10)
	evIndex := 0
	s.RLock()
	for _, user := range s.items {
		if /*user.LastActiveAt < lastActiveCutoff && */ user.Score == 0 && !user.IsMod {
			if EnableWebsockets && WsHub.HasUser(user.ID) {
				continue
			}
			toEvict[evIndex] = user.ID
			evIndex++
			if evIndex == 10 {
				break
			}
		}
	}
	s.RUnlock()

	// Clear some of the less active users now with a bit more aggressiveness
	if evIndex == 0 && evictPriority {
		toEvict = make([]int, 20)
		s.RLock()
		for _, user := range s.items {
			if user.Score < 100 && !user.IsMod {
				if EnableWebsockets && WsHub.HasUser(user.ID) {
					continue
				}
				toEvict[evIndex] = user.ID
				evIndex++
				if evIndex == 20 {
					break
				}
			}
		}
		s.RUnlock()
	}

	// Trim the unused (zero) slots off the evictable list, so we don't waste precious cycles locked for those
	toEvict = toEvict[:evIndex]

	s.BulkRemove(toEvict)
	return len(toEvict)
}

// Get fetches a user by ID. Returns ErrNoRows if not present.
func (s *MemoryUserCache) Get(id int) (*User, error) {
	s.RLock()
	item := s.items[id]
	s.RUnlock()
	if item == nil {
		return item, ErrNoRows
	}
	return item, nil
}

// Getn fetches a user by ID, returning nil if not present.
func (s *MemoryUserCache) Getn(id int) *User {
	s.RLock()
	item := s.items[id]
	s.RUnlock()
	return item
}

// BulkGet fetches multiple users by their IDs. Indices without users will be set to nil, so make sure you check for those. We might want to change this behaviour to make it less confusing.
func (s *MemoryUserCache) BulkGet(ids []int) (list []*User) {
	list = make([]*User, len(ids))
	s.RLock()
	for i, id := range ids {
		list[i] = s.items[id]
	}
	s.RUnlock()
	return list
}
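// The helper below is a hypothetical sketch, not part of the original file: it shows how a
// caller might consume BulkGet's result, since cache misses come back as nil slots in the
// returned slice. The miss IDs are collected so the caller can fetch them from the database.
func exampleBulkGetMissHandling(c UserCache, ids []int) (hits []*User, missIDs []int) {
	for i, u := range c.BulkGet(ids) {
		if u == nil {
			// Not cached; these IDs would typically be looked up in the database next.
			missIDs = append(missIDs, ids[i])
			continue
		}
		hits = append(hits, u)
	}
	return hits, missIDs
}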
// GetUnsafe fetches a user by ID. Returns ErrNoRows if not present. THIS METHOD IS NOT THREAD-SAFE.
func (s *MemoryUserCache) GetUnsafe(id int) (*User, error) {
	item, ok := s.items[id]
	if ok {
		return item, nil
	}
	return item, ErrNoRows
}

// Set overwrites the value of a user in the cache, whether it's present or not. May return a capacity overflow error.
func (s *MemoryUserCache) Set(item *User) error {
	s.Lock()
	user, ok := s.items[item.ID]
	if ok {
		// Copy into the existing entry while still holding the lock, so concurrent Sets don't race on the write
		*user = *item
		s.Unlock()
	} else if int(s.length) >= s.capacity {
		s.Unlock()
		return ErrStoreCapacityOverflow
	} else {
		s.items[item.ID] = item
		s.Unlock()
		atomic.AddInt64(&s.length, 1)
	}
	return nil
}

// Add adds a user to the cache, similar to Set, but it's only intended for new items. This method might be deprecated in the near future; use Set. May return a capacity overflow error.
// ? Is this redundant if we have Set? Are the efficiency wins worth this? Is this even used?
func (s *MemoryUserCache) Add(item *User) error {
	s.Lock()
	if int(s.length) >= s.capacity {
		s.Unlock()
		return ErrStoreCapacityOverflow
	}
	s.items[item.ID] = item
	s.length = int64(len(s.items))
	s.Unlock()
	return nil
}

// AddUnsafe is the unsafe version of Add. May return a capacity overflow error. THIS METHOD IS NOT THREAD-SAFE.
func (s *MemoryUserCache) AddUnsafe(item *User) error {
	if int(s.length) >= s.capacity {
		return ErrStoreCapacityOverflow
	}
	s.items[item.ID] = item
	s.length = int64(len(s.items))
	return nil
}

// Remove removes a user from the cache by ID, if they exist. Returns ErrNoRows if no such item exists.
func (s *MemoryUserCache) Remove(id int) error {
	s.Lock()
	_, ok := s.items[id]
	if !ok {
		s.Unlock()
		return ErrNoRows
	}
	delete(s.items, id)
	s.Unlock()
	atomic.AddInt64(&s.length, -1)
	return nil
}

// RemoveUnsafe is the unsafe version of Remove. THIS METHOD IS NOT THREAD-SAFE.
func (s *MemoryUserCache) RemoveUnsafe(id int) error {
	_, ok := s.items[id]
	if !ok {
		return ErrNoRows
	}
	delete(s.items, id)
	atomic.AddInt64(&s.length, -1)
	return nil
}

// BulkRemove removes multiple users from the cache by ID, skipping any that aren't present.
func (s *MemoryUserCache) BulkRemove(ids []int) {
	var rCount int64
	s.Lock()
	for _, id := range ids {
		_, ok := s.items[id]
		if ok {
			delete(s.items, id)
			rCount++
		}
	}
	s.Unlock()
	atomic.AddInt64(&s.length, -rCount)
}

// Flush removes all the users from the cache, useful for tests.
func (s *MemoryUserCache) Flush() {
	s.Lock()
	s.items = make(map[int]*User)
	s.length = 0
	s.Unlock()
}

// ! Is this concurrent?
// Length returns the number of users in the memory cache
func (s *MemoryUserCache) Length() int {
	return int(s.length)
}

// SetCapacity sets the maximum number of users which this cache can hold
func (s *MemoryUserCache) SetCapacity(cap int) {
	// Ints are moved in a single instruction, so this should be thread-safe
	s.capacity = cap
}

// GetCapacity returns the maximum number of users this cache can hold
func (s *MemoryUserCache) GetCapacity() int {
	return s.capacity
}
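// The function below is a hypothetical usage sketch, not part of the original file. It walks
// through the typical MemoryUserCache flow: construct with a capacity, Set users, Get them
// back (ErrNoRows signals a miss), hit ErrStoreCapacityOverflow once the capacity is reached,
// and call DeallocOverflow to evict low-value users and free up room. Only the User fields
// already referenced above (ID, Score) are assumed here.
func exampleMemoryUserCacheUsage() {
	c := NewMemoryUserCache(2) // room for two users

	// Set inserts a new user, or overwrites the cached copy if the ID is already present.
	_ = c.Set(&User{ID: 1, Score: 50})
	_ = c.Set(&User{ID: 2})

	// Get returns ErrNoRows on a miss, at which point callers fall back to the database.
	if u, err := c.Get(1); err == nil {
		_ = u.Score
	}

	// With the cache full, Set/Add of a new ID report a capacity overflow.
	if err := c.Set(&User{ID: 3}); err == ErrStoreCapacityOverflow {
		// DeallocOverflow tries to evict low-score, non-moderator users; passing true
		// lets it evict more aggressively if the first pass finds no candidates.
		_ = c.DeallocOverflow(true)
	}

	_ = c.Remove(2)
}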