github.com/AntonOrnatskyi/goproxy@v0.0.0-20190205095733-4526a9fa18b4/utils/mapx/map.go

package mapx

import (
	"encoding/json"
	"fmt"
	"runtime/debug"
	"sync"
)

var SHARD_COUNT = 32

// A "thread" safe map of type string:Anything.
// To avoid lock bottlenecks this map is divided into several (SHARD_COUNT) map shards.
type ConcurrentMap []*ConcurrentMapShared

// A "thread" safe string to anything map.
type ConcurrentMapShared struct {
	items        map[string]interface{}
	sync.RWMutex // Read/write mutex, guards access to the internal map.
}

// Creates a new concurrent map.
func NewConcurrentMap() ConcurrentMap {
	m := make(ConcurrentMap, SHARD_COUNT)
	for i := 0; i < SHARD_COUNT; i++ {
		m[i] = &ConcurrentMapShared{items: make(map[string]interface{})}
	}
	return m
}

// Returns the shard for the given key.
func (m ConcurrentMap) GetShard(key string) *ConcurrentMapShared {
	return m[uint(fnv32(key))%uint(SHARD_COUNT)]
}

// Sets all key/value pairs from the given map.
func (m ConcurrentMap) MSet(data map[string]interface{}) {
	for key, value := range data {
		shard := m.GetShard(key)
		shard.Lock()
		shard.items[key] = value
		shard.Unlock()
	}
}

// Sets the given value under the specified key.
func (m ConcurrentMap) Set(key string, value interface{}) {
	// Get map shard.
	shard := m.GetShard(key)
	shard.Lock()
	shard.items[key] = value
	shard.Unlock()
}

// Callback that returns the new element to be inserted into the map.
// It is called while the shard lock is held, therefore it MUST NOT
// try to access other keys in the same map, as that can lead to deadlock since
// Go's sync.RWMutex is not reentrant.
type UpsertCb func(exist bool, valueInMap interface{}, newValue interface{}) interface{}

// Insert or Update - updates an existing element or inserts a new one using UpsertCb.
func (m ConcurrentMap) Upsert(key string, value interface{}, cb UpsertCb) (res interface{}) {
	shard := m.GetShard(key)
	shard.Lock()
	v, ok := shard.items[key]
	res = cb(ok, v, value)
	shard.items[key] = res
	shard.Unlock()
	return res
}

// Sets the given value under the specified key if no value was associated with it.
// Reports whether the value was set.
func (m ConcurrentMap) SetIfAbsent(key string, value interface{}) bool {
	// Get map shard.
	shard := m.GetShard(key)
	shard.Lock()
	_, ok := shard.items[key]
	if !ok {
		shard.items[key] = value
	}
	shard.Unlock()
	return !ok
}

// Retrieves an element from the map under the given key.
func (m ConcurrentMap) Get(key string) (interface{}, bool) {
	// Get shard.
	shard := m.GetShard(key)
	shard.RLock()
	// Get item from shard.
	val, ok := shard.items[key]
	shard.RUnlock()
	return val, ok
}

// Returns the number of elements within the map.
func (m ConcurrentMap) Count() int {
	count := 0
	for i := 0; i < SHARD_COUNT; i++ {
		shard := m[i]
		shard.RLock()
		count += len(shard.items)
		shard.RUnlock()
	}
	return count
}

// Looks up an item under the specified key.
func (m ConcurrentMap) Has(key string) bool {
	// Get shard.
	shard := m.GetShard(key)
	shard.RLock()
	// See if the element is within the shard.
	_, ok := shard.items[key]
	shard.RUnlock()
	return ok
}

// Removes an element from the map.
func (m ConcurrentMap) Remove(key string) {
	// Try to get shard.
	shard := m.GetShard(key)
	shard.Lock()
	delete(shard.items, key)
	shard.Unlock()
}

// Removes an element from the map and returns it.
func (m ConcurrentMap) Pop(key string) (v interface{}, exists bool) {
	// Try to get shard.
	shard := m.GetShard(key)
	shard.Lock()
	v, exists = shard.items[key]
	delete(shard.items, key)
	shard.Unlock()
	return v, exists
}

// Checks if the map is empty.
func (m ConcurrentMap) IsEmpty() bool {
	return m.Count() == 0
}

// Used by the Iter & IterBuffered functions to wrap two variables together over a channel.
type Tuple struct {
	Key string
	Val interface{}
}

// Returns an iterator which could be used in a for range loop.
//
// Deprecated: use IterBuffered() instead; it has better performance.
func (m ConcurrentMap) Iter() <-chan Tuple {
	chans := snapshot(m)
	ch := make(chan Tuple)
	go fanIn(chans, ch)
	return ch
}

// Returns a buffered iterator which could be used in a for range loop.
func (m ConcurrentMap) IterBuffered() <-chan Tuple {
	chans := snapshot(m)
	total := 0
	for _, c := range chans {
		total += cap(c)
	}
	ch := make(chan Tuple, total)
	go fanIn(chans, ch)
	return ch
}

// Returns an array of channels that contain the elements of each shard,
// which effectively takes a snapshot of `m`.
// It returns once the size of each buffered channel is determined,
// before all the channels are populated using goroutines.
func snapshot(m ConcurrentMap) (chans []chan Tuple) {
	chans = make([]chan Tuple, SHARD_COUNT)
	wg := sync.WaitGroup{}
	wg.Add(SHARD_COUNT)
	// Foreach shard.
	for index, shard := range m {
		go func(index int, shard *ConcurrentMapShared) {
			// Foreach key, value pair.
			shard.RLock()
			chans[index] = make(chan Tuple, len(shard.items))
			wg.Done()
			for key, val := range shard.items {
				chans[index] <- Tuple{key, val}
			}
			shard.RUnlock()
			close(chans[index])
		}(index, shard)
	}
	wg.Wait()
	return chans
}

// fanIn reads elements from channels `chans` into channel `out`.
func fanIn(chans []chan Tuple, out chan Tuple) {
	defer func() {
		if e := recover(); e != nil {
			fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
		}
	}()
	wg := sync.WaitGroup{}
	wg.Add(len(chans))
	for _, ch := range chans {
		go func(ch chan Tuple) {
			defer func() {
				if e := recover(); e != nil {
					fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
				}
			}()
			for t := range ch {
				out <- t
			}
			wg.Done()
		}(ch)
	}
	wg.Wait()
	close(out)
}

// Returns all items as map[string]interface{}.
func (m ConcurrentMap) Items() map[string]interface{} {
	tmp := make(map[string]interface{})

	// Insert items into a temporary map.
	for item := range m.IterBuffered() {
		tmp[item.Key] = item.Val
	}

	return tmp
}

// Iterator callback, called for every key/value pair found in the map.
// The RLock is held for all calls within a given shard, therefore the
// callback sees a consistent view of a single shard, but not across shards.
type IterCb func(key string, v interface{})

// Callback based iterator, cheapest way to read
// all elements in a map.
func (m ConcurrentMap) IterCb(fn IterCb) {
	for idx := range m {
		shard := m[idx]
		shard.RLock()
		for key, value := range shard.items {
			fn(key, value)
		}
		shard.RUnlock()
	}
}

// Returns all keys as []string.
func (m ConcurrentMap) Keys() []string {
	count := m.Count()
	ch := make(chan string, count)
	go func() {
		defer func() {
			if e := recover(); e != nil {
				fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
			}
		}()
		// Foreach shard.
		wg := sync.WaitGroup{}
		wg.Add(SHARD_COUNT)
		for _, shard := range m {
			go func(shard *ConcurrentMapShared) {
				defer func() {
					if e := recover(); e != nil {
						fmt.Printf("crashed, err: %s\nstack:%s", e, string(debug.Stack()))
					}
				}()
				// Foreach key, value pair.
				shard.RLock()
				for key := range shard.items {
					ch <- key
				}
				shard.RUnlock()
				wg.Done()
			}(shard)
		}
		wg.Wait()
		close(ch)
	}()

	// Generate keys.
	keys := make([]string, 0, count)
	for k := range ch {
		keys = append(keys, k)
	}
	return keys
}

// Reveals ConcurrentMap "private" variables to json.Marshal.
func (m ConcurrentMap) MarshalJSON() ([]byte, error) {
	// Create a temporary map, which will hold all items spread across shards.
	tmp := make(map[string]interface{})

	// Insert items into the temporary map.
	for item := range m.IterBuffered() {
		tmp[item.Key] = item.Val
	}
	return json.Marshal(tmp)
}

func fnv32(key string) uint32 {
	hash := uint32(2166136261)
	const prime32 = uint32(16777619)
	for i := 0; i < len(key); i++ {
		hash *= prime32
		hash ^= uint32(key[i])
	}
	return hash
}

// ConcurrentMap uses interface{} as its value type, therefore json.Unmarshal
// won't know which type to unmarshal into; in such a case we end up with a
// value of type map[string]interface{}. In most cases this isn't our value
// type, which is why this functionality was removed.

// func (m *ConcurrentMap) UnmarshalJSON(b []byte) (err error) {
// 	// Reverse process of Marshal.

// 	tmp := make(map[string]interface{})

// 	// Unmarshal into a single map.
// 	if err := json.Unmarshal(b, &tmp); err != nil {
// 		return err
// 	}

// 	// Foreach key, value pair in the temporary map, insert it into our concurrent map.
// 	for key, val := range tmp {
// 		m.Set(key, val)
// 	}
// 	return nil
// }
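Below is a minimal usage sketch, not part of the original file. It assumes the package is importable from this module's utils/mapx path and shows Set/Get, an Upsert callback that accumulates integers into a slice (it runs under the shard lock, as the UpsertCb comment requires), and iteration over a buffered snapshot via IterBuffered.

package main

import (
	"fmt"

	// Assumed import path for this package; adjust to wherever mapx lives in your module.
	"github.com/AntonOrnatskyi/goproxy/utils/mapx"
)

func main() {
	m := mapx.NewConcurrentMap()

	// Basic Set / Get.
	m.Set("alpha", 1)
	if v, ok := m.Get("alpha"); ok {
		fmt.Println("alpha =", v)
	}

	// Upsert: the callback decides the stored value while the shard lock is held.
	// Here it appends each new int to a []int kept under the key "nums".
	appendCb := func(exist bool, valueInMap interface{}, newValue interface{}) interface{} {
		if !exist {
			return []int{newValue.(int)}
		}
		return append(valueInMap.([]int), newValue.(int))
	}
	m.Upsert("nums", 1, appendCb)
	m.Upsert("nums", 2, appendCb)

	// Iterate over a buffered snapshot of all shards.
	for item := range m.IterBuffered() {
		fmt.Println(item.Key, item.Val)
	}

	fmt.Println("count:", m.Count())
}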