github.com/min1324/cmap@v1.0.3-0.20220418125848-74e72bbe3be4/map.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cmap

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
type Map struct {
	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int

	// count tracks the number of elements in the map. It is updated with
	// atomic operations and reported by Count.
	count int64
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))
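
// cacheSketch is a hypothetical example, not part of the original package,
// illustrating use case (1) from the Map doc comment above: a cache whose
// entries are written once and then read many times. The function name,
// keys, and values here are assumptions made purely for illustration.
func cacheSketch() {
	var cache Map // the zero Map is empty and ready for use

	// Populate once; subsequent lookups for these keys take the
	// lock-free read path.
	cache.Store("greeting", "hello")
	cache.Store("answer", 42)

	if v, ok := cache.Load("answer"); ok {
		_ = v.(int) // values are interface{}; callers type-assert as needed
	}

	_ = cache.Count() // reports the tracked element count
	cache.Delete("greeting")
}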

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
			atomic.AddInt64(&m.count, 1)
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		atomic.AddInt64(&m.count, 1)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}
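
// exampleDisjointWriters is a hypothetical sketch, not part of the original
// package, of use case (2) from the Map doc comment: multiple goroutines
// reading and writing entries for disjoint key sets. The function name and
// the worker count are illustrative assumptions only.
func exampleDisjointWriters() int64 {
	var m Map
	var wg sync.WaitGroup
	for w := 0; w < 4; w++ {
		wg.Add(1)
		go func(w int) {
			defer wg.Done()
			// Each goroutine owns its own key, so writers never contend on
			// the same entry and repeated loads hit the lock-free read path.
			m.Store(w, w*w)
			if v, ok := m.Load(w); ok {
				_ = v.(int)
			}
		}(w)
	}
	wg.Wait()
	return m.Count()
}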

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
			atomic.AddInt64(&m.count, 1)
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
		atomic.AddInt64(&m.count, 1)
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key interface{}) (value interface{}, loaded bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			atomic.AddInt64(&m.count, ^int64(0))
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}
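
// loadOrStoreSketch is a hypothetical example, not part of the original
// package, of the LoadOrStore and LoadAndDelete call patterns defined above.
// The function name, key, and values are illustrative assumptions only.
func loadOrStoreSketch() (taken interface{}, ok bool) {
	var m Map

	// LoadOrStore initializes a key at most once: the first caller stores
	// the value and sees loaded == false; later callers get the existing
	// value with loaded == true.
	actual, loaded := m.LoadOrStore("config", "default")
	_ = actual
	_ = loaded

	// LoadAndDelete atomically takes ownership of a value: for a given
	// stored value, at most one caller observes loaded == true.
	return m.LoadAndDelete("config")
}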

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	m.LoadAndDelete(key)
}

func (e *entry) delete() (value interface{}, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*interface{})(p), true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// Count returns the number of elements within the map.
func (m *Map) Count() int64 {
	return atomic.LoadInt64(&m.count)
}

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
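
// rangeSketch is a hypothetical example, not part of the original package,
// showing how Range can copy the map's current contents into a plain Go map.
// As documented on Range, the result is not a consistent snapshot if the Map
// is mutated concurrently; the function name is an illustrative assumption.
func rangeSketch(m *Map) map[interface{}]interface{} {
	out := make(map[interface{}]interface{}, m.Count())
	m.Range(func(key, value interface{}) bool {
		out[key] = value
		return true // keep iterating; returning false would stop Range early
	})
	return out
}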