github.com/slayercat/go@v0.0.0-20170428012452-c51559813f61/src/sync/map.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
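// The sketch below is illustrative and not part of the original file: it
// demonstrates the zero-value usability documented on the Map type above,
// using only the exported methods defined later in this file. The function
// name is hypothetical.
func exampleZeroValueUsage() {
	var m Map // the zero Map is valid and empty; no constructor is needed
	m.Store("alpha", 1)
	if v, ok := m.Load("alpha"); ok {
		_ = v.(int) // values come back as interface{} and must be type-asserted
	}
	m.Delete("alpha")
}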
func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}
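// The sketch below is illustrative and not part of the original file: it
// isolates the load/compare-and-swap retry pattern that tryStore uses, over
// a standalone pointer. The function name and parameters are hypothetical.
func casRetrySketch(addr *unsafe.Pointer, sentinel, next unsafe.Pointer) bool {
	for {
		p := atomic.LoadPointer(addr)
		if p == sentinel {
			// A sentinel state (expunged, in tryStore's case) forbids the
			// update; give up and let the caller take the slow path.
			return false
		}
		if atomic.CompareAndSwapPointer(addr, p, next) {
			return true // no writer intervened; our value is installed
		}
		// Another goroutine changed *addr between the load and the CAS;
		// reload and retry.
	}
}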
// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}
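// The sketch below is illustrative and not part of the original file: it
// shows the once-per-key initialization pattern that LoadOrStore enables.
// The function name and parameters are hypothetical.
func exampleLoadOrStore(m *Map, key interface{}, build func() interface{}) interface{} {
	// Fast path: another goroutine may already have initialized this key.
	if v, ok := m.Load(key); ok {
		return v
	}
	// Slow path: build a candidate and race to install it. If another
	// goroutine stored first, actual is its value and ours is discarded.
	actual, _ := m.LoadOrStore(key, build())
	return actual
}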
// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

// missLocked records a miss against the read map and, once misses is at
// least the size of the dirty map (covering the cost of the copy), promotes
// the dirty map to the read map.
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

// dirtyLocked allocates the dirty map if it is nil, seeding it with the
// non-expunged entries of the read map.
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

// tryExpungeLocked marks a deleted (nil) entry as expunged so that it can be
// omitted from the new dirty map. It reports whether the entry is expunged
// when it returns.
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
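// The sketch below is illustrative and not part of the original file: it
// shows a typical Range traversal. As documented above, breaking out early
// (returning false) may still cost O(N) in the worst case, because an
// amended read map triggers a full promotion of the dirty map first. The
// function name is hypothetical.
func exampleCountEntries(m *Map) int {
	n := 0
	m.Range(func(_, _ interface{}) bool {
		n++
		return true // keep iterating over the remaining keys
	})
	return n
}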