github.com/hlts2/go@v0.0.0-20170904000733-812b34efaed8/src/sync/map.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is a concurrent map with amortized-constant-time loads, stores, and deletes.
// It is safe for multiple goroutines to call a Map's methods concurrently.
//
// It is optimized for use in concurrent loops with keys that are
// stable over time, and either few steady-state stores, or stores
// localized to one goroutine per key.
//
// For use cases that do not share these attributes, it will likely have
// comparable or worse performance and worse type safety than an ordinary
// map paired with a read-write mutex.
//
// The zero Map is valid and empty.
//
// A Map must not be copied after first use.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value interface{}) {
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *interface{}) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *interface{}) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i interface{}) (actual interface{}, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *Map) Delete(key interface{}) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently, Range may reflect any mapping for that key
// from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value interface{}) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read, _ := m.read.Load().(readOnly)
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read, _ = m.read.Load().(readOnly)
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}
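
// The short program below is a usage sketch added for illustration; it is not
// part of the original map.go. It exercises the exported Map API documented
// above (Store, Load, LoadOrStore, Delete, Range). As a standalone program it
// would live in its own file and import the package as "sync" rather than
// being compiled into package sync itself.
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//	)
//
//	func main() {
//		var m sync.Map // the zero Map is valid and empty
//
//		// Store and Load round-trip a value.
//		m.Store("greeting", "hello")
//		if v, ok := m.Load("greeting"); ok {
//			fmt.Println(v) // hello
//		}
//
//		// LoadOrStore returns the existing value when the key is present.
//		actual, loaded := m.LoadOrStore("greeting", "hi")
//		fmt.Println(actual, loaded) // hello true
//
//		// Delete removes the key; a later Load reports ok == false.
//		m.Delete("greeting")
//		_, ok := m.Load("greeting")
//		fmt.Println(ok) // false
//
//		// Range visits each remaining key/value pair until f returns false.
//		m.Store("a", 1)
//		m.Store("b", 2)
//		m.Range(func(key, value interface{}) bool {
//			fmt.Println(key, value)
//			return true
//		})
//	}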