github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/sync/map.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads, stores, and deletes run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a given
// key is only ever written once but read many times, as in caches that only grow,
// or (2) when multiple goroutines read, write, and overwrite entries for disjoint
// sets of keys. In these two cases, use of a Map may significantly reduce lock
// contention compared to a Go map paired with a separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after first use.
//
// In the terminology of the Go memory model, Map arranges that a write operation
// “synchronizes before” any read operation that observes the effect of the write, where
// read and write operations are defined as follows.
// Load, LoadAndDelete, and LoadOrStore are read operations;
// Delete, LoadAndDelete, and Store are write operations;
// and LoadOrStore is a write operation when it returns loaded set to false.
type Map struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Pointer[readOnly]

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[any]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}
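
// Illustrative sketch (not part of the original file): a minimal example of
// the access pattern the doc comment on Map describes. The helper name
// exampleUsage is hypothetical; only the exported API (Store, Load,
// LoadOrStore, Delete) is assumed.
func exampleUsage() {
	var m Map // the zero Map is empty and ready for use
	m.Store("config", 42)
	if v, ok := m.Load("config"); ok {
		_ = v.(int) // values come back as any and must be type-asserted
	}
	// LoadOrStore returns the existing value when present; here loaded == true
	// and actual == 42 because "config" was stored above.
	actual, loaded := m.LoadOrStore("config", 99)
	_, _ = actual, loaded
	m.Delete("config")
}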

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[any]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(any))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted, and either m.dirty == nil or
	// m.dirty[key] is e.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}
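
// Illustrative sketch (not part of the original file): entryStateSketch is a
// hypothetical helper that names the three states an entry's p pointer can be
// in, mirroring the comment on entry above.
func entryStateSketch(e *entry) string {
	switch p := atomic.LoadPointer(&e.p); p {
	case nil:
		return "deleted (m.dirty is nil or still holds this entry)"
	case expunged:
		return "deleted and expunged (missing from m.dirty)"
	default:
		return "live (p points to the stored value)"
	}
}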

func newEntry(i any) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

func (m *Map) loadReadOnly() readOnly {
	if p := m.read.Load(); p != nil {
		return *p
	}
	return readOnly{}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key any) (value any, ok bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value any, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*any)(p), true
}

// Store sets the value for a key.
func (m *Map) Store(key, value any) {
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok && e.tryStore(&value) {
		return
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(&value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(&value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(i *any) bool {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, unsafe.Pointer(i)) {
			return true
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(i *any) {
	atomic.StorePointer(&e.p, unsafe.Pointer(i))
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key, value any) (actual any, loaded bool) {
	// Avoid locking if it's a clean hit.
	read := m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		actual, loaded, ok := e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read = m.loadReadOnly()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(&readOnly{m: read.m, amended: true})
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}
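
// Illustrative sketch (not part of the original file): the classic
// get-or-create idiom built on LoadOrStore. The names getOrCreateSketch,
// cache, and newValue are hypothetical.
func getOrCreateSketch(cache *Map, key any, newValue func() any) any {
	if v, ok := cache.Load(key); ok {
		return v // fast path: hit in the read map, no lock taken
	}
	// Several goroutines may construct a candidate value, but only one wins
	// the store; the rest receive the winner's value with loaded == true.
	actual, _ := cache.LoadOrStore(key, newValue())
	return actual
}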

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(i any) (actual any, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*any)(p), true, true
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i
	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return *(*any)(p), true, true
		}
	}
}

// LoadAndDelete deletes the value for a key, returning the previous value if any.
// The loaded result reports whether the key was present.
func (m *Map) LoadAndDelete(key any) (value any, loaded bool) {
	read := m.loadReadOnly()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read = m.loadReadOnly()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			delete(m.dirty, key)
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if ok {
		return e.delete()
	}
	return nil, false
}

// Delete deletes the value for a key.
func (m *Map) Delete(key any) {
	m.LoadAndDelete(key)
}

func (e *entry) delete() (value any, ok bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return nil, false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return *(*any)(p), true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the Map's
// contents: no key will be visited more than once, but if the value for any key
// is stored or deleted concurrently (including by f), Range may reflect any
// mapping for that key from any point during the Range call. Range does not
// block other methods on the receiver; even f itself may call any method on m.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
func (m *Map) Range(f func(key, value any) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read := m.loadReadOnly()
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read = m.loadReadOnly()
		if read.amended {
			read = readOnly{m: m.dirty}
			m.read.Store(&read)
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}
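
// Illustrative sketch (not part of the original file): a hypothetical caller
// showing how Range is typically consumed. Because Range offers no consistent
// snapshot, a count like this is only approximate under concurrent writes.
func rangeCountSketch(m *Map) int {
	n := 0
	m.Range(func(key, value any) bool {
		n++         // each key is visited at most once
		return true // returning false would stop the iteration early
	})
	return n
}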

func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(&readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}
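
// Illustrative note (not part of the original file): missLocked promotes on
// the miss that makes misses reach len(dirty), amortizing the O(len(dirty))
// copy against an equal number of slow-path loads. The helper below is
// hypothetical and only mirrors that arithmetic.
func promotesOnNextMissSketch(misses, dirtyLen int) bool {
	return misses+1 >= dirtyLen // missLocked increments, then compares
}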

func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read := m.loadReadOnly()
	m.dirty = make(map[any]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}