github.com/diamondburned/arikawa/v2@v2.1.0/internal/moreatomic/syncmod/syncmod.go

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package syncmod contains a clone of package sync's map.go file with unused
// methods removed and some tweaks to LoadOrStore.
package syncmod

import (
	"sync"
	"sync/atomic"
	"unsafe"
)

// Map is like a Go map[interface{}]interface{} but is safe for concurrent use
// by multiple goroutines without additional locking or coordination.
// Loads and stores run in amortized constant time.
//
// The Map type is specialized. Most code should use a plain Go map instead,
// with separate locking or coordination, for better type safety and to make it
// easier to maintain other invariants along with the map content.
//
// The Map type is optimized for two common use cases: (1) when the entry for a
// given key is only ever written once but read many times, as in caches that
// only grow, or (2) when multiple goroutines read, write, and overwrite
// entries for disjoint sets of keys. In these two cases, use of a Map may
// significantly reduce lock contention compared to a Go map paired with a
// separate Mutex or RWMutex.
//
// The zero Map is empty and ready for use. A Map must not be copied after
// first use.
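//
// Unlike sync.Map, this clone has no Store, Delete, or Range; missing values
// are created on demand by the New field. A minimal usage sketch (the key and
// value types here are illustrative, not part of this package):
//
//	var m Map
//	m.New = func() interface{} { return new(sync.Once) }
//	v, loaded := m.LoadOrStore("some key")
//	once := v.(*sync.Once) // loaded reports whether the value already existed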
type Map struct {
	// New constructs the value to store when LoadOrStore misses. It must be
	// set to a non-nil function before the Map is used.
	New func() interface{}

	mu sync.Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read atomic.Value // readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[interface{}]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the Map.read field.
type readOnly struct {
	m       map[interface{}]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(interface{}))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the interface{} value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer // *interface{}
}

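// newEntry returns an entry holding a copy of i.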
func newEntry(i interface{}) *entry {
	return &entry{p: unsafe.Pointer(&i)}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *Map) Load(key interface{}) (value interface{}, ok bool) {
	read, _ := m.read.Load().(readOnly)
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read, _ = m.read.Load().(readOnly)
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

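// load atomically reads the entry's value. It reports ok == false if the
// entry has been deleted (nil) or expunged.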
func (e *entry) load() (value interface{}, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return *(*interface{})(p), true
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// LoadOrStore returns the existing value for the key if present. Otherwise, it
// stores and returns a new value obtained from m.New.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(k interface{}) (actual interface{}, loaded bool) {
	// Avoid locking if it's a clean hit.
	read, _ := m.read.Load().(readOnly)
	if e, ok := read.m[k]; ok {
		actual, loaded, ok = e.tryLoadOrStore(nil, m.New)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read, _ = m.read.Load().(readOnly)
	if e, ok := read.m[k]; ok {
		if e.unexpungeLocked() {
			m.dirty[k] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(actual, m.New)
	} else if e, ok := m.dirty[k]; ok {
		actual, loaded, _ = e.tryLoadOrStore(actual, m.New)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			m.read.Store(readOnly{m: read.m, amended: true})
		}
		// actual may already be non-nil here: the fast-path tryLoadOrStore can
		// allocate via m.New and then observe the entry becoming expunged. Only
		// allocate if no value has been created yet.
		if actual == nil {
			actual = m.New()
		}
		m.dirty[k] = newEntry(actual)
		loaded = false
	}
	m.mu.Unlock()

	return actual, loaded
}

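// A minimal sketch of the intended call pattern (the waiter map and event key
// below are illustrative, not part of this package): callers racing on the
// same key all receive the same stored value, and at most one New'd value is
// kept, although New itself may run more than once with the losers' results
// discarded.
//
//	var waiters Map
//	waiters.New = func() interface{} { return make(chan struct{}) }
//
//	v, loaded := waiters.LoadOrStore(eventID)
//	ch := v.(chan struct{})
//	if !loaded {
//		// this call created ch via waiters.New; other callers load the same ch
//	}
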
// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(
	i interface{}, newFn func() interface{}) (actual interface{}, loaded, ok bool) {

	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return *(*interface{})(p), true, true
	}

	if i == nil {
		i = newFn()
	}

	// Copy the interface after the first load to make this method more amenable
	// to escape analysis: if we hit the "load" path or the entry is expunged, we
	// shouldn't bother heap-allocating.
	ic := i

	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, unsafe.Pointer(&ic)) {
			return i, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return i, false, false
		}
		if p != nil {
			return *(*interface{})(p), true, true
		}
	}
}

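// missLocked records a load that had to take mu. Once the number of misses
// reaches len(m.dirty), the dirty map is promoted wholesale to the read map.
// For example (an illustration, not part of the original source): with 8
// entries in m.dirty, the 8th consecutive locked miss triggers promotion, and
// those keys are then served by the lock-free read path until a new key is
// stored.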
func (m *Map) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	m.read.Store(readOnly{m: m.dirty})
	m.dirty = nil
	m.misses = 0
}

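// dirtyLocked initializes m.dirty from the read map if it is nil, expunging
// deleted (nil) entries so they are omitted from the copy. m.mu must be held.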
func (m *Map) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read, _ := m.read.Load().(readOnly)
	m.dirty = make(map[interface{}]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

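// tryExpungeLocked swaps a deleted (nil) entry to expunged so that dirtyLocked
// can skip it, and reports whether the entry ended up expunged. m.mu must be
// held.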
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}