github.com/cockroachdb/cockroachdb-parser@v0.23.3-0.20240213214944-911057d40c9a/pkg/util/syncutil/int_map.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in licenses/BSD-golang.txt.

// This code originated in Go's sync package.

package syncutil

import (
	"sync/atomic"
	"unsafe"
)

// IntMap is a concurrent map with amortized-constant-time loads, stores, and
// deletes. It is safe for multiple goroutines to call an IntMap's methods
// concurrently.
//
// It is optimized for use in concurrent loops with keys that are
// stable over time, and either few steady-state stores, or stores
// localized to one goroutine per key.
//
// For use cases that do not share these attributes, it will likely have
// comparable or worse performance and worse type safety than an ordinary
// map paired with a read-write mutex.
//
// Nil values are not supported; to use an IntMap as a set, store a
// dummy non-nil pointer instead of nil.
//
// The zero IntMap is valid and empty.
//
// An IntMap must not be copied after first use.
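//
// A minimal usage sketch (illustrative only; the key 42 and the *int value are
// arbitrary assumptions, not taken from this package):
//
//	var m IntMap
//	v := new(int)
//	m.Store(42, unsafe.Pointer(v))
//	if p, ok := m.Load(42); ok {
//		_ = (*int)(p) // callers convert back to the concrete pointer type
//	}
//	m.Delete(42)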
type IntMap struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read unsafe.Pointer // *readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[int64]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the IntMap.read field.
type readOnly struct {
	m       map[int64]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(int))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
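	//
	// Summarizing the transitions described above (an informal sketch, not an
	// exhaustive specification):
	//
	//	valid    -> valid    (store of a new value by atomic replacement)
	//	valid    -> nil      (delete)
	//	nil      -> valid    (store while the entry is present in read or dirty)
	//	nil      -> expunged (while a new dirty map is being built)
	//	expunged -> nil      (unexpunge under mu, re-adding the entry to dirty)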
	p unsafe.Pointer
}

func newEntry(r unsafe.Pointer) *entry {
	return &entry{p: r}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *IntMap) Load(key int64) (value unsafe.Pointer, ok bool) {
	read := m.getRead()
	e, ok := read.m[key]
	if !ok && read.amended {
		func() {
			m.mu.Lock()
			defer m.mu.Unlock()
			// Avoid reporting a spurious miss if m.dirty got promoted while we were
			// blocked on m.mu. (If further loads of the same key will not miss, it's
			// not worth copying the dirty map for this key.)
			read = m.getRead()
			e, ok = read.m[key]
			if !ok && read.amended {
				e, ok = m.dirty[key]
				// Regardless of whether the entry was present, record a miss: this key
				// will take the slow path until the dirty map is promoted to the read
				// map.
				m.missLocked()
			}
		}()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

func (e *entry) load() (value unsafe.Pointer, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return p, true
}

// Store sets the value for a key.
func (m *IntMap) Store(key int64, value unsafe.Pointer) {
	read := m.getRead()
	if e, ok := read.m[key]; ok && e.tryStore(value) {
		return
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	read = m.getRead()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: read.m, amended: true}))
		}
		m.dirty[key] = newEntry(value)
	}
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(r unsafe.Pointer) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, r) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(r unsafe.Pointer) {
	atomic.StorePointer(&e.p, r)
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
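//
// An illustrative sketch (the id key and the newState/*state names are
// hypothetical, not part of this package):
//
//	p, loaded := m.LoadOrStore(id, unsafe.Pointer(newState()))
//	s := (*state)(p)
//	if loaded {
//		// Another goroutine stored a value first; s is that shared value and
//		// the value passed above was discarded.
//	}
//	_ = s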
func (m *IntMap) LoadOrStore(key int64, value unsafe.Pointer) (actual unsafe.Pointer, loaded bool) {
	// Avoid locking if it's a clean hit.
	read := m.getRead()
	if e, ok := read.m[key]; ok {
		actual, loaded, ok = e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	defer m.mu.Unlock()
	read = m.getRead()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: read.m, amended: true}))
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(r unsafe.Pointer) (actual unsafe.Pointer, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return p, true, true
	}

	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, r) {
			return r, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return p, true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *IntMap) Delete(key int64) {
	read := m.getRead()
	e, ok := read.m[key]
	if !ok && read.amended {
		func() {
			m.mu.Lock()
			defer m.mu.Unlock()
			read = m.getRead()
			e, ok = read.m[key]
			if !ok && read.amended {
				delete(m.dirty, key)
			}
		}()
	}
	if ok {
		e.delete()
	}
}

func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the
// IntMap's contents: no key will be visited more than once, but if the value
// for any key is stored or deleted concurrently, Range may reflect any mapping
// for that key from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
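//
// For example (a sketch; target is an arbitrary key assumed for illustration):
//
//	var found unsafe.Pointer
//	m.Range(func(k int64, v unsafe.Pointer) bool {
//		if k == target {
//			found = v
//			return false // stop iterating
//		}
//		return true
//	})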
func (m *IntMap) Range(f func(key int64, value unsafe.Pointer) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read := m.getRead()
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		func() {
			m.mu.Lock()
			defer m.mu.Unlock()
			read = m.getRead()
			if read.amended {
				// Don't let read escape directly, otherwise it will allocate even
				// when read.amended is false. Instead, constrain the allocation to
				// just this branch.
				newRead := &readOnly{m: m.dirty}
				atomic.StorePointer(&m.read, unsafe.Pointer(newRead))
				read = *newRead
				m.dirty = nil
				m.misses = 0
			}
		}()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

func (m *IntMap) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: m.dirty}))
	m.dirty = nil
	m.misses = 0
}

func (m *IntMap) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read := m.getRead()
	m.dirty = make(map[int64]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

func (m *IntMap) getRead() readOnly {
	read := (*readOnly)(atomic.LoadPointer(&m.read))
	if read == nil {
		return readOnly{}
	}
	return *read
}

func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}