github.com/cockroachdb/cockroach@v20.2.0-alpha.1+incompatible/pkg/util/syncutil/int_map.go

// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in licenses/BSD-golang.txt.

// This code originated in Go's sync package.

package syncutil

import (
	"sync/atomic"
	"unsafe"
)

// IntMap is a concurrent map with amortized-constant-time loads, stores, and
// deletes. It is safe for multiple goroutines to call an IntMap's methods
// concurrently.
//
// It is optimized for use in concurrent loops with keys that are
// stable over time, and either few steady-state stores, or stores
// localized to one goroutine per key.
//
// For use cases that do not share these attributes, it will likely have
// comparable or worse performance and worse type safety than an ordinary
// map paired with a read-write mutex.
//
// Nil values are not supported; to use an IntMap as a set, store a
// dummy non-nil pointer instead of nil.
//
// The zero IntMap is valid and empty.
//
// An IntMap must not be copied after first use.
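//
// A minimal usage sketch (hypothetical caller-side code; assumes the caller
// consistently stores *int values):
//
//	var m IntMap
//	v := new(int)
//	*v = 42
//	m.Store(1, unsafe.Pointer(v)) // keys are int64, values unsafe.Pointer
//	if p, ok := m.Load(1); ok {
//		_ = *(*int)(p) // the caller must know the stored type
//	}
//	m.Delete(1)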
type IntMap struct {
	mu Mutex

	// read contains the portion of the map's contents that are safe for
	// concurrent access (with or without mu held).
	//
	// The read field itself is always safe to load, but must only be stored with
	// mu held.
	//
	// Entries stored in read may be updated concurrently without mu, but updating
	// a previously-expunged entry requires that the entry be copied to the dirty
	// map and unexpunged with mu held.
	read unsafe.Pointer // *readOnly

	// dirty contains the portion of the map's contents that require mu to be
	// held. To ensure that the dirty map can be promoted to the read map quickly,
	// it also includes all of the non-expunged entries in the read map.
	//
	// Expunged entries are not stored in the dirty map. An expunged entry in the
	// clean map must be unexpunged and added to the dirty map before a new value
	// can be stored to it.
	//
	// If the dirty map is nil, the next write to the map will initialize it by
	// making a shallow copy of the clean map, omitting stale entries.
	dirty map[int64]*entry

	// misses counts the number of loads since the read map was last updated that
	// needed to lock mu to determine whether the key was present.
	//
	// Once enough misses have occurred to cover the cost of copying the dirty
	// map, the dirty map will be promoted to the read map (in the unamended
	// state) and the next store to the map will make a new dirty copy.
	misses int
}

// readOnly is an immutable struct stored atomically in the IntMap.read field.
type readOnly struct {
	m       map[int64]*entry
	amended bool // true if the dirty map contains some key not in m.
}

// expunged is an arbitrary pointer that marks entries which have been deleted
// from the dirty map.
var expunged = unsafe.Pointer(new(int))

// An entry is a slot in the map corresponding to a particular key.
type entry struct {
	// p points to the value stored for the entry.
	//
	// If p == nil, the entry has been deleted and m.dirty == nil.
	//
	// If p == expunged, the entry has been deleted, m.dirty != nil, and the entry
	// is missing from m.dirty.
	//
	// Otherwise, the entry is valid and recorded in m.read.m[key] and, if m.dirty
	// != nil, in m.dirty[key].
	//
	// An entry can be deleted by atomic replacement with nil: when m.dirty is
	// next created, it will atomically replace nil with expunged and leave
	// m.dirty[key] unset.
	//
	// An entry's associated value can be updated by atomic replacement, provided
	// p != expunged. If p == expunged, an entry's associated value can be updated
	// only after first setting m.dirty[key] = e so that lookups using the dirty
	// map find the entry.
	p unsafe.Pointer
}

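// newEntry returns an entry holding the given value pointer.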
func newEntry(r unsafe.Pointer) *entry {
	return &entry{p: r}
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether value was found in the map.
func (m *IntMap) Load(key int64) (value unsafe.Pointer, ok bool) {
	read := m.getRead()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		// Avoid reporting a spurious miss if m.dirty got promoted while we were
		// blocked on m.mu. (If further loads of the same key will not miss, it's
		// not worth copying the dirty map for this key.)
		read = m.getRead()
		e, ok = read.m[key]
		if !ok && read.amended {
			e, ok = m.dirty[key]
			// Regardless of whether the entry was present, record a miss: this key
			// will take the slow path until the dirty map is promoted to the read
			// map.
			m.missLocked()
		}
		m.mu.Unlock()
	}
	if !ok {
		return nil, false
	}
	return e.load()
}

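// load atomically reads the entry's value. It reports ok == false if the
// entry has been deleted or expunged.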
func (e *entry) load() (value unsafe.Pointer, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == nil || p == expunged {
		return nil, false
	}
	return p, true
}

// Store sets the value for a key.
func (m *IntMap) Store(key int64, value unsafe.Pointer) {
	read := m.getRead()
	if e, ok := read.m[key]; ok && e.tryStore(value) {
		return
	}

	m.mu.Lock()
	read = m.getRead()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			// The entry was previously expunged, which implies that there is a
			// non-nil dirty map and this entry is not in it.
			m.dirty[key] = e
		}
		e.storeLocked(value)
	} else if e, ok := m.dirty[key]; ok {
		e.storeLocked(value)
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: read.m, amended: true}))
		}
		m.dirty[key] = newEntry(value)
	}
	m.mu.Unlock()
}

// tryStore stores a value if the entry has not been expunged.
//
// If the entry is expunged, tryStore returns false and leaves the entry
// unchanged.
func (e *entry) tryStore(r unsafe.Pointer) bool {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return false
	}
	for {
		if atomic.CompareAndSwapPointer(&e.p, p, r) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return false
		}
	}
}

// unexpungeLocked ensures that the entry is not marked as expunged.
//
// If the entry was previously expunged, it must be added to the dirty map
// before m.mu is unlocked.
func (e *entry) unexpungeLocked() (wasExpunged bool) {
	return atomic.CompareAndSwapPointer(&e.p, expunged, nil)
}

// storeLocked unconditionally stores a value to the entry.
//
// The entry must be known not to be expunged.
func (e *entry) storeLocked(r unsafe.Pointer) {
	atomic.StorePointer(&e.p, r)
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
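//
// A hedged get-or-create sketch (hypothetical caller-side code; assumes the
// caller always stores *int64 counters):
//
//	p, _ := m.LoadOrStore(key, unsafe.Pointer(new(int64)))
//	atomic.AddInt64((*int64)(p), 1) // correct whether p was loaded or stored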
func (m *IntMap) LoadOrStore(key int64, value unsafe.Pointer) (actual unsafe.Pointer, loaded bool) {
	// Avoid locking if it's a clean hit.
	read := m.getRead()
	if e, ok := read.m[key]; ok {
		actual, loaded, ok = e.tryLoadOrStore(value)
		if ok {
			return actual, loaded
		}
	}

	m.mu.Lock()
	read = m.getRead()
	if e, ok := read.m[key]; ok {
		if e.unexpungeLocked() {
			m.dirty[key] = e
		}
		actual, loaded, _ = e.tryLoadOrStore(value)
	} else if e, ok := m.dirty[key]; ok {
		actual, loaded, _ = e.tryLoadOrStore(value)
		m.missLocked()
	} else {
		if !read.amended {
			// We're adding the first new key to the dirty map.
			// Make sure it is allocated and mark the read-only map as incomplete.
			m.dirtyLocked()
			atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: read.m, amended: true}))
		}
		m.dirty[key] = newEntry(value)
		actual, loaded = value, false
	}
	m.mu.Unlock()

	return actual, loaded
}

// tryLoadOrStore atomically loads or stores a value if the entry is not
// expunged.
//
// If the entry is expunged, tryLoadOrStore leaves the entry unchanged and
// returns with ok==false.
func (e *entry) tryLoadOrStore(r unsafe.Pointer) (actual unsafe.Pointer, loaded, ok bool) {
	p := atomic.LoadPointer(&e.p)
	if p == expunged {
		return nil, false, false
	}
	if p != nil {
		return p, true, true
	}

	for {
		if atomic.CompareAndSwapPointer(&e.p, nil, r) {
			return r, false, true
		}
		p = atomic.LoadPointer(&e.p)
		if p == expunged {
			return nil, false, false
		}
		if p != nil {
			return p, true, true
		}
	}
}

// Delete deletes the value for a key.
func (m *IntMap) Delete(key int64) {
	read := m.getRead()
	e, ok := read.m[key]
	if !ok && read.amended {
		m.mu.Lock()
		read = m.getRead()
		e, ok = read.m[key]
		if !ok && read.amended {
			delete(m.dirty, key)
		}
		m.mu.Unlock()
	}
	if ok {
		e.delete()
	}
}

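// delete atomically clears the entry's value, reporting whether the entry
// previously held a value.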
func (e *entry) delete() (hadValue bool) {
	for {
		p := atomic.LoadPointer(&e.p)
		if p == nil || p == expunged {
			return false
		}
		if atomic.CompareAndSwapPointer(&e.p, p, nil) {
			return true
		}
	}
}

// Range calls f sequentially for each key and value present in the map.
// If f returns false, Range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot of the
// IntMap's contents: no key will be visited more than once, but if the value
// for any key is stored or deleted concurrently, Range may reflect any
// mapping for that key from any point during the Range call.
//
// Range may be O(N) with the number of elements in the map even if f returns
// false after a constant number of calls.
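//
// A minimal iteration sketch (hypothetical; assumes all stored values are
// *int):
//
//	sum := 0
//	m.Range(func(key int64, value unsafe.Pointer) bool {
//		sum += *(*int)(value)
//		return true // returning false would stop the iteration
//	})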
func (m *IntMap) Range(f func(key int64, value unsafe.Pointer) bool) {
	// We need to be able to iterate over all of the keys that were already
	// present at the start of the call to Range.
	// If read.amended is false, then read.m satisfies that property without
	// requiring us to hold m.mu for a long time.
	read := m.getRead()
	if read.amended {
		// m.dirty contains keys not in read.m. Fortunately, Range is already O(N)
		// (assuming the caller does not break out early), so a call to Range
		// amortizes an entire copy of the map: we can promote the dirty copy
		// immediately!
		m.mu.Lock()
		read = m.getRead()
		if read.amended {
			// Don't let read escape directly, otherwise it will allocate even
			// when read.amended is false. Instead, constrain the allocation to
			// just this branch.
			newRead := &readOnly{m: m.dirty}
			atomic.StorePointer(&m.read, unsafe.Pointer(newRead))
			read = *newRead
			m.dirty = nil
			m.misses = 0
		}
		m.mu.Unlock()
	}

	for k, e := range read.m {
		v, ok := e.load()
		if !ok {
			continue
		}
		if !f(k, v) {
			break
		}
	}
}

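// missLocked records a load that had to fall through to the dirty map. Once
// enough misses have accumulated to pay for a copy, the dirty map is promoted
// to the read map. m.mu must be held.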
func (m *IntMap) missLocked() {
	m.misses++
	if m.misses < len(m.dirty) {
		return
	}
	atomic.StorePointer(&m.read, unsafe.Pointer(&readOnly{m: m.dirty}))
	m.dirty = nil
	m.misses = 0
}

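// dirtyLocked initializes the dirty map, if necessary, by copying the
// non-expunged entries of the read map; deleted (nil) entries are expunged
// rather than copied. m.mu must be held.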
func (m *IntMap) dirtyLocked() {
	if m.dirty != nil {
		return
	}

	read := m.getRead()
	m.dirty = make(map[int64]*entry, len(read.m))
	for k, e := range read.m {
		if !e.tryExpungeLocked() {
			m.dirty[k] = e
		}
	}
}

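// getRead atomically loads the current read-only view of the map, returning
// an empty readOnly if the map has never been written.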
func (m *IntMap) getRead() readOnly {
	read := (*readOnly)(atomic.LoadPointer(&m.read))
	if read == nil {
		return readOnly{}
	}
	return *read
}

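// tryExpungeLocked attempts to mark a deleted (nil) entry as expunged so that
// it can be omitted from the dirty map. It reports whether the entry is
// expunged when it returns. m.mu must be held.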
func (e *entry) tryExpungeLocked() (isExpunged bool) {
	p := atomic.LoadPointer(&e.p)
	for p == nil {
		if atomic.CompareAndSwapPointer(&e.p, nil, expunged) {
			return true
		}
		p = atomic.LoadPointer(&e.p)
	}
	return p == expunged
}