github.com/puzpuzpuz/xsync/v2@v2.5.2-0.20231021165734-92b8269e19a9/map.go

package xsync

import (
	"fmt"
	"hash/maphash"
	"math"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"
)

type mapResizeHint int

const (
	mapGrowHint   mapResizeHint = 0
	mapShrinkHint mapResizeHint = 1
	mapClearHint  mapResizeHint = 2
)
const (
	// number of entries per bucket; 3 entries lead to a size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapBucket = 3
	// threshold fraction of table occupancy to start a table shrinking
	// when deleting the last entry in a bucket chain
	mapShrinkFraction = 128
	// map load factor to trigger a table resize during insertion;
	// a map holds up to mapLoadFactor*entriesPerMapBucket*len(buckets)
	// key-value pairs (this is a soft limit)
	mapLoadFactor = 0.75
	// minimal table size, i.e. number of buckets; thus, minimal map
	// capacity can be calculated as entriesPerMapBucket*minMapTableLen
	minMapTableLen = 32
	// minimal table capacity
	minMapTableCap = minMapTableLen * entriesPerMapBucket
	// minimum counter stripes to use
	minMapCounterLen = 8
	// maximum counter stripes to use; stands for around 2KB of memory
	maxMapCounterLen = 32
)
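
// For instance, with the minimal table of minMapTableLen (32) buckets,
// the soft grow limit is mapLoadFactor*entriesPerMapBucket*32 =
// 0.75*3*32 = 72 entries, while the raw bucket capacity is
// minMapTableCap = 96 entries.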

var (
	topHashMask       = uint64((1<<20)-1) << 44
	topHashEntryMasks = [3]uint64{
		topHashMask,
		topHashMask >> 20,
		topHashMask >> 40,
	}
)

// Map is like a Go map[string]interface{} but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A Map must not be copied after first use.
//
// Map uses a modified version of the Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around the idea of organizing the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no writes to memory, nor any
// mutexes or other sorts of locks. Due to this design, in all
// considered scenarios Map outperforms sync.Map.
//
// One important difference from sync.Map is that only string keys
// are supported. That's because the Go standard library does not
// expose the built-in hash functions for interface{} values.
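//
// A minimal usage sketch (illustrative only):
//
//	m := NewMap()
//	m.Store("answer", 42)
//	if v, ok := m.Load("answer"); ok {
//		_ = v.(int) // 42
//	}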
type Map struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapTable
}

type mapTable struct {
	buckets []bucketPadded
	// striped counter for the number of table entries;
	// used to determine if a table shrinking is needed;
	// occupies min(buckets_memory/1024, 2KB) of memory
	size []counterStripe
	seed maphash.Seed
}

type counterStripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}

type bucketPadded struct {
	//lint:ignore U1000 ensure each bucket takes exactly one cache line on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
	bucket
}

type bucket struct {
	next   unsafe.Pointer // *bucketPadded
	keys   [entriesPerMapBucket]unsafe.Pointer
	values [entriesPerMapBucket]unsafe.Pointer
	// topHashMutex is a 2-in-1 value.
	//
	// It contains packed top 20 bits (20 MSBs) of hash codes for keys
	// stored in the bucket:
	// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
	// |      20 bits     |      20 bits     |      20 bits     |     3 bits      | 1 bit |
	//
	// The least significant bit is used for the mutex (TTAS spinlock).
	topHashMutex uint64
}
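
// For example, after storeTopHash(h, 0, 1) the top 20 bits of h occupy
// bits 24..43 of the topHashMutex word and presence bit 2 is set;
// topHashMatch then shifts those bits back up for comparison against
// h&topHashMask.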

type rangeEntry struct {
	key   unsafe.Pointer
	value unsafe.Pointer
}

// NewMap creates a new Map instance.
func NewMap() *Map {
	return NewMapPresized(minMapTableCap)
}

// NewMapPresized creates a new Map instance with capacity enough to hold
// sizeHint entries. If sizeHint is zero or negative, the value is ignored.
func NewMapPresized(sizeHint int) *Map {
	m := &Map{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	var table *mapTable
	if sizeHint <= minMapTableCap {
		table = newMapTable(minMapTableLen)
	} else {
		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
		table = newMapTable(int(tableLen))
	}
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
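
// For example, NewMapPresized(10_000) computes 10_000/3 = 3333 and
// rounds it up to the next power of two, i.e. a table of 4096 buckets
// holding up to 12,288 entries before chaining kicks in.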

func newMapTable(tableLen int) *mapTable {
	buckets := make([]bucketPadded, tableLen)
	counterLen := tableLen >> 10
	if counterLen < minMapCounterLen {
		counterLen = minMapCounterLen
	} else if counterLen > maxMapCounterLen {
		counterLen = maxMapCounterLen
	}
	counter := make([]counterStripe, counterLen)
	t := &mapTable{
		buckets: buckets,
		size:    counter,
		seed:    maphash.MakeSeed(),
	}
	return t
}
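
// For example, a table of 65,536 buckets gets 65536>>10 = 64 stripes,
// clamped to maxMapCounterLen (32), while tables of 8192 buckets or
// fewer get the minimum of 8 stripes.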

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (m *Map) Load(key string) (value interface{}, ok bool) {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	hash := hashString(table.seed, key)
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		topHashes := atomic.LoadUint64(&b.topHashMutex)
		for i := 0; i < entriesPerMapBucket; i++ {
			if !topHashMatch(hash, topHashes, i) {
				continue
			}
		atomic_snapshot:
			// Start atomic snapshot.
			vp := atomic.LoadPointer(&b.values[i])
			kp := atomic.LoadPointer(&b.keys[i])
			if kp != nil && vp != nil {
				if key == derefKey(kp) {
					if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
						// Atomic snapshot succeeded.
						return derefValue(vp), true
					}
					// Concurrent update/remove. Go for another spin.
					goto atomic_snapshot
				}
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketPadded)(bptr)
	}
}

// Store sets the value for a key.
func (m *Map) Store(key string, value interface{}) {
	m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		true,
		false,
	)
}

// LoadAndStore stores the new value for the key and returns the
// existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
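//
// A hedged sketch (newExpensiveValue is a hypothetical constructor);
// valueFn runs while the bucket is locked, so it should be fast and
// must not write to the same Map:
//
//	v, loaded := m.LoadOrCompute("conn", func() interface{} {
//		return newExpensiveValue()
//	})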
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return valueFn(), false
		},
		true,
		false,
	)
}

// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether the value was computed and stored,
// and is thus present in the map. The actual result contains the new
// value in cases where the value was computed and stored. See the
// example below for a few use cases.
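//
// Note that valueFn is invoked while the corresponding bucket is
// locked, so it should be fast and must not write to the same Map.
// A hedged sketch of typical uses (illustrative only):
//
//	// Increment a counter, creating it on the first call.
//	m.Compute("cnt", func(old interface{}, loaded bool) (interface{}, bool) {
//		if !loaded {
//			return 1, false
//		}
//		return old.(int) + 1, false
//	})
//	// Delete the key, if present.
//	m.Compute("cnt", func(old interface{}, loaded bool) (interface{}, bool) {
//		return old, true
//	})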
func (m *Map) Compute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
) (actual interface{}, ok bool) {
	return m.doCompute(key, valueFn, false, true)
}

// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(value interface{}, loaded bool) (interface{}, bool) {
			return value, true
		},
		false,
		false,
	)
}

// Delete deletes the value for a key.
func (m *Map) Delete(key string) {
	m.doCompute(
		key,
		func(value interface{}, loaded bool) (interface{}, bool) {
			return value, true
		},
		false,
		false,
	)
}

func (m *Map) doCompute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
	loadIfExists, computeOnly bool,
) (interface{}, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketPadded
			emptyidx     int
			hintNonEmpty int
		)
		table := (*mapTable)(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := hashString(table.seed, key)
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		lockBucket(&rootb.topHashMutex)
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			goto compute_attempt
		}
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			m.waitForResize()
			goto compute_attempt
		}
		b := rootb
		for {
			topHashes := atomic.LoadUint64(&b.topHashMutex)
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] == nil {
					if emptyb == nil {
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if !topHashMatch(hash, topHashes, i) {
					hintNonEmpty++
					continue
				}
				if key == derefKey(b.keys[i]) {
					vp := b.values[i]
					if loadIfExists {
						unlockBucket(&rootb.topHashMutex)
						return derefValue(vp), !computeOnly
					}
					// In-place update/delete.
					// We get a copy of the value via an interface{} on each call,
					// so the live value pointers are unique. Otherwise the atomic
					// snapshot wouldn't be correct in case of multiple Store calls
					// using the same value.
					oldValue := derefValue(vp)
					newValue, del := valueFn(oldValue, true)
					if del {
						// Deletion.
						// First we update the value, then the key.
						// This is important for atomic snapshot states.
						atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
						atomic.StorePointer(&b.values[i], nil)
						atomic.StorePointer(&b.keys[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucket(b)
						}
						unlockBucket(&rootb.topHashMutex)
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldValue, !computeOnly
					}
					nvp := unsafe.Pointer(&newValue)
					if assertionsEnabled && vp == nvp {
						panic("non-unique value pointer")
					}
					atomic.StorePointer(&b.values[i], nvp)
					unlockBucket(&rootb.topHashMutex)
					if computeOnly {
						// Compute expects the new value to be returned.
						return newValue, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldValue, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroedV interface{}
					newValue, del := valueFn(zeroedV, false)
					if del {
						unlockBucket(&rootb.topHashMutex)
						return zeroedV, false
					}
					// First we update the value, then the key.
					// This is important for atomic snapshot states.
					topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
					atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
					atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
					atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
					unlockBucket(&rootb.topHashMutex)
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					unlockBucket(&rootb.topHashMutex)
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroedV interface{}
				newValue, del := valueFn(zeroedV, false)
				if del {
					unlockBucket(&rootb.topHashMutex)
					return newValue, false
				}
				// Create and append the bucket.
				newb := new(bucketPadded)
				newb.keys[0] = unsafe.Pointer(&key)
				newb.values[0] = unsafe.Pointer(&newValue)
				newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				unlockBucket(&rootb.topHashMutex)
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketPadded)(b.next)
		}
	}
}

func (m *Map) newerTableExists(table *mapTable) bool {
	curTablePtr := atomic.LoadPointer(&m.table)
	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
}

func (m *Map) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}

func (m *Map) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}

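// resize installs a grown, shrunk, or fresh table. Only one resize may
// run at a time: losers of the CAS on m.resizing wait for the winner to
// finish. With the default constants, a table of 1024 buckets shrinks
// only when at most 1024*3/128 = 24 entries remain.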
func (m *Map) resize(table *mapTable, hint mapResizeHint) {
	var shrinkThreshold int64
	tableLen := len(table.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		shrinkThreshold = int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen == minMapTableLen || table.sumSize() > shrinkThreshold {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapTable
	switch hint {
	case mapGrowHint:
		// Grow the table with factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapTable(tableLen << 1)
	case mapShrinkHint:
		if table.sumSize() <= shrinkThreshold {
			// Shrink the table with factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapTable(tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapTable(minMapTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucket(&table.buckets[i], newTable)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}

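// copyBucket locks the source root bucket and copies its chain into
// destTable. Plain (non-atomic) writes are safe here because the
// destination table has not been published to readers yet.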
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
	rootb := b
	lockBucket(&rootb.topHashMutex)
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				k := derefKey(b.keys[i])
				hash := hashString(destTable.seed, k)
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucket(hash, b.keys[i], b.values[i], destb)
				copied++
			}
		}
		if b.next == nil {
			unlockBucket(&rootb.topHashMutex)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}

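// appendToBucket stores the entry in the first free slot of the chain,
// growing the chain if needed; callers must ensure exclusive access to
// the destination bucket (see copyBucket).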
func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] == nil {
				b.keys[i] = keyPtr
				b.values[i] = valPtr
				b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
				return
			}
		}
		if b.next == nil {
			newb := new(bucketPadded)
			newb.keys[0] = keyPtr
			newb.values[0] = valPtr
			newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}

func isEmptyBucket(rootb *bucketPadded) bool {
	b := rootb
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				return false
			}
		}
		if b.next == nil {
			return true
		}
		b = (*bucketPadded)(b.next)
	}
}

// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it. However, the
// concurrent modification rules apply, i.e. the changes may not be
// reflected in subsequently iterated entries.
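//
// A minimal sketch (illustrative only):
//
//	m.Range(func(key string, value interface{}) bool {
//		fmt.Printf("%s=%v\n", key, value)
//		return true // return false to stop the iteration
//	})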
func (m *Map) Range(f func(key string, value interface{}) bool) {
	var zeroEntry rangeEntry
	// Pre-allocate a slice big enough to fit entries for most hash tables.
	bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapTable)(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		lockBucket(&rootb.topHashMutex)
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] != nil {
					bentries = append(bentries, rangeEntry{
						key:   b.keys[i],
						value: b.values[i],
					})
				}
			}
			if b.next == nil {
				unlockBucket(&rootb.topHashMutex)
				break
			}
			b = (*bucketPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			k := derefKey(bentries[j].key)
			v := derefValue(bentries[j].value)
			if !f(k, v) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroEntry
		}
		bentries = bentries[:0]
	}
}

// Clear deletes all keys and values currently stored in the map.
func (m *Map) Clear() {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}

// Size returns the current size of the map.
func (m *Map) Size() int {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}

func derefKey(keyPtr unsafe.Pointer) string {
	return *(*string)(keyPtr)
}

func derefValue(valuePtr unsafe.Pointer) interface{} {
	return *(*interface{})(valuePtr)
}

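// lockBucket acquires the TTAS (test-and-test-and-set) spinlock held
// in the least significant bit of the bucket's topHashMutex word: spin
// reading until the bit is clear, then try to CAS it to 1.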
func lockBucket(mu *uint64) {
	for {
		var v uint64
		for {
			v = atomic.LoadUint64(mu)
			if v&1 != 1 {
				break
			}
			runtime.Gosched()
		}
		if atomic.CompareAndSwapUint64(mu, v, v|1) {
			return
		}
		runtime.Gosched()
	}
}

func unlockBucket(mu *uint64) {
	v := atomic.LoadUint64(mu)
	atomic.StoreUint64(mu, v&^1)
}

func topHashMatch(hash, topHashes uint64, idx int) bool {
	if topHashes&(1<<(idx+1)) == 0 {
		// Entry is not present.
		return false
	}
	hash = hash & topHashMask
	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
	return hash == topHashes
}

func storeTopHash(hash, topHashes uint64, idx int) uint64 {
	// Zero out top hash at idx.
	topHashes = topHashes &^ topHashEntryMasks[idx]
	// Chop top 20 MSBs of the given hash and position them at idx.
	hash = (hash & topHashMask) >> (20 * idx)
	// Store the MSBs.
	topHashes = topHashes | hash
	// Mark the entry as present.
	return topHashes | (1 << (idx + 1))
}

func eraseTopHash(topHashes uint64, idx int) uint64 {
	return topHashes &^ (1 << (idx + 1))
}
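
// A quick sanity check of the round trip: topHashMatch(h,
// storeTopHash(h, 0, i), i) holds for any h and i, and becomes false
// again once eraseTopHash clears presence bit i+1.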

func (table *mapTable) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}

func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}

func (table *mapTable) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}

type mapStats struct {
	RootBuckets  int
	TotalBuckets int
	EmptyBuckets int
	Capacity     int
	Size         int // calculated number of entries
	Counter      int // number of entries according to table counter
	CounterLen   int // number of counter stripes
	MinEntries   int // min entries per chain of buckets
	MaxEntries   int // max entries per chain of buckets
	TotalGrowths int64
	TotalShrinks int64
}

func (s *mapStats) ToString() string {
	var sb strings.Builder
	sb.WriteString("\n---\n")
	sb.WriteString(fmt.Sprintf("RootBuckets:  %d\n", s.RootBuckets))
	sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
	sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
	sb.WriteString(fmt.Sprintf("Capacity:     %d\n", s.Capacity))
	sb.WriteString(fmt.Sprintf("Size:         %d\n", s.Size))
	sb.WriteString(fmt.Sprintf("Counter:      %d\n", s.Counter))
	sb.WriteString(fmt.Sprintf("CounterLen:   %d\n", s.CounterLen))
	sb.WriteString(fmt.Sprintf("MinEntries:   %d\n", s.MinEntries))
	sb.WriteString(fmt.Sprintf("MaxEntries:   %d\n", s.MaxEntries))
	sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
	sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
	sb.WriteString("---\n")
	return sb.String()
}

// O(N) operation; use for debug purposes only
func (m *Map) stats() mapStats {
	stats := mapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				if atomic.LoadPointer(&b.keys[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketPadded)(b.next)
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}