github.com/puzpuzpuz/xsync/v2@v2.5.2-0.20231021165734-92b8269e19a9/mapof.go

//go:build go1.18
// +build go1.18

package xsync

import (
	"fmt"
	"hash/maphash"
	"math"
	"sync"
	"sync/atomic"
	"unsafe"
)

// MapOf is like a Go map[K]V but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions, such as Compute and Size.
//
// A MapOf must not be copied after first use.
//
// MapOf uses a modified version of the Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around the idea of organizing the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no writes to memory and use no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios MapOf outperforms sync.Map.
type MapOf[K comparable, V any] struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapOfTable
	hasher       func(maphash.Seed, K) uint64
}

type mapOfTable[K comparable, V any] struct {
	buckets []bucketOfPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	seed maphash.Seed
}

// bucketOfPadded is a cache-line-sized map bucket holding up to
// entriesPerMapBucket entries.
type bucketOfPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
	bucketOf
}

type bucketOf struct {
	hashes  [entriesPerMapBucket]uint64
	entries [entriesPerMapBucket]unsafe.Pointer // *entryOf
	next    unsafe.Pointer                      // *bucketOfPadded
	mu      sync.Mutex
}

// entryOf is an immutable map entry.
type entryOf[K comparable, V any] struct {
	key   K
	value V
}

// NewMapOf creates a new MapOf instance with string keys.
func NewMapOf[V any]() *MapOf[string, V] {
	return NewTypedMapOfPresized[string, V](hashString, minMapTableCap)
}
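
// A minimal usage sketch for callers (illustrative only, not part of the
// package API), assuming the module is imported as
// "github.com/puzpuzpuz/xsync/v2":
//
//	m := xsync.NewMapOf[int]()
//	m.Store("answer", 42)
//	if v, ok := m.Load("answer"); ok {
//		fmt.Println(v) // prints 42
//	}
//	m.Delete("answer")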

// NewMapOfPresized creates a new MapOf instance with string keys and capacity
// enough to hold sizeHint entries. If sizeHint is zero or negative, the value
// is ignored.
func NewMapOfPresized[V any](sizeHint int) *MapOf[string, V] {
	return NewTypedMapOfPresized[string, V](hashString, sizeHint)
}

// IntegerConstraint represents any integer type.
type IntegerConstraint interface {
	// Recreation of golang.org/x/exp/constraints.Integer to avoid taking a dependency on an
	// experimental package.
	~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}

// NewIntegerMapOf creates a new MapOf instance with integer typed keys.
func NewIntegerMapOf[K IntegerConstraint, V any]() *MapOf[K, V] {
	return NewTypedMapOfPresized[K, V](hashUint64[K], minMapTableCap)
}

// NewIntegerMapOfPresized creates a new MapOf instance with integer typed keys
// and capacity enough to hold sizeHint entries. If sizeHint is zero or
// negative, the value is ignored.
func NewIntegerMapOfPresized[K IntegerConstraint, V any](sizeHint int) *MapOf[K, V] {
	return NewTypedMapOfPresized[K, V](hashUint64[K], sizeHint)
}
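
// An illustrative sketch (not part of the package API): any type satisfying
// IntegerConstraint, including named integer types, works as the key:
//
//	type userID uint64
//
//	ids := xsync.NewIntegerMapOf[userID, string]()
//	ids.Store(userID(1001), "alice")
//	name, ok := ids.Load(userID(1001)) // "alice", true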

// NewTypedMapOf creates a new MapOf instance with arbitrarily typed keys.
//
// Keys are hashed to uint64 using the hasher function. It is strongly
// recommended to use the hash/maphash package to implement hasher. See the
// example for how to do that.
func NewTypedMapOf[K comparable, V any](hasher func(maphash.Seed, K) uint64) *MapOf[K, V] {
	return NewTypedMapOfPresized[K, V](hasher, minMapTableCap)
}
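
// A hedged sketch of a maphash-based hasher for a custom key type
// (illustrative; the Point type and its field layout are assumptions made
// for this example). The caller would also import encoding/binary and
// hash/maphash:
//
//	type Point struct {
//		X, Y int32
//	}
//
//	m := xsync.NewTypedMapOf[Point, string](func(seed maphash.Seed, p Point) uint64 {
//		var h maphash.Hash
//		h.SetSeed(seed)
//		binary.Write(&h, binary.LittleEndian, p.X) // maphash.Hash.Write never fails
//		binary.Write(&h, binary.LittleEndian, p.Y)
//		return h.Sum64()
//	})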

// NewTypedMapOfPresized creates a new MapOf instance with arbitrarily typed
// keys and capacity enough to hold sizeHint entries. If sizeHint is zero or
// negative, the value is ignored.
//
// Keys are hashed to uint64 using the hasher function. It is strongly
// recommended to use the hash/maphash package to implement hasher. See the
// example for how to do that.
func NewTypedMapOfPresized[K comparable, V any](hasher func(maphash.Seed, K) uint64, sizeHint int) *MapOf[K, V] {
	m := &MapOf[K, V]{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	m.hasher = hasher
	var table *mapOfTable[K, V]
	if sizeHint <= minMapTableCap {
		table = newMapOfTable[K, V](minMapTableLen)
	} else {
		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
		table = newMapOfTable[K, V](int(tableLen))
	}
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}

// NewUniversalMapOf creates a new MapOf instance with arbitrarily typed comparable keys.
// The only limitation is that the key type must not contain interfaces, whether
// directly or in nested fields.
func NewUniversalMapOf[K comparable, V any]() *MapOf[K, V] {
	return NewTypedMapOfPresized[K, V](makeHashFunc[K](), minMapTableCap)
}

// NewUniversalMapOfPresized creates a new MapOf instance with arbitrarily typed
// comparable keys and capacity enough to hold sizeHint entries. If sizeHint is zero or
// negative, the value is ignored.
// The only limitation is that the key type must not contain interfaces, whether
// directly or in nested fields.
func NewUniversalMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
	return NewTypedMapOfPresized[K, V](makeHashFunc[K](), sizeHint)
}
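
// An illustrative sketch (not part of the package API): NewUniversalMapOf
// derives the hash function automatically, so a plain comparable struct works
// as the key without a hand-written hasher:
//
//	type cacheKey struct {
//		Tenant string
//		ID     uint64
//	}
//
//	m := xsync.NewUniversalMapOf[cacheKey, int]()
//	m.Store(cacheKey{Tenant: "acme", ID: 7}, 1)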

func newMapOfTable[K comparable, V any](tableLen int) *mapOfTable[K, V] {
	buckets := make([]bucketOfPadded, tableLen)
	counterLen := tableLen >> 10
	if counterLen < minMapCounterLen {
		counterLen = minMapCounterLen
	} else if counterLen > maxMapCounterLen {
		counterLen = maxMapCounterLen
	}
	counter := make([]counterStripe, counterLen)
	t := &mapOfTable[K, V]{
		buckets: buckets,
		size:    counter,
		seed:    maphash.MakeSeed(),
	}
	return t
}
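
// A worked example of the stripe sizing above (illustrative): a table with
// 1<<14 root buckets yields 16384 >> 10 = 16 counter stripes, which is then
// clamped into the [minMapCounterLen, maxMapCounterLen] range.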

// Load returns the value stored in the map for a key, or the zero
// value of type V if no value is present.
// The ok result indicates whether the value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	hash := shiftHash(m.hasher(table.seed, key))
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			// We treat the hash code only as a hint, so there is no
			// need to get an atomic snapshot.
			h := atomic.LoadUint64(&b.hashes[i])
			if h == uint64(0) || h != hash {
				continue
			}
			eptr := atomic.LoadPointer(&b.entries[i])
			if eptr == nil {
				continue
			}
			e := (*entryOf[K, V])(eptr)
			if e.key == key {
				return e.value, true
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketOfPadded)(bptr)
	}
}

// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		true,
		false,
	)
}
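
// An illustrative sketch (not part of the package API): LoadOrStore gives
// publish-once semantics, e.g. registering a single connection per host.
// newConn and conns are hypothetical names used only for this example:
//
//	conn := newConn("db-1")
//	actual, loaded := conns.LoadOrStore("db-1", conn)
//	if loaded {
//		conn.Close() // another goroutine won the race; use actual instead
//	}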

// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// The loaded result is true if an existing value was loaded,
// false otherwise.
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return valueFn(), false
		},
		true,
		false,
	)
}
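
// An illustrative sketch (not part of the package API): unlike LoadOrStore,
// LoadOrCompute defers constructing the value until the key is known to be
// missing, which helps when construction is expensive:
//
//	cache := xsync.NewMapOf[*bytes.Buffer]()
//	buf, _ := cache.LoadOrCompute("report", func() *bytes.Buffer {
//		return bytes.NewBuffer(make([]byte, 0, 1<<20)) // built only on a miss
//	})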

// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether the value was computed and stored, and
// thus is present in the map. The actual result contains the new value in
// cases where the value was computed and stored. See the example for a few
// use cases.
func (m *MapOf[K, V]) Compute(
	key K,
	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
) (actual V, ok bool) {
	return m.doCompute(key, valueFn, false, true)
}
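
// A hedged sketch of a few Compute use cases (illustrative, not part of the
// package API), assuming a counters map of type *MapOf[string, int]:
//
//	// Increment, treating a missing key as zero.
//	counters.Compute("requests", func(old int, loaded bool) (int, bool) {
//		return old + 1, false // delete == false keeps the entry
//	})
//
//	// Decrement, deleting the entry once it drains to zero; if the key is
//	// missing, delete == true makes the call a no-op.
//	counters.Compute("requests", func(old int, loaded bool) (int, bool) {
//		return old - 1, old <= 1
//	})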

// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	return m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			return value, true
		},
		false,
		false,
	)
}

// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			return value, true
		},
		false,
		false,
	)
}

func (m *MapOf[K, V]) doCompute(
	key K,
	valueFn func(oldValue V, loaded bool) (V, bool),
	loadIfExists, computeOnly bool,
) (V, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketOfPadded
			emptyidx     int
			hintNonEmpty int
		)
		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := shiftHash(m.hasher(table.seed, key))
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		rootb.mu.Lock()
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			rootb.mu.Unlock()
			goto compute_attempt
		}
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			rootb.mu.Unlock()
			m.waitForResize()
			goto compute_attempt
		}
		b := rootb
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				h := atomic.LoadUint64(&b.hashes[i])
				if h == uint64(0) {
					if emptyb == nil {
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if h != hash {
					hintNonEmpty++
					continue
				}
				e := (*entryOf[K, V])(b.entries[i])
				if e.key == key {
					if loadIfExists {
						rootb.mu.Unlock()
						return e.value, !computeOnly
					}
					// In-place update/delete.
					// Every update allocates a new immutable entry, so the
					// live entry pointers stay unique. Otherwise the atomic
					// snapshot observed by readers wouldn't be correct in
					// case of multiple Store calls using the same value.
					oldv := e.value
					newv, del := valueFn(oldv, true)
					if del {
						// Deletion.
						// First we update the hash, then the entry.
						atomic.StoreUint64(&b.hashes[i], uint64(0))
						atomic.StorePointer(&b.entries[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucketOf(b)
						}
						rootb.mu.Unlock()
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldv, !computeOnly
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newv
					atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					if computeOnly {
						// Compute expects the new value to be returned.
						return newv, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldv, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroedV V
					newValue, del := valueFn(zeroedV, false)
					if del {
						rootb.mu.Unlock()
						return zeroedV, false
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newValue
					// First we update the hash, then the entry.
					atomic.StoreUint64(&emptyb.hashes[emptyidx], hash)
					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					rootb.mu.Unlock()
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroedV V
				newValue, del := valueFn(zeroedV, false)
				if del {
					rootb.mu.Unlock()
					return newValue, false
				}
				// Create and append the bucket.
				newb := new(bucketOfPadded)
				newb.hashes[0] = hash
				newe := new(entryOf[K, V])
				newe.key = key
				newe.value = newValue
				newb.entries[0] = unsafe.Pointer(newe)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				rootb.mu.Unlock()
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketOfPadded)(b.next)
		}
	}
}

func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
	curTablePtr := atomic.LoadPointer(&m.table)
	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
}

func (m *MapOf[K, V]) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}

func (m *MapOf[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}

func (m *MapOf[K, V]) resize(table *mapOfTable[K, V], hint mapResizeHint) {
	var shrinkThreshold int64
	tableLen := len(table.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		shrinkThreshold = int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen == minMapTableLen || table.sumSize() > shrinkThreshold {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapOfTable[K, V]
	switch hint {
	case mapGrowHint:
		// Grow the table by a factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapOfTable[K, V](tableLen << 1)
	case mapShrinkHint:
		if table.sumSize() <= shrinkThreshold {
			// Shrink the table by a factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapOfTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapOfTable[K, V](minMapTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}

func copyBucketOf[K comparable, V any](
	b *bucketOfPadded,
	destTable *mapOfTable[K, V],
	hasher func(maphash.Seed, K) uint64,
) (copied int) {
	rootb := b
	rootb.mu.Lock()
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] != nil {
				e := (*entryOf[K, V])(b.entries[i])
				hash := shiftHash(hasher(destTable.seed, e.key))
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucketOf(hash, b.entries[i], destb)
				copied++
			}
		}
		if b.next == nil {
			rootb.mu.Unlock()
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the MapOf's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it. However, the
// concurrent modification rules apply, i.e. the changes may not be
// reflected in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	var zeroPtr unsafe.Pointer
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapOfTable[K, V])(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.entries[i] != nil {
					bentries = append(bentries, b.entries[i])
				}
			}
			if b.next == nil {
				rootb.mu.Unlock()
				break
			}
			b = (*bucketOfPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			entry := (*entryOf[K, V])(bentries[j])
			if !f(entry.key, entry.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroPtr
		}
		bentries = bentries[:0]
	}
}
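
// An illustrative sketch (not part of the package API): collecting the keys
// of a *MapOf[string, int]; entries stored or deleted concurrently may or
// may not be observed:
//
//	keys := make([]string, 0, m.Size())
//	m.Range(func(k string, _ int) bool {
//		keys = append(keys, k)
//		return true // return false to stop early
//	})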

// Clear deletes all keys and values currently stored in the map.
func (m *MapOf[K, V]) Clear() {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}

// Size returns the current size of the map.
func (m *MapOf[K, V]) Size() int {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}

func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] == nil {
				b.hashes[i] = hash
				b.entries[i] = entryPtr
				return
			}
		}
		if b.next == nil {
			newb := new(bucketOfPadded)
			newb.hashes[0] = hash
			newb.entries[0] = entryPtr
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

func isEmptyBucketOf(rootb *bucketOfPadded) bool {
	b := rootb
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] != nil {
				return false
			}
		}
		if b.next == nil {
			return true
		}
		b = (*bucketOfPadded)(b.next)
	}
}

func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}

func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}

func (table *mapOfTable[K, V]) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}

func shiftHash(h uint64) uint64 {
	// uint64(0) is a reserved value which stands for an empty slot.
	if h == uint64(0) {
		return uint64(1)
	}
	return h
}

// O(N) operation; use for debugging purposes only.
func (m *MapOf[K, V]) stats() mapStats {
	stats := mapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				if atomic.LoadPointer(&b.entries[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketOfPadded)(b.next)
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}