github.com/fufuok/utils@v1.0.10/xsync/mapof.go

     1  //go:build go1.18
     2  // +build go1.18
     3  
     4  package xsync
     5  
     6  import (
     7  	"fmt"
     8  	"math"
     9  	"sync"
    10  	"sync/atomic"
    11  	"unsafe"
    12  )
    13  
    14  // MapOf is like a Go map[K]V but is safe for concurrent
    15  // use by multiple goroutines without additional locking or
    16  // coordination. It follows the interface of sync.Map with
    17  // a number of valuable extensions like Compute or Size.
    18  //
    19  // A MapOf must not be copied after first use.
    20  //
    21  // MapOf uses a modified version of Cache-Line Hash Table (CLHT)
    22  // data structure: https://github.com/LPD-EPFL/CLHT
    23  //
     24  // CLHT is built around the idea of organizing the hash table in
    25  // cache-line-sized buckets, so that on all modern CPUs update
    26  // operations complete with at most one cache-line transfer.
     27  // Also, Get operations involve no writes to memory and acquire no
    28  // mutexes or any other sort of locks. Due to this design, in all
    29  // considered scenarios MapOf outperforms sync.Map.
    30  type MapOf[K comparable, V any] struct {
    31  	totalGrowths int64
    32  	totalShrinks int64
    33  	resizing     int64          // resize in progress flag; updated atomically
    34  	resizeMu     sync.Mutex     // only used along with resizeCond
    35  	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
    36  	table        unsafe.Pointer // *mapOfTable
    37  	hasher       func(K, uint64) uint64
    38  	minTableLen  int
    39  }
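
// exampleMapOfBasicUsage is an illustrative sketch, not part of the original
// file: it shows the sync.Map-like surface of MapOf (Store, Load, Delete)
// plus the Size extension, using only identifiers declared in this package.
func exampleMapOfBasicUsage() {
	m := NewMapOf[string, int]()
	m.Store("answer", 42)
	if v, ok := m.Load("answer"); ok {
		fmt.Println("loaded:", v) // loaded: 42
	}
	m.Delete("answer")
	fmt.Println("size:", m.Size()) // size: 0
}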
    40  
    41  type mapOfTable[K comparable, V any] struct {
    42  	buckets []bucketOfPadded
     43  	// striped counter for the number of table entries;
     44  	// used to determine whether table shrinking is needed;
    45  	// occupies min(buckets_memory/1024, 64KB) of memory
    46  	size []counterStripe
    47  	seed uint64
    48  }
    49  
     50  // bucketOfPadded is a cache-line-sized map bucket holding up to
    51  // entriesPerMapBucket entries.
    52  type bucketOfPadded struct {
    53  	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
    54  	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
    55  	bucketOf
    56  }
    57  
    58  type bucketOf struct {
    59  	hashes  [entriesPerMapBucket]uint64
    60  	entries [entriesPerMapBucket]unsafe.Pointer // *entryOf
    61  	next    unsafe.Pointer                      // *bucketOfPadded
    62  	mu      sync.Mutex
    63  }
    64  
    65  // entryOf is an immutable map entry.
    66  type entryOf[K comparable, V any] struct {
    67  	key   K
    68  	value V
    69  }
    70  
    71  // NewMapOf creates a new MapOf instance.
    72  func NewMapOf[K comparable, V any]() *MapOf[K, V] {
    73  	return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket)
    74  }
    75  
     76  // NewMapOfPresized creates a new MapOf instance with enough capacity
     77  // to hold sizeHint entries. The capacity is treated as the minimal capacity,
    78  // meaning that the underlying hash table will never shrink to
    79  // a smaller capacity. If sizeHint is zero or negative, the value
    80  // is ignored.
    81  func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
    82  	return newMapOfPresized[K, V](makeHasher[K](), sizeHint)
    83  }
    84  
    85  func newMapOfPresized[K comparable, V any](
    86  	hasher func(K, uint64) uint64,
    87  	sizeHint int,
    88  ) *MapOf[K, V] {
    89  	m := &MapOf[K, V]{}
    90  	m.resizeCond = *sync.NewCond(&m.resizeMu)
    91  	m.hasher = hasher
    92  	var table *mapOfTable[K, V]
    93  	if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
    94  		table = newMapOfTable[K, V](defaultMinMapTableLen)
    95  	} else {
    96  		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
    97  		table = newMapOfTable[K, V](int(tableLen))
    98  	}
    99  	m.minTableLen = len(table.buckets)
   100  	atomic.StorePointer(&m.table, unsafe.Pointer(table))
   101  	return m
   102  }
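
// exampleMapOfPresized is an illustrative sketch, not part of the original
// file: presizing avoids early grow cycles when the approximate number of
// entries is known up front. Per the code above, the initial table length is
// roughly sizeHint/entriesPerMapBucket rounded up to a power of two, and the
// table never shrinks below that length. The 1_000 hint is arbitrary.
func exampleMapOfPresized() {
	m := NewMapOfPresized[int, string](1_000)
	for i := 0; i < 1_000; i++ {
		m.Store(i, fmt.Sprintf("value-%d", i))
	}
	fmt.Println(m.Size()) // 1000
}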
   103  
   104  func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
   105  	buckets := make([]bucketOfPadded, minTableLen)
   106  	counterLen := minTableLen >> 10
   107  	if counterLen < minMapCounterLen {
   108  		counterLen = minMapCounterLen
   109  	} else if counterLen > maxMapCounterLen {
   110  		counterLen = maxMapCounterLen
   111  	}
   112  	counter := make([]counterStripe, counterLen)
   113  	t := &mapOfTable[K, V]{
   114  		buckets: buckets,
   115  		size:    counter,
   116  		seed:    makeSeed(),
   117  	}
   118  	return t
   119  }
   120  
    121  // Load returns the value stored in the map for a key, or the zero value
    122  // of type V if no value is present.
    123  // The ok result indicates whether the value was found in the map.
   124  func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
   125  	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   126  	hash := shiftHash(m.hasher(key, table.seed))
   127  	bidx := uint64(len(table.buckets)-1) & hash
   128  	b := &table.buckets[bidx]
   129  	for {
   130  		for i := 0; i < entriesPerMapBucket; i++ {
   131  			// We treat the hash code only as a hint, so there is no
   132  			// need to get an atomic snapshot.
   133  			h := atomic.LoadUint64(&b.hashes[i])
   134  			if h == uint64(0) || h != hash {
   135  				continue
   136  			}
   137  			eptr := atomic.LoadPointer(&b.entries[i])
   138  			if eptr == nil {
   139  				continue
   140  			}
   141  			e := (*entryOf[K, V])(eptr)
   142  			if e.key == key {
   143  				return e.value, true
   144  			}
   145  		}
   146  		bptr := atomic.LoadPointer(&b.next)
   147  		if bptr == nil {
   148  			return
   149  		}
   150  		b = (*bucketOfPadded)(bptr)
   151  	}
   152  }
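
// bucketIndexSketch is an illustrative helper, not part of the original file:
// it spells out the indexing trick used by Load (and doCompute below). Bucket
// counts are always powers of two, so masking the hash with len-1 selects a
// bucket exactly like hash % len would, but without a division.
func bucketIndexSketch(hash uint64, numBuckets int) uint64 {
	// Assumes numBuckets is a power of two, as guaranteed by the callers of
	// newMapOfTable in this package.
	return uint64(numBuckets-1) & hash
}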
   153  
   154  // Store sets the value for a key.
   155  func (m *MapOf[K, V]) Store(key K, value V) {
   156  	m.doCompute(
   157  		key,
   158  		func(V, bool) (V, bool) {
   159  			return value, false
   160  		},
   161  		false,
   162  		false,
   163  	)
   164  }
   165  
   166  // LoadOrStore returns the existing value for the key if present.
   167  // Otherwise, it stores and returns the given value.
   168  // The loaded result is true if the value was loaded, false if stored.
   169  func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
   170  	return m.doCompute(
   171  		key,
   172  		func(V, bool) (V, bool) {
   173  			return value, false
   174  		},
   175  		true,
   176  		false,
   177  	)
   178  }
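
// exampleLoadOrStore is an illustrative sketch, not part of the original
// file: the first insert for a key wins; later calls report loaded == true
// and return the stored value unchanged.
func exampleLoadOrStore() {
	m := NewMapOf[string, int]()
	actual, loaded := m.LoadOrStore("k", 1)
	fmt.Println(actual, loaded) // 1 false
	actual, loaded = m.LoadOrStore("k", 2)
	fmt.Println(actual, loaded) // 1 true
}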
   179  
   180  // LoadAndStore returns the existing value for the key if present,
   181  // while setting the new value for the key.
   182  // It stores the new value and returns the existing one, if present.
   183  // The loaded result is true if the existing value was loaded,
   184  // false otherwise.
   185  func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
   186  	return m.doCompute(
   187  		key,
   188  		func(V, bool) (V, bool) {
   189  			return value, false
   190  		},
   191  		false,
   192  		false,
   193  	)
   194  }
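
// exampleLoadAndStore is an illustrative sketch, not part of the original
// file: the new value is always stored; when the key was already present,
// the previous value is returned with loaded == true.
func exampleLoadAndStore() {
	m := NewMapOf[string, int]()
	prev, loaded := m.LoadAndStore("k", 1)
	fmt.Println(prev, loaded) // 1 false (nothing to load, the freshly stored value comes back)
	prev, loaded = m.LoadAndStore("k", 2)
	fmt.Println(prev, loaded) // 1 true (the previous value)
}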
   195  
   196  // LoadOrCompute returns the existing value for the key if present.
   197  // Otherwise, it computes the value using the provided function and
   198  // returns the computed value. The loaded result is true if the value
   199  // was loaded, false if stored.
   200  //
   201  // This call locks a hash table bucket while the compute function
   202  // is executed. It means that modifications on other entries in
    203  // the bucket will be blocked until valueFn returns. Consider
   204  // this when the function includes long-running operations.
   205  func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
   206  	return m.doCompute(
   207  		key,
   208  		func(V, bool) (V, bool) {
   209  			return valueFn(), false
   210  		},
   211  		true,
   212  		false,
   213  	)
   214  }
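
// exampleLoadOrCompute is an illustrative sketch, not part of the original
// file: the value is constructed only when the key is absent. Keep valueFn
// cheap, since the bucket stays locked while it runs (see the doc comment).
func exampleLoadOrCompute() {
	m := NewMapOf[string, []string]()
	v, loaded := m.LoadOrCompute("k", func() []string {
		return make([]string, 0, 8) // lazily built value; capacity is arbitrary
	})
	fmt.Println(len(v), loaded) // 0 false
}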
   215  
   216  // Compute either sets the computed new value for the key or deletes
   217  // the value for the key. When the delete result of the valueFn function
   218  // is set to true, the value will be deleted, if it exists. When delete
   219  // is set to false, the value is updated to the newValue.
    220  // The ok result indicates whether the value was computed and stored, and
    221  // is thus present in the map. The actual result contains the new value in cases where
   222  // the value was computed and stored. See the example for a few use cases.
   223  //
   224  // This call locks a hash table bucket while the compute function
   225  // is executed. It means that modifications on other entries in
    226  // the bucket will be blocked until valueFn returns. Consider
   227  // this when the function includes long-running operations.
   228  func (m *MapOf[K, V]) Compute(
   229  	key K,
   230  	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
   231  ) (actual V, ok bool) {
   232  	return m.doCompute(key, valueFn, false, true)
   233  }
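
// exampleCompute is an illustrative sketch, not part of the original file,
// covering both Compute outcomes: returning delete == false stores the new
// value, while returning delete == true removes the entry if it exists.
func exampleCompute() {
	m := NewMapOf[string, int]()
	// Atomically increment, starting from the zero value when absent.
	v, ok := m.Compute("hits", func(old int, loaded bool) (int, bool) {
		return old + 1, false
	})
	fmt.Println(v, ok) // 1 true
	// Delete the entry; the old value is returned and ok reports false,
	// since nothing is stored afterwards.
	v, ok = m.Compute("hits", func(old int, loaded bool) (int, bool) {
		return old, true
	})
	fmt.Println(v, ok) // 1 false
}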
   234  
   235  // LoadAndDelete deletes the value for a key, returning the previous
   236  // value if any. The loaded result reports whether the key was
   237  // present.
   238  func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
   239  	return m.doCompute(
   240  		key,
   241  		func(value V, loaded bool) (V, bool) {
   242  			return value, true
   243  		},
   244  		false,
   245  		false,
   246  	)
   247  }
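
// exampleLoadAndDelete is an illustrative sketch, not part of the original
// file: the removed value is returned with loaded == true, and deleting a
// missing key is a no-op reporting loaded == false.
func exampleLoadAndDelete() {
	m := NewMapOf[string, int]()
	m.Store("k", 7)
	v, loaded := m.LoadAndDelete("k")
	fmt.Println(v, loaded) // 7 true
	v, loaded = m.LoadAndDelete("k")
	fmt.Println(v, loaded) // 0 false
}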
   248  
   249  // Delete deletes the value for a key.
   250  func (m *MapOf[K, V]) Delete(key K) {
   251  	m.doCompute(
   252  		key,
   253  		func(value V, loaded bool) (V, bool) {
   254  			return value, true
   255  		},
   256  		false,
   257  		false,
   258  	)
   259  }
   260  
   261  func (m *MapOf[K, V]) doCompute(
   262  	key K,
   263  	valueFn func(oldValue V, loaded bool) (V, bool),
   264  	loadIfExists, computeOnly bool,
   265  ) (V, bool) {
   266  	// Read-only path.
   267  	if loadIfExists {
   268  		if v, ok := m.Load(key); ok {
   269  			return v, !computeOnly
   270  		}
   271  	}
   272  	// Write path.
   273  	for {
   274  	compute_attempt:
   275  		var (
   276  			emptyb       *bucketOfPadded
   277  			emptyidx     int
   278  			hintNonEmpty int
   279  		)
   280  		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   281  		tableLen := len(table.buckets)
   282  		hash := shiftHash(m.hasher(key, table.seed))
   283  		bidx := uint64(len(table.buckets)-1) & hash
   284  		rootb := &table.buckets[bidx]
   285  		rootb.mu.Lock()
    286  		// The following two checks must be done in the reverse order
    287  		// of the corresponding steps in the resize method.
   288  		if m.resizeInProgress() {
   289  			// Resize is in progress. Wait, then go for another attempt.
   290  			rootb.mu.Unlock()
   291  			m.waitForResize()
   292  			goto compute_attempt
   293  		}
   294  		if m.newerTableExists(table) {
   295  			// Someone resized the table. Go for another attempt.
   296  			rootb.mu.Unlock()
   297  			goto compute_attempt
   298  		}
   299  		b := rootb
   300  		for {
   301  			for i := 0; i < entriesPerMapBucket; i++ {
   302  				h := atomic.LoadUint64(&b.hashes[i])
   303  				if h == uint64(0) {
   304  					if emptyb == nil {
   305  						emptyb = b
   306  						emptyidx = i
   307  					}
   308  					continue
   309  				}
   310  				if h != hash {
   311  					hintNonEmpty++
   312  					continue
   313  				}
   314  				e := (*entryOf[K, V])(b.entries[i])
   315  				if e.key == key {
   316  					if loadIfExists {
   317  						rootb.mu.Unlock()
   318  						return e.value, !computeOnly
   319  					}
    320  					// In-place update/delete.
    321  					// A new entry is allocated for every update, so the live
    322  					// entry pointers stay unique. Otherwise the atomic snapshot
    323  					// taken by readers wouldn't be correct in case of multiple
    324  					// Store calls using the same value.
   325  					oldv := e.value
   326  					newv, del := valueFn(oldv, true)
   327  					if del {
   328  						// Deletion.
   329  						// First we update the hash, then the entry.
   330  						atomic.StoreUint64(&b.hashes[i], uint64(0))
   331  						atomic.StorePointer(&b.entries[i], nil)
   332  						leftEmpty := false
   333  						if hintNonEmpty == 0 {
   334  							leftEmpty = isEmptyBucketOf(b)
   335  						}
   336  						rootb.mu.Unlock()
   337  						table.addSize(bidx, -1)
   338  						// Might need to shrink the table.
   339  						if leftEmpty {
   340  							m.resize(table, mapShrinkHint)
   341  						}
   342  						return oldv, !computeOnly
   343  					}
   344  					newe := new(entryOf[K, V])
   345  					newe.key = key
   346  					newe.value = newv
   347  					atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe))
   348  					rootb.mu.Unlock()
   349  					if computeOnly {
   350  						// Compute expects the new value to be returned.
   351  						return newv, true
   352  					}
   353  					// LoadAndStore expects the old value to be returned.
   354  					return oldv, true
   355  				}
   356  				hintNonEmpty++
   357  			}
   358  			if b.next == nil {
   359  				if emptyb != nil {
   360  					// Insertion into an existing bucket.
   361  					var zeroedV V
   362  					newValue, del := valueFn(zeroedV, false)
   363  					if del {
   364  						rootb.mu.Unlock()
   365  						return zeroedV, false
   366  					}
   367  					newe := new(entryOf[K, V])
   368  					newe.key = key
   369  					newe.value = newValue
   370  					// First we update the hash, then the entry.
   371  					atomic.StoreUint64(&emptyb.hashes[emptyidx], hash)
   372  					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
   373  					rootb.mu.Unlock()
   374  					table.addSize(bidx, 1)
   375  					return newValue, computeOnly
   376  				}
   377  				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
   378  				if table.sumSize() > int64(growThreshold) {
   379  					// Need to grow the table. Then go for another attempt.
   380  					rootb.mu.Unlock()
   381  					m.resize(table, mapGrowHint)
   382  					goto compute_attempt
   383  				}
   384  				// Insertion into a new bucket.
   385  				var zeroedV V
   386  				newValue, del := valueFn(zeroedV, false)
   387  				if del {
   388  					rootb.mu.Unlock()
   389  					return newValue, false
   390  				}
   391  				// Create and append the bucket.
   392  				newb := new(bucketOfPadded)
   393  				newb.hashes[0] = hash
   394  				newe := new(entryOf[K, V])
   395  				newe.key = key
   396  				newe.value = newValue
   397  				newb.entries[0] = unsafe.Pointer(newe)
   398  				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
   399  				rootb.mu.Unlock()
   400  				table.addSize(bidx, 1)
   401  				return newValue, computeOnly
   402  			}
   403  			b = (*bucketOfPadded)(b.next)
   404  		}
   405  	}
   406  }
   407  
   408  func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
   409  	curTablePtr := atomic.LoadPointer(&m.table)
   410  	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
   411  }
   412  
   413  func (m *MapOf[K, V]) resizeInProgress() bool {
   414  	return atomic.LoadInt64(&m.resizing) == 1
   415  }
   416  
   417  func (m *MapOf[K, V]) waitForResize() {
   418  	m.resizeMu.Lock()
   419  	for m.resizeInProgress() {
   420  		m.resizeCond.Wait()
   421  	}
   422  	m.resizeMu.Unlock()
   423  }
   424  
   425  func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
   426  	knownTableLen := len(knownTable.buckets)
   427  	// Fast path for shrink attempts.
   428  	if hint == mapShrinkHint {
   429  		shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
   430  		if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
   431  			return
   432  		}
   433  	}
   434  	// Slow path.
   435  	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
   436  		// Someone else started resize. Wait for it to finish.
   437  		m.waitForResize()
   438  		return
   439  	}
   440  	var newTable *mapOfTable[K, V]
   441  	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   442  	tableLen := len(table.buckets)
   443  	switch hint {
   444  	case mapGrowHint:
   445  		// Grow the table with factor of 2.
   446  		atomic.AddInt64(&m.totalGrowths, 1)
   447  		newTable = newMapOfTable[K, V](tableLen << 1)
   448  	case mapShrinkHint:
   449  		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
   450  		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
   451  			// Shrink the table with factor of 2.
   452  			atomic.AddInt64(&m.totalShrinks, 1)
   453  			newTable = newMapOfTable[K, V](tableLen >> 1)
   454  		} else {
   455  			// No need to shrink. Wake up all waiters and give up.
   456  			m.resizeMu.Lock()
   457  			atomic.StoreInt64(&m.resizing, 0)
   458  			m.resizeCond.Broadcast()
   459  			m.resizeMu.Unlock()
   460  			return
   461  		}
   462  	case mapClearHint:
   463  		newTable = newMapOfTable[K, V](m.minTableLen)
   464  	default:
   465  		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
   466  	}
   467  	// Copy the data only if we're not clearing the map.
   468  	if hint != mapClearHint {
   469  		for i := 0; i < tableLen; i++ {
   470  			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
   471  			newTable.addSizePlain(uint64(i), copied)
   472  		}
   473  	}
   474  	// Publish the new table and wake up all waiters.
   475  	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
   476  	m.resizeMu.Lock()
   477  	atomic.StoreInt64(&m.resizing, 0)
   478  	m.resizeCond.Broadcast()
   479  	m.resizeMu.Unlock()
   480  }
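
// resizeThresholdsSketch is an illustrative helper, not part of the original
// file: it spells out the arithmetic used by doCompute and resize above. The
// table grows once its entry count exceeds
// tableLen*entriesPerMapBucket*mapLoadFactor and is shrunk once the count
// drops below tableLen*entriesPerMapBucket/mapShrinkFraction (the constants
// live elsewhere in this package).
func resizeThresholdsSketch(tableLen int) (grow, shrink int64) {
	grow = int64(float64(tableLen) * entriesPerMapBucket * mapLoadFactor)
	shrink = int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
	return grow, shrink
}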
   481  
   482  func copyBucketOf[K comparable, V any](
   483  	b *bucketOfPadded,
   484  	destTable *mapOfTable[K, V],
   485  	hasher func(K, uint64) uint64,
   486  ) (copied int) {
   487  	rootb := b
   488  	rootb.mu.Lock()
   489  	for {
   490  		for i := 0; i < entriesPerMapBucket; i++ {
   491  			if b.entries[i] != nil {
   492  				e := (*entryOf[K, V])(b.entries[i])
   493  				hash := shiftHash(hasher(e.key, destTable.seed))
   494  				bidx := uint64(len(destTable.buckets)-1) & hash
   495  				destb := &destTable.buckets[bidx]
   496  				appendToBucketOf(hash, b.entries[i], destb)
   497  				copied++
   498  			}
   499  		}
   500  		if b.next == nil {
   501  			rootb.mu.Unlock()
   502  			return
   503  		}
   504  		b = (*bucketOfPadded)(b.next)
   505  	}
   506  }
   507  
   508  // Range calls f sequentially for each key and value present in the
   509  // map. If f returns false, range stops the iteration.
   510  //
   511  // Range does not necessarily correspond to any consistent snapshot
   512  // of the Map's contents: no key will be visited more than once, but
   513  // if the value for any key is stored or deleted concurrently, Range
   514  // may reflect any mapping for that key from any point during the
   515  // Range call.
   516  //
   517  // It is safe to modify the map while iterating it, including entry
   518  // creation, modification and deletion. However, the concurrent
    519  // modification rules apply, i.e. the changes may not be reflected
   520  // in the subsequently iterated entries.
   521  func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
   522  	var zeroPtr unsafe.Pointer
    523  	// Pre-allocate a slice big enough to fit the entries of most bucket chains.
   524  	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket)
   525  	tablep := atomic.LoadPointer(&m.table)
   526  	table := *(*mapOfTable[K, V])(tablep)
   527  	for i := range table.buckets {
   528  		rootb := &table.buckets[i]
   529  		b := rootb
   530  		// Prevent concurrent modifications and copy all entries into
   531  		// the intermediate slice.
   532  		rootb.mu.Lock()
   533  		for {
   534  			for i := 0; i < entriesPerMapBucket; i++ {
   535  				if b.entries[i] != nil {
   536  					bentries = append(bentries, b.entries[i])
   537  				}
   538  			}
   539  			if b.next == nil {
   540  				rootb.mu.Unlock()
   541  				break
   542  			}
   543  			b = (*bucketOfPadded)(b.next)
   544  		}
   545  		// Call the function for all copied entries.
   546  		for j := range bentries {
   547  			entry := (*entryOf[K, V])(bentries[j])
   548  			if !f(entry.key, entry.value) {
   549  				return
   550  			}
   551  			// Remove the reference to avoid preventing the copied
   552  			// entries from being GCed until this method finishes.
   553  			bentries[j] = zeroPtr
   554  		}
   555  		bentries = bentries[:0]
   556  	}
   557  }
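
// exampleRange is an illustrative sketch, not part of the original file:
// Range visits each key at most once; returning false from f stops the
// iteration early. Concurrent modifications are allowed but, as documented
// above, may not be observed by the ongoing iteration.
func exampleRange() {
	m := NewMapOf[string, int]()
	m.Store("a", 1)
	m.Store("b", 2)
	total := 0
	m.Range(func(key string, value int) bool {
		total += value
		return true // keep iterating
	})
	fmt.Println(total) // 3
}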
   558  
   559  // Clear deletes all keys and values currently stored in the map.
   560  func (m *MapOf[K, V]) Clear() {
   561  	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   562  	m.resize(table, mapClearHint)
   563  }
   564  
    565  // Size returns the current size of the map.
   566  func (m *MapOf[K, V]) Size() int {
   567  	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   568  	return int(table.sumSize())
   569  }
   570  
   571  func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) {
   572  	for {
   573  		for i := 0; i < entriesPerMapBucket; i++ {
   574  			if b.entries[i] == nil {
   575  				b.hashes[i] = hash
   576  				b.entries[i] = entryPtr
   577  				return
   578  			}
   579  		}
   580  		if b.next == nil {
   581  			newb := new(bucketOfPadded)
   582  			newb.hashes[0] = hash
   583  			newb.entries[0] = entryPtr
   584  			b.next = unsafe.Pointer(newb)
   585  			return
   586  		}
   587  		b = (*bucketOfPadded)(b.next)
   588  	}
   589  }
   590  
   591  func isEmptyBucketOf(rootb *bucketOfPadded) bool {
   592  	b := rootb
   593  	for {
   594  		for i := 0; i < entriesPerMapBucket; i++ {
   595  			if b.entries[i] != nil {
   596  				return false
   597  			}
   598  		}
   599  		if b.next == nil {
   600  			return true
   601  		}
   602  		b = (*bucketOfPadded)(b.next)
   603  	}
   604  }
   605  
   606  func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
   607  	cidx := uint64(len(table.size)-1) & bucketIdx
   608  	atomic.AddInt64(&table.size[cidx].c, int64(delta))
   609  }
   610  
   611  func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
   612  	cidx := uint64(len(table.size)-1) & bucketIdx
   613  	table.size[cidx].c += int64(delta)
   614  }
   615  
   616  func (table *mapOfTable[K, V]) sumSize() int64 {
   617  	sum := int64(0)
   618  	for i := range table.size {
   619  		sum += atomic.LoadInt64(&table.size[i].c)
   620  	}
   621  	return sum
   622  }
   623  
   624  func shiftHash(h uint64) uint64 {
   625  	// uint64(0) is a reserved value which stands for an empty slot.
   626  	if h == uint64(0) {
   627  		return uint64(1)
   628  	}
   629  	return h
   630  }
   631  
   632  // O(N) operation; use for debug purposes only
   633  func (m *MapOf[K, V]) stats() mapStats {
   634  	stats := mapStats{
   635  		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
   636  		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
   637  		MinEntries:   math.MaxInt32,
   638  	}
   639  	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
   640  	stats.RootBuckets = len(table.buckets)
   641  	stats.Counter = int(table.sumSize())
   642  	stats.CounterLen = len(table.size)
   643  	for i := range table.buckets {
   644  		nentries := 0
   645  		b := &table.buckets[i]
   646  		stats.TotalBuckets++
   647  		for {
   648  			nentriesLocal := 0
   649  			stats.Capacity += entriesPerMapBucket
   650  			for i := 0; i < entriesPerMapBucket; i++ {
   651  				if atomic.LoadPointer(&b.entries[i]) != nil {
   652  					stats.Size++
   653  					nentriesLocal++
   654  				}
   655  			}
   656  			nentries += nentriesLocal
   657  			if nentriesLocal == 0 {
   658  				stats.EmptyBuckets++
   659  			}
   660  			if b.next == nil {
   661  				break
   662  			}
   663  			b = (*bucketOfPadded)(b.next)
   664  			stats.TotalBuckets++
   665  		}
   666  		if nentries < stats.MinEntries {
   667  			stats.MinEntries = nentries
   668  		}
   669  		if nentries > stats.MaxEntries {
   670  			stats.MaxEntries = nentries
   671  		}
   672  	}
   673  	return stats
   674  }