github.com/puzpuzpuz/xsync/v3@v3.1.1-0.20240225193106-cbe4ec1e954f/mapof.go

package xsync

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
	"unsafe"
)

// MapOf is like a Go map[K]V but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A MapOf must not be copied after first use.
//
// MapOf uses a modified version of the Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around the idea of organizing the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no writes to memory and take no
// mutexes or any other sort of locks. Due to this design, MapOf
// outperforms sync.Map in all considered scenarios.
type MapOf[K comparable, V any] struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapOfTable
	hasher       func(K, uint64) uint64
	minTableLen  int
}
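
// The following function is an illustrative usage sketch, not part of the
// upstream library: it shows the basic Store/Load/Delete flow on a MapOf.
// The key and value types are arbitrary choices for the example.
func exampleMapOfBasicUsage() {
	m := NewMapOf[string, int]()
	m.Store("answer", 42)
	if v, ok := m.Load("answer"); ok {
		fmt.Println("answer =", v) // prints "answer = 42"
	}
	m.Delete("answer")
	fmt.Println(m.Size()) // prints "0"
}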

type mapOfTable[K comparable, V any] struct {
	buckets []bucketOfPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	seed uint64
}

// bucketOfPadded is a CL-sized map bucket holding up to
// entriesPerMapBucket entries.
type bucketOfPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
	bucketOf
}

type bucketOf struct {
	hashes  [entriesPerMapBucket]uint64
	entries [entriesPerMapBucket]unsafe.Pointer // *entryOf
	next    unsafe.Pointer                      // *bucketOfPadded
	mu      sync.Mutex
}

// entryOf is an immutable map entry.
type entryOf[K comparable, V any] struct {
	key   K
	value V
}

// NewMapOf creates a new MapOf instance.
func NewMapOf[K comparable, V any]() *MapOf[K, V] {
	return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket)
}

// NewMapOfPresized creates a new MapOf instance with capacity enough
// to hold sizeHint entries. The capacity is treated as the minimal
// capacity, meaning that the underlying hash table will never shrink
// to a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
	return newMapOfPresized[K, V](makeHasher[K](), sizeHint)
}

func newMapOfPresized[K comparable, V any](
	hasher func(K, uint64) uint64,
	sizeHint int,
) *MapOf[K, V] {
	m := &MapOf[K, V]{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	m.hasher = hasher
	var table *mapOfTable[K, V]
	if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
		table = newMapOfTable[K, V](defaultMinMapTableLen)
	} else {
		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
		table = newMapOfTable[K, V](int(tableLen))
	}
	m.minTableLen = len(table.buckets)
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
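
// A hypothetical sizing sketch (not part of the library): when the expected
// number of entries is known up front, NewMapOfPresized avoids the early
// growth steps that NewMapOf would otherwise go through while filling up.
func exampleMapOfPresized() {
	const expectedEntries = 1_000_000
	m := NewMapOfPresized[int, string](expectedEntries)
	for i := 0; i < expectedEntries; i++ {
		m.Store(i, "value")
	}
	fmt.Println(m.Size()) // prints "1000000"
}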

func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
	buckets := make([]bucketOfPadded, minTableLen)
	counterLen := minTableLen >> 10
	if counterLen < minMapCounterLen {
		counterLen = minMapCounterLen
	} else if counterLen > maxMapCounterLen {
		counterLen = maxMapCounterLen
	}
	counter := make([]counterStripe, counterLen)
	t := &mapOfTable[K, V]{
		buckets: buckets,
		size:    counter,
		seed:    makeSeed(),
	}
	return t
}

// Load returns the value stored in the map for a key, or the zero value
// of type V if no value is present.
// The ok result indicates whether the value was found in the map.
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	hash := shiftHash(m.hasher(key, table.seed))
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			// We treat the hash code only as a hint, so there is no
			// need to get an atomic snapshot.
			h := atomic.LoadUint64(&b.hashes[i])
			if h == uint64(0) || h != hash {
				continue
			}
			eptr := atomic.LoadPointer(&b.entries[i])
			if eptr == nil {
				continue
			}
			e := (*entryOf[K, V])(eptr)
			if e.key == key {
				return e.value, true
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketOfPadded)(bptr)
	}
}
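
// Illustrative sketch (not in the upstream file): on a miss, Load returns the
// zero value of V together with ok == false, so callers must check ok before
// using the value.
func exampleMapOfLoadMiss() {
	m := NewMapOf[string, []byte]()
	buf, ok := m.Load("missing")
	fmt.Println(buf == nil, ok) // prints "true false"
}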

// Store sets the value for a key.
func (m *MapOf[K, V]) Store(key K, value V) {
	m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		true,
		false,
	)
}

// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// It stores the new value and returns the existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
	return m.doCompute(
		key,
		func(V, bool) (V, bool) {
			return valueFn(), false
		},
		true,
		false,
	)
}
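
// Illustrative sketch (not part of the library): LoadOrStore and LoadOrCompute
// implement a common "get or create" caching pattern. LoadOrCompute is the
// better fit when constructing the value is expensive, since valueFn runs only
// on a miss. The loadConfig parameter below is a hypothetical stand-in.
func exampleMapOfGetOrCreate(loadConfig func(name string) string) {
	cache := NewMapOf[string, string]()
	// Eagerly constructed value; stored only if the key is absent.
	v1, loaded := cache.LoadOrStore("greeting", "hello")
	fmt.Println(v1, loaded) // prints "hello false" on the first call
	// Lazily constructed value; loadConfig runs only when "app.yaml" is missing.
	v2, _ := cache.LoadOrCompute("app.yaml", func() string {
		return loadConfig("app.yaml")
	})
	_ = v2
}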

// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether the value was computed and stored,
// and is thus present in the map. The actual result contains the new
// value in cases where the value was computed and stored.
// See the example for a few use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until the valueFn executes. Consider
// this when the function includes long-running operations.
func (m *MapOf[K, V]) Compute(
	key K,
	valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
) (actual V, ok bool) {
	return m.doCompute(key, valueFn, false, true)
}
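
// Illustrative sketch (not part of the library) of the Compute use cases the
// doc comment refers to: an atomic increment that treats a missing key as
// zero, followed by a decrement that deletes the entry once it reaches zero.
func exampleMapOfCompute() {
	counters := NewMapOf[string, int]()
	// Increment: loaded is false on the first call, so old is the zero value.
	v, ok := counters.Compute("requests", func(old int, loaded bool) (int, bool) {
		return old + 1, false // delete == false keeps the entry
	})
	fmt.Println(v, ok) // prints "1 true"
	// Decrement and delete the entry when the counter drops to zero.
	counters.Compute("requests", func(old int, loaded bool) (int, bool) {
		newVal := old - 1
		return newVal, newVal == 0 // delete == true removes the key
	})
	fmt.Println(counters.Size()) // prints "0"
}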

// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
	return m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			return value, true
		},
		false,
		false,
	)
}

// Delete deletes the value for a key.
func (m *MapOf[K, V]) Delete(key K) {
	m.doCompute(
		key,
		func(value V, loaded bool) (V, bool) {
			return value, true
		},
		false,
		false,
	)
}
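
// Illustrative sketch (not part of the library): LoadAndDelete atomically
// takes ownership of a value while removing it, which suits hand-off
// patterns such as draining pending work by key.
func exampleMapOfLoadAndDelete() {
	pending := NewMapOf[string, func()]()
	pending.Store("job-1", func() { fmt.Println("running job-1") })
	if job, loaded := pending.LoadAndDelete("job-1"); loaded {
		job() // the entry is already gone; no other goroutine can claim it
	}
}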

func (m *MapOf[K, V]) doCompute(
	key K,
	valueFn func(oldValue V, loaded bool) (V, bool),
	loadIfExists, computeOnly bool,
) (V, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketOfPadded
			emptyidx     int
			hintNonEmpty int
		)
		table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := shiftHash(m.hasher(key, table.seed))
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		rootb.mu.Lock()
		// The following two checks must go in the reverse order
		// of the checks in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			rootb.mu.Unlock()
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			rootb.mu.Unlock()
			goto compute_attempt
		}
		b := rootb
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				h := atomic.LoadUint64(&b.hashes[i])
				if h == uint64(0) {
					if emptyb == nil {
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if h != hash {
					hintNonEmpty++
					continue
				}
				e := (*entryOf[K, V])(b.entries[i])
				if e.key == key {
					if loadIfExists {
						rootb.mu.Unlock()
						return e.value, !computeOnly
					}
					// In-place update/delete.
					// We get a copy of the value via an interface{} on each call,
					// thus the live value pointers are unique. Otherwise atomic
					// snapshot won't be correct in case of multiple Store calls
					// using the same value.
					oldv := e.value
					newv, del := valueFn(oldv, true)
					if del {
						// Deletion.
						// First we update the hash, then the entry.
						atomic.StoreUint64(&b.hashes[i], uint64(0))
						atomic.StorePointer(&b.entries[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucketOf(b)
						}
						rootb.mu.Unlock()
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldv, !computeOnly
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newv
					atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					if computeOnly {
						// Compute expects the new value to be returned.
						return newv, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldv, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroedV V
					newValue, del := valueFn(zeroedV, false)
					if del {
						rootb.mu.Unlock()
						return zeroedV, false
					}
					newe := new(entryOf[K, V])
					newe.key = key
					newe.value = newValue
					// First we update the hash, then the entry.
					atomic.StoreUint64(&emptyb.hashes[emptyidx], hash)
					atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
					rootb.mu.Unlock()
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					rootb.mu.Unlock()
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroedV V
				newValue, del := valueFn(zeroedV, false)
				if del {
					rootb.mu.Unlock()
					return newValue, false
				}
				// Create and append the bucket.
				newb := new(bucketOfPadded)
				newb.hashes[0] = hash
				newe := new(entryOf[K, V])
				newe.key = key
				newe.value = newValue
				newb.entries[0] = unsafe.Pointer(newe)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				rootb.mu.Unlock()
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketOfPadded)(b.next)
		}
	}
}

func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
	curTablePtr := atomic.LoadPointer(&m.table)
	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
}

func (m *MapOf[K, V]) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}

func (m *MapOf[K, V]) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}

func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
		if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapOfTable[K, V]
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table by a factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapOfTable[K, V](tableLen << 1)
	case mapShrinkHint:
		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table by a factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapOfTable[K, V](tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapOfTable[K, V](m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
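
// The resize path above combines three pieces: a CAS on the resizing flag to
// claim exclusive ownership of the resize, a mutex-guarded condition variable
// to park and wake concurrent writers, and an atomic pointer store to publish
// the new table. The type below is a hypothetical, stripped-down sketch of
// that gate for illustration only; it is not used by MapOf and omits the
// table copying.
type resizeGateSketch struct {
	busy int64
	mu   sync.Mutex
	cond *sync.Cond
}

func newResizeGateSketch() *resizeGateSketch {
	g := &resizeGateSketch{}
	g.cond = sync.NewCond(&g.mu)
	return g
}

// enter returns true if the caller won the right to perform the exclusive
// work; otherwise it blocks until the current owner calls leave, then
// returns false.
func (g *resizeGateSketch) enter() bool {
	if atomic.CompareAndSwapInt64(&g.busy, 0, 1) {
		return true
	}
	g.mu.Lock()
	for atomic.LoadInt64(&g.busy) == 1 {
		g.cond.Wait()
	}
	g.mu.Unlock()
	return false
}

// leave marks the exclusive work as finished and wakes up all waiters.
func (g *resizeGateSketch) leave() {
	g.mu.Lock()
	atomic.StoreInt64(&g.busy, 0)
	g.cond.Broadcast()
	g.mu.Unlock()
}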

func copyBucketOf[K comparable, V any](
	b *bucketOfPadded,
	destTable *mapOfTable[K, V],
	hasher func(K, uint64) uint64,
) (copied int) {
	rootb := b
	rootb.mu.Lock()
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] != nil {
				e := (*entryOf[K, V])(b.entries[i])
				hash := shiftHash(hasher(e.key, destTable.seed))
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucketOf(hash, b.entries[i], destb)
				copied++
			}
		}
		if b.next == nil {
			rootb.mu.Unlock()
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rules apply, i.e. the changes may not be reflected
// in the subsequently iterated entries.
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
	var zeroPtr unsafe.Pointer
	// Pre-allocate array big enough to fit entries for most hash tables.
	bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapOfTable[K, V])(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		rootb.mu.Lock()
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.entries[i] != nil {
					bentries = append(bentries, b.entries[i])
				}
			}
			if b.next == nil {
				rootb.mu.Unlock()
				break
			}
			b = (*bucketOfPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			entry := (*entryOf[K, V])(bentries[j])
			if !f(entry.key, entry.value) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroPtr
		}
		bentries = bentries[:0]
	}
}
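
// Illustrative sketch (not part of the library): Range visits entries without
// taking a global lock, and returning false from the callback stops the
// iteration early.
func exampleMapOfRange() {
	m := NewMapOf[string, int]()
	m.Store("a", 1)
	m.Store("b", 2)
	total := 0
	m.Range(func(key string, value int) bool {
		total += value
		return true // continue; return false to stop early
	})
	fmt.Println(total) // prints "3"
}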

// Clear deletes all keys and values currently stored in the map.
func (m *MapOf[K, V]) Clear() {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}

// Size returns the current size of the map.
func (m *MapOf[K, V]) Size() int {
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}
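
// Illustrative sketch (not part of the library): Size is served from the
// striped counter, so it is cheap to read, while Clear swaps in a fresh
// minimum-sized table in a single resize step.
func exampleMapOfSizeAndClear() {
	m := NewMapOf[int, int]()
	for i := 0; i < 100; i++ {
		m.Store(i, i*i)
	}
	fmt.Println(m.Size()) // prints "100"
	m.Clear()
	fmt.Println(m.Size()) // prints "0"
}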

func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] == nil {
				b.hashes[i] = hash
				b.entries[i] = entryPtr
				return
			}
		}
		if b.next == nil {
			newb := new(bucketOfPadded)
			newb.hashes[0] = hash
			newb.entries[0] = entryPtr
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketOfPadded)(b.next)
	}
}

func isEmptyBucketOf(rootb *bucketOfPadded) bool {
	b := rootb
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.entries[i] != nil {
				return false
			}
		}
		if b.next == nil {
			return true
		}
		b = (*bucketOfPadded)(b.next)
	}
}

func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}

func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}

func (table *mapOfTable[K, V]) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}

func shiftHash(h uint64) uint64 {
	// uint64(0) is a reserved value which stands for an empty slot.
	if h == uint64(0) {
		return uint64(1)
	}
	return h
}

// O(N) operation; use for debug purposes only
func (m *MapOf[K, V]) stats() mapStats {
	stats := mapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				if atomic.LoadPointer(&b.entries[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketOfPadded)(b.next)
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}