github.com/fufuok/utils@v1.0.10/xsync/map.go

package xsync

import (
	"fmt"
	"math"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"unsafe"
)

type mapResizeHint int

const (
	mapGrowHint   mapResizeHint = 0
	mapShrinkHint mapResizeHint = 1
	mapClearHint  mapResizeHint = 2
)

const (
	// number of entries per bucket; 3 entries lead to a size of 64B
	// (one cache line) on 64-bit machines
	entriesPerMapBucket = 3
	// threshold fraction of table occupation to start a table shrinking
	// when deleting the last entry in a bucket chain
	mapShrinkFraction = 128
	// map load factor to trigger a table resize during insertion;
	// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
	// key-value pairs (this is a soft limit)
	mapLoadFactor = 0.75
	// minimal table size, i.e. number of buckets; thus, minimal map
	// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
	defaultMinMapTableLen = 32
	// minimum counter stripes to use
	minMapCounterLen = 8
	// maximum counter stripes to use; stands for around 4KB of memory
	maxMapCounterLen = 32
)
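
// Worked example (illustrative, not part of the original file): for the
// minimal table of defaultMinMapTableLen (32) buckets, the soft capacity
// and the grow threshold used in doCompute evaluate as follows.
func exampleSizingArithmetic() (capacity int, growThreshold float64) {
	capacity = entriesPerMapBucket * defaultMinMapTableLen // 3 * 32 = 96 entry slots
	growThreshold = mapLoadFactor * entriesPerMapBucket * defaultMinMapTableLen // 0.75 * 96 = 72 entries
	return
}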

var (
	topHashMask       = uint64((1<<20)-1) << 44
	topHashEntryMasks = [3]uint64{
		topHashMask,
		topHashMask >> 20,
		topHashMask >> 40,
	}
)

// Map is like a Go map[string]interface{} but is safe for concurrent
// use by multiple goroutines without additional locking or
// coordination. It follows the interface of sync.Map with
// a number of valuable extensions like Compute or Size.
//
// A Map must not be copied after first use.
//
// Map uses a modified version of the Cache-Line Hash Table (CLHT)
// data structure: https://github.com/LPD-EPFL/CLHT
//
// CLHT is built around the idea of organizing the hash table in
// cache-line-sized buckets, so that on all modern CPUs update
// operations complete with at most one cache-line transfer.
// Also, Get operations involve no writes to memory and no
// mutexes or any other sort of locks. Due to this design, in all
// considered scenarios Map outperforms sync.Map.
//
// One important difference from sync.Map is that only string keys
// are supported. That's because the Go standard library does not
// expose the built-in hash functions for interface{} values.
type Map struct {
	totalGrowths int64
	totalShrinks int64
	resizing     int64          // resize in progress flag; updated atomically
	resizeMu     sync.Mutex     // only used along with resizeCond
	resizeCond   sync.Cond      // used to wake up resize waiters (concurrent modifications)
	table        unsafe.Pointer // *mapTable
	minTableLen  int
}

type mapTable struct {
	buckets []bucketPadded
	// striped counter for number of table entries;
	// used to determine if a table shrinking is needed
	// occupies min(buckets_memory/1024, 64KB) of memory
	size []counterStripe
	seed uint64
}

type counterStripe struct {
	c int64
	//lint:ignore U1000 prevents false sharing
	pad [cacheLineSize - 8]byte
}

type bucketPadded struct {
	//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
	pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
	bucket
}

type bucket struct {
	next   unsafe.Pointer // *bucketPadded
	keys   [entriesPerMapBucket]unsafe.Pointer
	values [entriesPerMapBucket]unsafe.Pointer
	// topHashMutex is a 2-in-1 value.
	//
	// It contains packed top 20 bits (20 MSBs) of hash codes for keys
	// stored in the bucket:
	// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
	// |      20 bits     |      20 bits     |      20 bits     |     3 bits      | 1 bit |
	//
	// The least significant bit is used for the mutex (TTAS spinlock).
	topHashMutex uint64
}

type rangeEntry struct {
	key   unsafe.Pointer
	value unsafe.Pointer
}

// NewMap creates a new Map instance.
func NewMap() *Map {
	return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket)
}

// NewMapPresized creates a new Map instance with capacity enough to hold
// sizeHint entries. The capacity is treated as the minimal capacity,
// meaning that the underlying hash table will never shrink to
// a smaller capacity. If sizeHint is zero or negative, the value
// is ignored.
func NewMapPresized(sizeHint int) *Map {
	m := &Map{}
	m.resizeCond = *sync.NewCond(&m.resizeMu)
	var table *mapTable
	if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
		table = newMapTable(defaultMinMapTableLen)
	} else {
		tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
		table = newMapTable(int(tableLen))
	}
	m.minTableLen = len(table.buckets)
	atomic.StorePointer(&m.table, unsafe.Pointer(table))
	return m
}
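
// Usage sketch (illustrative, not part of the original file): presizing for
// an expected number of entries avoids intermediate grow steps and the
// table copying they imply.
func exampleNewMapPresized() {
	m := NewMapPresized(10000) // table sized so ~10k entries fit without growing
	m.Store("k", "v")
}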

func newMapTable(minTableLen int) *mapTable {
	buckets := make([]bucketPadded, minTableLen)
	counterLen := minTableLen >> 10
	if counterLen < minMapCounterLen {
		counterLen = minMapCounterLen
	} else if counterLen > maxMapCounterLen {
		counterLen = maxMapCounterLen
	}
	counter := make([]counterStripe, counterLen)
	t := &mapTable{
		buckets: buckets,
		size:    counter,
		seed:    makeSeed(),
	}
	return t
}

// Load returns the value stored in the map for a key, or nil if no
// value is present.
// The ok result indicates whether the value was found in the map.
func (m *Map) Load(key string) (value interface{}, ok bool) {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	hash := hashString(key, table.seed)
	bidx := uint64(len(table.buckets)-1) & hash
	b := &table.buckets[bidx]
	for {
		topHashes := atomic.LoadUint64(&b.topHashMutex)
		for i := 0; i < entriesPerMapBucket; i++ {
			if !topHashMatch(hash, topHashes, i) {
				continue
			}
		atomic_snapshot:
			// Start atomic snapshot.
			vp := atomic.LoadPointer(&b.values[i])
			kp := atomic.LoadPointer(&b.keys[i])
			if kp != nil && vp != nil {
				if key == derefKey(kp) {
					if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
						// Atomic snapshot succeeded.
						return derefValue(vp), true
					}
					// Concurrent update/remove. Go for another spin.
					goto atomic_snapshot
				}
			}
		}
		bptr := atomic.LoadPointer(&b.next)
		if bptr == nil {
			return
		}
		b = (*bucketPadded)(bptr)
	}
}

// Store sets the value for a key.
func (m *Map) Store(key string, value interface{}) {
	m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		false,
		false,
	)
}
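
// Usage sketch (illustrative, not part of the original file): basic
// Store/Load, mirroring the sync.Map interface.
func exampleStoreLoad() {
	m := NewMap()
	m.Store("answer", 42)
	if v, ok := m.Load("answer"); ok {
		_ = v.(int) // values come back as interface{}, so assert the type
	}
}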

// LoadOrStore returns the existing value for the key if present.
// Otherwise, it stores and returns the given value.
// The loaded result is true if the value was loaded, false if stored.
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		true,
		false,
	)
}

// LoadAndStore returns the existing value for the key if present,
// while setting the new value for the key.
// It stores the new value and returns the existing one, if present.
// The loaded result is true if the existing value was loaded,
// false otherwise.
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return value, false
		},
		false,
		false,
	)
}

// LoadOrCompute returns the existing value for the key if present.
// Otherwise, it computes the value using the provided function and
// returns the computed value. The loaded result is true if the value
// was loaded, false if stored.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until valueFn finishes executing. Consider
// this when the function includes long-running operations.
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(interface{}, bool) (interface{}, bool) {
			return valueFn(), false
		},
		true,
		false,
	)
}
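
// Usage sketch (illustrative, not part of the original file): lazy
// initialization with LoadOrCompute. Keep valueFn cheap, since the bucket
// stays locked while it runs.
func exampleLoadOrCompute(m *Map) map[string]string {
	v, _ := m.LoadOrCompute("config", func() interface{} {
		return make(map[string]string) // typically constructed only once per absent key
	})
	return v.(map[string]string)
}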

// Compute either sets the computed new value for the key or deletes
// the value for the key. When the delete result of the valueFn function
// is set to true, the value will be deleted, if it exists. When delete
// is set to false, the value is updated to the newValue.
// The ok result indicates whether the value was computed and stored and
// is thus present in the map. The actual result contains the new value in
// cases where the value was computed and stored. See the example for a few
// use cases.
//
// This call locks a hash table bucket while the compute function
// is executed. It means that modifications on other entries in
// the bucket will be blocked until valueFn finishes executing. Consider
// this when the function includes long-running operations.
func (m *Map) Compute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
) (actual interface{}, ok bool) {
	return m.doCompute(key, valueFn, false, true)
}
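
// Usage sketch (illustrative, not part of the original file): Compute as an
// atomic per-key counter; returning delete=true instead would remove the
// entry.
func exampleComputeIncrement(m *Map, key string) int {
	v, _ := m.Compute(key, func(old interface{}, loaded bool) (interface{}, bool) {
		n := 0
		if loaded {
			n = old.(int) // existing value, if any
		}
		return n + 1, false // false: store n+1, do not delete
	})
	return v.(int)
}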

// LoadAndDelete deletes the value for a key, returning the previous
// value if any. The loaded result reports whether the key was
// present.
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
	return m.doCompute(
		key,
		func(value interface{}, loaded bool) (interface{}, bool) {
			return value, true
		},
		false,
		false,
	)
}

// Delete deletes the value for a key.
func (m *Map) Delete(key string) {
	m.doCompute(
		key,
		func(value interface{}, loaded bool) (interface{}, bool) {
			return value, true
		},
		false,
		false,
	)
}

// doCompute implements the write paths shared by Store, LoadOrStore,
// LoadAndStore, LoadOrCompute, Compute, LoadAndDelete and Delete.
// loadIfExists short-circuits to a plain Load when the key is present;
// computeOnly selects between returning the new value (Compute) and
// the old value (LoadAndStore-like methods).
func (m *Map) doCompute(
	key string,
	valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
	loadIfExists, computeOnly bool,
) (interface{}, bool) {
	// Read-only path.
	if loadIfExists {
		if v, ok := m.Load(key); ok {
			return v, !computeOnly
		}
	}
	// Write path.
	for {
	compute_attempt:
		var (
			emptyb       *bucketPadded
			emptyidx     int
			hintNonEmpty int
		)
		table := (*mapTable)(atomic.LoadPointer(&m.table))
		tableLen := len(table.buckets)
		hash := hashString(key, table.seed)
		bidx := uint64(len(table.buckets)-1) & hash
		rootb := &table.buckets[bidx]
		lockBucket(&rootb.topHashMutex)
		// The following two checks must go in reverse to what's
		// in the resize method.
		if m.resizeInProgress() {
			// Resize is in progress. Wait, then go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			m.waitForResize()
			goto compute_attempt
		}
		if m.newerTableExists(table) {
			// Someone resized the table. Go for another attempt.
			unlockBucket(&rootb.topHashMutex)
			goto compute_attempt
		}
		b := rootb
		for {
			topHashes := atomic.LoadUint64(&b.topHashMutex)
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] == nil {
					if emptyb == nil {
						emptyb = b
						emptyidx = i
					}
					continue
				}
				if !topHashMatch(hash, topHashes, i) {
					hintNonEmpty++
					continue
				}
				if key == derefKey(b.keys[i]) {
					vp := b.values[i]
					if loadIfExists {
						unlockBucket(&rootb.topHashMutex)
						return derefValue(vp), !computeOnly
					}
					// In-place update/delete.
					// We get a copy of the value via an interface{} on each
					// call, so the live value pointers are unique. Otherwise
					// the atomic snapshot wouldn't be correct in case of
					// multiple Store calls using the same value.
					oldValue := derefValue(vp)
					newValue, del := valueFn(oldValue, true)
					if del {
						// Deletion.
						// First we update the value, then the key.
						// This is important for atomic snapshot states.
						atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
						atomic.StorePointer(&b.values[i], nil)
						atomic.StorePointer(&b.keys[i], nil)
						leftEmpty := false
						if hintNonEmpty == 0 {
							leftEmpty = isEmptyBucket(b)
						}
						unlockBucket(&rootb.topHashMutex)
						table.addSize(bidx, -1)
						// Might need to shrink the table.
						if leftEmpty {
							m.resize(table, mapShrinkHint)
						}
						return oldValue, !computeOnly
					}
					nvp := unsafe.Pointer(&newValue)
					if assertionsEnabled && vp == nvp {
						panic("non-unique value pointer")
					}
					atomic.StorePointer(&b.values[i], nvp)
					unlockBucket(&rootb.topHashMutex)
					if computeOnly {
						// Compute expects the new value to be returned.
						return newValue, true
					}
					// LoadAndStore expects the old value to be returned.
					return oldValue, true
				}
				hintNonEmpty++
			}
			if b.next == nil {
				if emptyb != nil {
					// Insertion into an existing bucket.
					var zeroedV interface{}
					newValue, del := valueFn(zeroedV, false)
					if del {
						unlockBucket(&rootb.topHashMutex)
						return zeroedV, false
					}
					// First we update the value, then the key.
					// This is important for atomic snapshot states.
					topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
					atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
					atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
					atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
					unlockBucket(&rootb.topHashMutex)
					table.addSize(bidx, 1)
					return newValue, computeOnly
				}
				growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
				if table.sumSize() > int64(growThreshold) {
					// Need to grow the table. Then go for another attempt.
					unlockBucket(&rootb.topHashMutex)
					m.resize(table, mapGrowHint)
					goto compute_attempt
				}
				// Insertion into a new bucket.
				var zeroedV interface{}
				newValue, del := valueFn(zeroedV, false)
				if del {
					unlockBucket(&rootb.topHashMutex)
					return newValue, false
				}
				// Create and append the bucket.
				newb := new(bucketPadded)
				newb.keys[0] = unsafe.Pointer(&key)
				newb.values[0] = unsafe.Pointer(&newValue)
				newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
				atomic.StorePointer(&b.next, unsafe.Pointer(newb))
				unlockBucket(&rootb.topHashMutex)
				table.addSize(bidx, 1)
				return newValue, computeOnly
			}
			b = (*bucketPadded)(b.next)
		}
	}
}

func (m *Map) newerTableExists(table *mapTable) bool {
	curTablePtr := atomic.LoadPointer(&m.table)
	return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
}

func (m *Map) resizeInProgress() bool {
	return atomic.LoadInt64(&m.resizing) == 1
}

func (m *Map) waitForResize() {
	m.resizeMu.Lock()
	for m.resizeInProgress() {
		m.resizeCond.Wait()
	}
	m.resizeMu.Unlock()
}

func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
	knownTableLen := len(knownTable.buckets)
	// Fast path for shrink attempts.
	if hint == mapShrinkHint {
		shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
		if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
			return
		}
	}
	// Slow path.
	if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
		// Someone else started resize. Wait for it to finish.
		m.waitForResize()
		return
	}
	var newTable *mapTable
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	tableLen := len(table.buckets)
	switch hint {
	case mapGrowHint:
		// Grow the table by a factor of 2.
		atomic.AddInt64(&m.totalGrowths, 1)
		newTable = newMapTable(tableLen << 1)
	case mapShrinkHint:
		shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
		if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
			// Shrink the table by a factor of 2.
			atomic.AddInt64(&m.totalShrinks, 1)
			newTable = newMapTable(tableLen >> 1)
		} else {
			// No need to shrink. Wake up all waiters and give up.
			m.resizeMu.Lock()
			atomic.StoreInt64(&m.resizing, 0)
			m.resizeCond.Broadcast()
			m.resizeMu.Unlock()
			return
		}
	case mapClearHint:
		newTable = newMapTable(m.minTableLen)
	default:
		panic(fmt.Sprintf("unexpected resize hint: %d", hint))
	}
	// Copy the data only if we're not clearing the map.
	if hint != mapClearHint {
		for i := 0; i < tableLen; i++ {
			copied := copyBucket(&table.buckets[i], newTable)
			newTable.addSizePlain(uint64(i), copied)
		}
	}
	// Publish the new table and wake up all waiters.
	atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
	m.resizeMu.Lock()
	atomic.StoreInt64(&m.resizing, 0)
	m.resizeCond.Broadcast()
	m.resizeMu.Unlock()
}
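
// Worked example (illustrative, not part of the original file): the shrink
// threshold with mapShrinkFraction of 128. A 1024-bucket table (3072 entry
// slots) only shrinks once it holds no more than 3072/128 = 24 entries and
// a deletion leaves a bucket chain empty.
func exampleShrinkThreshold(tableLen int) int64 {
	return int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
}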

// copyBucket copies all entries of the given bucket chain into destTable.
// The destination table has not been published yet, so plain (non-atomic)
// writes via appendToBucket are safe.
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
	rootb := b
	lockBucket(&rootb.topHashMutex)
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				k := derefKey(b.keys[i])
				hash := hashString(k, destTable.seed)
				bidx := uint64(len(destTable.buckets)-1) & hash
				destb := &destTable.buckets[bidx]
				appendToBucket(hash, b.keys[i], b.values[i], destb)
				copied++
			}
		}
		if b.next == nil {
			unlockBucket(&rootb.topHashMutex)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}

func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] == nil {
				b.keys[i] = keyPtr
				b.values[i] = valPtr
				b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
				return
			}
		}
		if b.next == nil {
			newb := new(bucketPadded)
			newb.keys[0] = keyPtr
			newb.values[0] = valPtr
			newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
			b.next = unsafe.Pointer(newb)
			return
		}
		b = (*bucketPadded)(b.next)
	}
}

func isEmptyBucket(rootb *bucketPadded) bool {
	b := rootb
	for {
		for i := 0; i < entriesPerMapBucket; i++ {
			if b.keys[i] != nil {
				return false
			}
		}
		if b.next == nil {
			return true
		}
		b = (*bucketPadded)(b.next)
	}
}

// Range calls f sequentially for each key and value present in the
// map. If f returns false, range stops the iteration.
//
// Range does not necessarily correspond to any consistent snapshot
// of the Map's contents: no key will be visited more than once, but
// if the value for any key is stored or deleted concurrently, Range
// may reflect any mapping for that key from any point during the
// Range call.
//
// It is safe to modify the map while iterating it, including entry
// creation, modification and deletion. However, the concurrent
// modification rules apply, i.e. the changes may not be reflected
// in subsequently iterated entries.
func (m *Map) Range(f func(key string, value interface{}) bool) {
	var zeroEntry rangeEntry
	// Pre-allocate an array big enough to fit entries for most hash tables.
	bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
	tablep := atomic.LoadPointer(&m.table)
	table := *(*mapTable)(tablep)
	for i := range table.buckets {
		rootb := &table.buckets[i]
		b := rootb
		// Prevent concurrent modifications and copy all entries into
		// the intermediate slice.
		lockBucket(&rootb.topHashMutex)
		for {
			for i := 0; i < entriesPerMapBucket; i++ {
				if b.keys[i] != nil {
					bentries = append(bentries, rangeEntry{
						key:   b.keys[i],
						value: b.values[i],
					})
				}
			}
			if b.next == nil {
				unlockBucket(&rootb.topHashMutex)
				break
			}
			b = (*bucketPadded)(b.next)
		}
		// Call the function for all copied entries.
		for j := range bentries {
			k := derefKey(bentries[j].key)
			v := derefValue(bentries[j].value)
			if !f(k, v) {
				return
			}
			// Remove the reference to avoid preventing the copied
			// entries from being GCed until this method finishes.
			bentries[j] = zeroEntry
		}
		bentries = bentries[:0]
	}
}
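
// Usage sketch (illustrative, not part of the original file): counting
// entries via Range. The result is not a consistent snapshot; concurrent
// writers may or may not be observed.
func exampleRangeCount(m *Map) int {
	n := 0
	m.Range(func(key string, value interface{}) bool {
		n++
		return true // return false to stop the iteration early
	})
	return n
}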

// Clear deletes all keys and values currently stored in the map.
func (m *Map) Clear() {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	m.resize(table, mapClearHint)
}

// Size returns the current size of the map.
func (m *Map) Size() int {
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	return int(table.sumSize())
}

func derefKey(keyPtr unsafe.Pointer) string {
	return *(*string)(keyPtr)
}

func derefValue(valuePtr unsafe.Pointer) interface{} {
	return *(*interface{})(valuePtr)
}

// lockBucket acquires the TTAS spinlock stored in the least significant bit
// of the bucket's topHashMutex word: spin-read until the bit is clear, then
// try to CAS it in.
func lockBucket(mu *uint64) {
	for {
		var v uint64
		for {
			v = atomic.LoadUint64(mu)
			if v&1 != 1 {
				break
			}
			runtime.Gosched()
		}
		if atomic.CompareAndSwapUint64(mu, v, v|1) {
			return
		}
		runtime.Gosched()
	}
}

// unlockBucket releases the spinlock by clearing the least significant bit.
func unlockBucket(mu *uint64) {
	v := atomic.LoadUint64(mu)
	atomic.StoreUint64(mu, v&^1)
}
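
// Sketch (illustrative, not part of the original file): how the write paths
// pair the two calls around a bucket mutation.
func exampleLockedSection(rootb *bucketPadded) {
	lockBucket(&rootb.topHashMutex)
	defer unlockBucket(&rootb.topHashMutex)
	// ... mutate rootb.keys/rootb.values while holding the lock ...
}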

func topHashMatch(hash, topHashes uint64, idx int) bool {
	if topHashes&(1<<(idx+1)) == 0 {
		// Entry is not present.
		return false
	}
	hash = hash & topHashMask
	topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
	return hash == topHashes
}

func storeTopHash(hash, topHashes uint64, idx int) uint64 {
	// Zero out top hash at idx.
	topHashes = topHashes &^ topHashEntryMasks[idx]
	// Chop top 20 MSBs of the given hash and position them at idx.
	hash = (hash & topHashMask) >> (20 * idx)
	// Store the MSBs.
	topHashes = topHashes | hash
	// Mark the entry as present.
	return topHashes | (1 << (idx + 1))
}

func eraseTopHash(topHashes uint64, idx int) uint64 {
	return topHashes &^ (1 << (idx + 1))
}
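
// Worked example (illustrative, not part of the original file): a round trip
// through the topHash helpers for entry index 1.
func exampleTopHashRoundTrip(hash uint64) {
	var w uint64                 // empty word: no hashes, empty bitmap, unlocked mutex bit
	w = storeTopHash(hash, w, 1) // 20 MSBs of hash land in bits 24-43; presence bit 2 is set
	_ = topHashMatch(hash, w, 1) // true: presence bit set and the 20 MSBs are equal
	w = eraseTopHash(w, 1)       // clears only the presence bit; the stale MSBs no longer match
	_ = topHashMatch(hash, w, 1) // false now
}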

func (table *mapTable) addSize(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	atomic.AddInt64(&table.size[cidx].c, int64(delta))
}

func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
	cidx := uint64(len(table.size)-1) & bucketIdx
	table.size[cidx].c += int64(delta)
}

func (table *mapTable) sumSize() int64 {
	sum := int64(0)
	for i := range table.size {
		sum += atomic.LoadInt64(&table.size[i].c)
	}
	return sum
}

type mapStats struct {
	RootBuckets  int
	TotalBuckets int
	EmptyBuckets int
	Capacity     int
	Size         int // calculated number of entries
	Counter      int // number of entries according to table counter
	CounterLen   int // number of counter stripes
	MinEntries   int // min entries per chain of buckets
	MaxEntries   int // max entries per chain of buckets
	TotalGrowths int64
	TotalShrinks int64
}

func (s *mapStats) ToString() string {
	var sb strings.Builder
	sb.WriteString("\n---\n")
	sb.WriteString(fmt.Sprintf("RootBuckets:  %d\n", s.RootBuckets))
	sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
	sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
	sb.WriteString(fmt.Sprintf("Capacity:     %d\n", s.Capacity))
	sb.WriteString(fmt.Sprintf("Size:         %d\n", s.Size))
	sb.WriteString(fmt.Sprintf("Counter:      %d\n", s.Counter))
	sb.WriteString(fmt.Sprintf("CounterLen:   %d\n", s.CounterLen))
	sb.WriteString(fmt.Sprintf("MinEntries:   %d\n", s.MinEntries))
	sb.WriteString(fmt.Sprintf("MaxEntries:   %d\n", s.MaxEntries))
	sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
	sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
	sb.WriteString("---\n")
	return sb.String()
}

// stats is an O(N) operation; use it for debug purposes only.
func (m *Map) stats() mapStats {
	stats := mapStats{
		TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
		TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
		MinEntries:   math.MaxInt32,
	}
	table := (*mapTable)(atomic.LoadPointer(&m.table))
	stats.RootBuckets = len(table.buckets)
	stats.Counter = int(table.sumSize())
	stats.CounterLen = len(table.size)
	for i := range table.buckets {
		nentries := 0
		b := &table.buckets[i]
		stats.TotalBuckets++
		for {
			nentriesLocal := 0
			stats.Capacity += entriesPerMapBucket
			for i := 0; i < entriesPerMapBucket; i++ {
				if atomic.LoadPointer(&b.keys[i]) != nil {
					stats.Size++
					nentriesLocal++
				}
			}
			nentries += nentriesLocal
			if nentriesLocal == 0 {
				stats.EmptyBuckets++
			}
			if b.next == nil {
				break
			}
			b = (*bucketPadded)(b.next)
			stats.TotalBuckets++
		}
		if nentries < stats.MinEntries {
			stats.MinEntries = nentries
		}
		if nentries > stats.MaxEntries {
			stats.MaxEntries = nentries
		}
	}
	return stats
}