github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/runtime/hashmap.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table. The data is arranged
    10  // into an array of buckets. Each bucket contains up to
    11  // 8 key/value pairs. The low-order bits of the hash are
    12  // used to select a bucket. Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big. Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators remain iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
    32  
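        // As an illustrative sketch (not the exact code below), a lookup in a
        // table with 2^B buckets splits the hash roughly like this:
        //
        //	bucket := hash & (uintptr(1)<<B - 1)      // low-order bits pick the bucket
        //	top := uint8(hash >> (sys.PtrSize*8 - 8)) // top byte, kept in the bucket's tophash array
        //
        // and then scans that bucket and its overflow chain, comparing tophash
        // bytes before doing the full key comparison (see mapaccess1 below).
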
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space. I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and values)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/value pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows. Typical tables will be somewhat less loaded.
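        //
        // For example (a rough sketch of the arithmetic; see overLoadFactor below):
        // with loadFactor 6.5 and B == 5 there are 32 buckets, and the table grows
        // once it would hold more than
        //
        //	loadFactorNum * (1<<B) / loadFactorDen = 13 * 32 / 2 = 208
        //
        // entries.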
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/sys"
    59  	"unsafe"
    60  )
    61  
    62  const (
    63  	// Maximum number of key/value pairs a bucket can hold.
    64  	bucketCntBits = 3
    65  	bucketCnt     = 1 << bucketCntBits
    66  
    67  	// Maximum average load of a bucket that triggers growth is 6.5.
    68  	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
    69  	loadFactorNum = 13
    70  	loadFactorDen = 2
    71  
    72  	// Maximum key or value size to keep inline (instead of mallocing per element).
    73  	// Must fit in a uint8.
    74  	// Fast versions cannot handle big values - the cutoff size for
    75  	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
    76  	maxKeySize   = 128
    77  	maxValueSize = 128
    78  
    79  	// data offset should be the size of the bmap struct, but needs to be
    80  	// aligned correctly. For amd64p32 this means 64-bit alignment
    81  	// even though pointers are 32 bit.
    82  	dataOffset = unsafe.Offsetof(struct {
    83  		b bmap
    84  		v int64
    85  	}{}.v)
    86  
    87  	// Possible tophash values. We reserve a few possibilities for special marks.
    88  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    89  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    90  	// during map writes and thus no one else can observe the map during that time).
    91  	empty          = 0 // cell is empty
    92  	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
    93  	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
    94  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    95  	minTopHash     = 4 // minimum tophash for a normal filled cell.
    96  
    97  	// flags
    98  	iterator     = 1 // there may be an iterator using buckets
    99  	oldIterator  = 2 // there may be an iterator using oldbuckets
   100  	hashWriting  = 4 // a goroutine is writing to the map
   101  	sameSizeGrow = 8 // the current map growth is to a new map of the same size
   102  
   103  	// sentinel bucket ID for iterator checks
   104  	noCheck = 1<<(8*sys.PtrSize) - 1
   105  )
   106  
   107  // A header for a Go map.
   108  type hmap struct {
   109  	// Note: the format of the hmap is encoded in ../../cmd/internal/gc/reflect.go and
   110  	// ../reflect/type.go. Don't change this structure without also changing that code!
   111  	count     int // # live cells == size of map.  Must be first (used by len() builtin)
   112  	flags     uint8
   113  	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   114  	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
   115  	hash0     uint32 // hash seed
   116  
   117  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   118  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   119  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   120  
   121  	extra *mapextra // optional fields
   122  }
   123  
   124  // mapextra holds fields that are not present on all maps.
   125  type mapextra struct {
   126  	// If both key and value do not contain pointers and are inline, then we mark bucket
   127  	// type as containing no pointers. This avoids scanning such maps.
   128  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   129  	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
   130  	// overflow and oldoverflow are only used if key and value do not contain pointers.
   131  	// overflow contains overflow buckets for hmap.buckets.
   132  	// oldoverflow contains overflow buckets for hmap.oldbuckets.
   133  	// The indirection allows storing a pointer to the slice in hiter.
   134  	overflow    *[]*bmap
   135  	oldoverflow *[]*bmap
   136  
   137  	// nextOverflow holds a pointer to a free overflow bucket.
   138  	nextOverflow *bmap
   139  }
   140  
   141  // A bucket for a Go map.
   142  type bmap struct {
   143  	// tophash generally contains the top byte of the hash value
   144  	// for each key in this bucket. If tophash[0] < minTopHash,
   145  	// tophash[0] is a bucket evacuation state instead.
   146  	tophash [bucketCnt]uint8
   147  	// Followed by bucketCnt keys and then bucketCnt values.
   148  	// NOTE: packing all the keys together and then all the values together makes the
   149  	// code a bit more complicated than alternating key/value/key/value/... but it allows
   150  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   151  	// Followed by an overflow pointer.
   152  }
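
        // bucketTotalSize is an illustrative sketch and is not used by the runtime:
        // it shows roughly how large a bucket is for given inline key and value
        // sizes, ignoring any extra alignment padding the compiler may insert. The
        // layout is the tophash array (padded out to dataOffset), then bucketCnt
        // keys, then bucketCnt values, then the trailing overflow pointer that
        // overflow() and setoverflow() below read and write.
        func bucketTotalSize(keysize, valuesize uintptr) uintptr {
        	return dataOffset + bucketCnt*keysize + bucketCnt*valuesize + sys.PtrSize
        }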
   153  
   154  // A hash iteration structure.
   155  // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
   156  // the layout of this structure.
   157  type hiter struct {
   158  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   159  	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   160  	t           *maptype
   161  	h           *hmap
   162  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   163  	bptr        *bmap          // current bucket
   164  	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
   165  	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
   166  	startBucket uintptr        // bucket iteration started at
   167  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   168  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   169  	B           uint8
   170  	i           uint8
   171  	bucket      uintptr
   172  	checkBucket uintptr
   173  }
   174  
   175  // bucketShift returns 1<<b, optimized for code generation.
   176  func bucketShift(b uint8) uintptr {
   177  	if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
   178  		b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
   179  	}
   180  	return uintptr(1) << b
   181  }
   182  
   183  // bucketMask returns 1<<b - 1, optimized for code generation.
   184  func bucketMask(b uint8) uintptr {
   185  	return bucketShift(b) - 1
   186  }
   187  
   188  // tophash calculates the tophash value for hash.
   189  func tophash(hash uintptr) uint8 {
   190  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   191  	if top < minTopHash {
   192  		top += minTopHash
   193  	}
   194  	return top
   195  }
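
        // For example (illustrative): on a 64-bit platform a hash whose top byte is
        // 0xab is well above minTopHash, so tophash returns 0xab unchanged, while a
        // hash whose top byte is 0x02 would collide with the evacuatedX marker and
        // is bumped to 0x02 + minTopHash == 0x06.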
   196  
   197  func evacuated(b *bmap) bool {
   198  	h := b.tophash[0]
   199  	return h > empty && h < minTopHash
   200  }
   201  
   202  func (b *bmap) overflow(t *maptype) *bmap {
   203  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   204  }
   205  
   206  func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
   207  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   208  }
   209  
   210  func (b *bmap) keys() unsafe.Pointer {
   211  	return add(unsafe.Pointer(b), dataOffset)
   212  }
   213  
   214  // incrnoverflow increments h.noverflow.
   215  // noverflow counts the number of overflow buckets.
   216  // This is used to trigger same-size map growth.
   217  // See also tooManyOverflowBuckets.
   218  // To keep hmap small, noverflow is a uint16.
   219  // When there are few buckets, noverflow is an exact count.
   220  // When there are many buckets, noverflow is an approximate count.
   221  func (h *hmap) incrnoverflow() {
   222  	// We trigger same-size map growth if there are
   223  	// as many overflow buckets as buckets.
   224  	// We need to be able to count to 1<<h.B.
   225  	if h.B < 16 {
   226  		h.noverflow++
   227  		return
   228  	}
   229  	// Increment with probability 1/(1<<(h.B-15)).
   230  	// When we reach 1<<15 - 1, we will have approximately
   231  	// as many overflow buckets as buckets.
   232  	mask := uint32(1)<<(h.B-15) - 1
   233  	// Example: if h.B == 18, then mask == 7,
   234  	// and fastrand & 7 == 0 with probability 1/8.
   235  	if fastrand()&mask == 0 {
   236  		h.noverflow++
   237  	}
   238  }
   239  
   240  func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
   241  	var ovf *bmap
   242  	if h.extra != nil && h.extra.nextOverflow != nil {
   243  		// We have preallocated overflow buckets available.
   244  		// See makeBucketArray for more details.
   245  		ovf = h.extra.nextOverflow
   246  		if ovf.overflow(t) == nil {
   247  			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
   248  			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
   249  		} else {
   250  			// This is the last preallocated overflow bucket.
   251  			// Reset the overflow pointer on this bucket,
   252  			// which was set to a non-nil sentinel value.
   253  			ovf.setoverflow(t, nil)
   254  			h.extra.nextOverflow = nil
   255  		}
   256  	} else {
   257  		ovf = (*bmap)(newobject(t.bucket))
   258  	}
   259  	h.incrnoverflow()
   260  	if t.bucket.kind&kindNoPointers != 0 {
   261  		h.createOverflow()
   262  		*h.extra.overflow = append(*h.extra.overflow, ovf)
   263  	}
   264  	b.setoverflow(t, ovf)
   265  	return ovf
   266  }
   267  
   268  func (h *hmap) createOverflow() {
   269  	if h.extra == nil {
   270  		h.extra = new(mapextra)
   271  	}
   272  	if h.extra.overflow == nil {
   273  		h.extra.overflow = new([]*bmap)
   274  	}
   275  }
   276  
   277  func makemap64(t *maptype, hint int64, h *hmap) *hmap {
   278  	if int64(int(hint)) != hint {
   279  		hint = 0
   280  	}
   281  	return makemap(t, int(hint), h)
   282  }
   283  
   284  // makemap implements Go map creation for make(map[k]v, hint).
   285  // If the compiler has determined that the map or the first bucket
   286  // can be created on the stack, h and/or bucket may be non-nil.
   287  // If h != nil, the map can be created directly in h.
   288  // If h.buckets != nil, bucket pointed to can be used as the first bucket.
   289  func makemap(t *maptype, hint int, h *hmap) *hmap {
   290  	// The size of hmap should be 48 bytes on 64 bit
   291  	// and 28 bytes on 32 bit platforms.
   292  	if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
   293  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
   294  		throw("bad hmap size")
   295  	}
   296  
   297  	if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
   298  		hint = 0
   299  	}
   300  
   301  	// initialize Hmap
   302  	if h == nil {
   303  		h = (*hmap)(newobject(t.hmap))
   304  	}
   305  	h.hash0 = fastrand()
   306  
   307  	// find size parameter which will hold the requested # of elements
   308  	B := uint8(0)
   309  	for overLoadFactor(hint, B) {
   310  		B++
   311  	}
   312  	h.B = B
   313  
   314  	// allocate initial hash table
   315  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
   316  	// If hint is large, zeroing this memory could take a while.
   317  	if h.B != 0 {
   318  		var nextOverflow *bmap
   319  		h.buckets, nextOverflow = makeBucketArray(t, h.B)
   320  		if nextOverflow != nil {
   321  			h.extra = new(mapextra)
   322  			h.extra.nextOverflow = nextOverflow
   323  		}
   324  	}
   325  
   326  	return h
   327  }
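
        // sizeParamForHint is an illustrative sketch and is not used by the runtime:
        // it repeats the B-selection loop from makemap above. For instance, a hint
        // of 100 yields B == 4, i.e. 16 buckets, which at load factor 6.5 hold up
        // to 104 entries before the map needs to grow.
        func sizeParamForHint(hint int) uint8 {
        	B := uint8(0)
        	for overLoadFactor(hint, B) {
        		B++
        	}
        	return B
        }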
   328  
   329  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   330  // it will return a reference to the zero object for the value type if
   331  // the key is not in the map.
   332  // NOTE: The returned pointer may keep the whole map live, so don't
   333  // hold onto it for very long.
   334  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   335  	if raceenabled && h != nil {
   336  		callerpc := getcallerpc()
   337  		pc := funcPC(mapaccess1)
   338  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   339  		raceReadObjectPC(t.key, key, callerpc, pc)
   340  	}
   341  	if msanenabled && h != nil {
   342  		msanread(key, t.key.size)
   343  	}
   344  	if h == nil || h.count == 0 {
   345  		return unsafe.Pointer(&zeroVal[0])
   346  	}
   347  	if h.flags&hashWriting != 0 {
   348  		throw("concurrent map read and map write")
   349  	}
   350  	alg := t.key.alg
   351  	hash := alg.hash(key, uintptr(h.hash0))
   352  	m := bucketMask(h.B)
   353  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   354  	if c := h.oldbuckets; c != nil {
   355  		if !h.sameSizeGrow() {
   356  			// There used to be half as many buckets; mask down one more power of two.
   357  			m >>= 1
   358  		}
   359  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
   360  		if !evacuated(oldb) {
   361  			b = oldb
   362  		}
   363  	}
   364  	top := tophash(hash)
   365  	for ; b != nil; b = b.overflow(t) {
   366  		for i := uintptr(0); i < bucketCnt; i++ {
   367  			if b.tophash[i] != top {
   368  				continue
   369  			}
   370  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   371  			if t.indirectkey {
   372  				k = *((*unsafe.Pointer)(k))
   373  			}
   374  			if alg.equal(key, k) {
   375  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   376  				if t.indirectvalue {
   377  					v = *((*unsafe.Pointer)(v))
   378  				}
   379  				return v
   380  			}
   381  		}
   382  	}
   383  	return unsafe.Pointer(&zeroVal[0])
   384  }
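
        // For ordinary Go code, the compiler lowers map reads to mapaccess1 and
        // mapaccess2 (or to type-specialized fast variants). Roughly, with t being
        // the map's *maptype descriptor:
        //
        //	v := m[k]     // mapaccess1(t, m, &k), then copy the pointed-to value
        //	v, ok := m[k] // mapaccess2(t, m, &k)
        //
        // The "never returns nil" contract above is what makes the one-result form
        // work for missing keys.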
   385  
   386  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   387  	if raceenabled && h != nil {
   388  		callerpc := getcallerpc()
   389  		pc := funcPC(mapaccess2)
   390  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   391  		raceReadObjectPC(t.key, key, callerpc, pc)
   392  	}
   393  	if msanenabled && h != nil {
   394  		msanread(key, t.key.size)
   395  	}
   396  	if h == nil || h.count == 0 {
   397  		return unsafe.Pointer(&zeroVal[0]), false
   398  	}
   399  	if h.flags&hashWriting != 0 {
   400  		throw("concurrent map read and map write")
   401  	}
   402  	alg := t.key.alg
   403  	hash := alg.hash(key, uintptr(h.hash0))
   404  	m := bucketMask(h.B)
   405  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   406  	if c := h.oldbuckets; c != nil {
   407  		if !h.sameSizeGrow() {
   408  			// There used to be half as many buckets; mask down one more power of two.
   409  			m >>= 1
   410  		}
   411  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   412  		if !evacuated(oldb) {
   413  			b = oldb
   414  		}
   415  	}
   416  	top := tophash(hash)
   417  	for ; b != nil; b = b.overflow(t) {
   418  		for i := uintptr(0); i < bucketCnt; i++ {
   419  			if b.tophash[i] != top {
   420  				continue
   421  			}
   422  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   423  			if t.indirectkey {
   424  				k = *((*unsafe.Pointer)(k))
   425  			}
   426  			if alg.equal(key, k) {
   427  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   428  				if t.indirectvalue {
   429  					v = *((*unsafe.Pointer)(v))
   430  				}
   431  				return v, true
   432  			}
   433  		}
   434  	}
   435  	return unsafe.Pointer(&zeroVal[0]), false
   436  }
   437  
   438  // mapaccessK returns both key and value. It is used by the map iterator.
   439  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   440  	if h == nil || h.count == 0 {
   441  		return nil, nil
   442  	}
   443  	alg := t.key.alg
   444  	hash := alg.hash(key, uintptr(h.hash0))
   445  	m := bucketMask(h.B)
   446  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   447  	if c := h.oldbuckets; c != nil {
   448  		if !h.sameSizeGrow() {
   449  			// There used to be half as many buckets; mask down one more power of two.
   450  			m >>= 1
   451  		}
   452  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   453  		if !evacuated(oldb) {
   454  			b = oldb
   455  		}
   456  	}
   457  	top := tophash(hash)
   458  	for ; b != nil; b = b.overflow(t) {
   459  		for i := uintptr(0); i < bucketCnt; i++ {
   460  			if b.tophash[i] != top {
   461  				continue
   462  			}
   463  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   464  			if t.indirectkey {
   465  				k = *((*unsafe.Pointer)(k))
   466  			}
   467  			if alg.equal(key, k) {
   468  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   469  				if t.indirectvalue {
   470  					v = *((*unsafe.Pointer)(v))
   471  				}
   472  				return k, v
   473  			}
   474  		}
   475  	}
   476  	return nil, nil
   477  }
   478  
   479  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
   480  	v := mapaccess1(t, h, key)
   481  	if v == unsafe.Pointer(&zeroVal[0]) {
   482  		return zero
   483  	}
   484  	return v
   485  }
   486  
   487  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
   488  	v := mapaccess1(t, h, key)
   489  	if v == unsafe.Pointer(&zeroVal[0]) {
   490  		return zero, false
   491  	}
   492  	return v, true
   493  }
   494  
   495  // Like mapaccess, but allocates a slot for the key if it is not present in the map.
   496  func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   497  	if h == nil {
   498  		panic(plainError("assignment to entry in nil map"))
   499  	}
   500  	if raceenabled {
   501  		callerpc := getcallerpc()
   502  		pc := funcPC(mapassign)
   503  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   504  		raceReadObjectPC(t.key, key, callerpc, pc)
   505  	}
   506  	if msanenabled {
   507  		msanread(key, t.key.size)
   508  	}
   509  	if h.flags&hashWriting != 0 {
   510  		throw("concurrent map writes")
   511  	}
   512  	alg := t.key.alg
   513  	hash := alg.hash(key, uintptr(h.hash0))
   514  
   515  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   516  	// in which case we have not actually done a write.
   517  	h.flags |= hashWriting
   518  
   519  	if h.buckets == nil {
   520  		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
   521  	}
   522  
   523  again:
   524  	bucket := hash & bucketMask(h.B)
   525  	if h.growing() {
   526  		growWork(t, h, bucket)
   527  	}
   528  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   529  	top := tophash(hash)
   530  
   531  	var inserti *uint8
   532  	var insertk unsafe.Pointer
   533  	var val unsafe.Pointer
   534  	for {
   535  		for i := uintptr(0); i < bucketCnt; i++ {
   536  			if b.tophash[i] != top {
   537  				if b.tophash[i] == empty && inserti == nil {
   538  					inserti = &b.tophash[i]
   539  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   540  					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   541  				}
   542  				continue
   543  			}
   544  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   545  			if t.indirectkey {
   546  				k = *((*unsafe.Pointer)(k))
   547  			}
   548  			if !alg.equal(key, k) {
   549  				continue
   550  			}
   551  			// already have a mapping for key. Update it.
   552  			if t.needkeyupdate {
   553  				typedmemmove(t.key, k, key)
   554  			}
   555  			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   556  			goto done
   557  		}
   558  		ovf := b.overflow(t)
   559  		if ovf == nil {
   560  			break
   561  		}
   562  		b = ovf
   563  	}
   564  
   565  	// Did not find mapping for key. Allocate new cell & add entry.
   566  
   567  	// If we hit the max load factor or we have too many overflow buckets,
   568  	// and we're not already in the middle of growing, start growing.
   569  	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   570  		hashGrow(t, h)
   571  		goto again // Growing the table invalidates everything, so try again
   572  	}
   573  
   574  	if inserti == nil {
   575  		// all current buckets are full, allocate a new one.
   576  		newb := h.newoverflow(t, b)
   577  		inserti = &newb.tophash[0]
   578  		insertk = add(unsafe.Pointer(newb), dataOffset)
   579  		val = add(insertk, bucketCnt*uintptr(t.keysize))
   580  	}
   581  
   582  	// store new key/value at insert position
   583  	if t.indirectkey {
   584  		kmem := newobject(t.key)
   585  		*(*unsafe.Pointer)(insertk) = kmem
   586  		insertk = kmem
   587  	}
   588  	if t.indirectvalue {
   589  		vmem := newobject(t.elem)
   590  		*(*unsafe.Pointer)(val) = vmem
   591  	}
   592  	typedmemmove(t.key, insertk, key)
   593  	*inserti = top
   594  	h.count++
   595  
   596  done:
   597  	if h.flags&hashWriting == 0 {
   598  		throw("concurrent map writes")
   599  	}
   600  	h.flags &^= hashWriting
   601  	if t.indirectvalue {
   602  		val = *((*unsafe.Pointer)(val))
   603  	}
   604  	return val
   605  }
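
        // Note that mapassign does not store the value itself; the compiler emits
        // the store into the returned slot. Roughly (and as reflect_mapassign below
        // does explicitly), with t being the map's *maptype descriptor:
        //
        //	p := mapassign(t, m, &k)
        //	typedmemmove(t.elem, p, &v) // or a direct store for simple element types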
   606  
   607  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   608  	if raceenabled && h != nil {
   609  		callerpc := getcallerpc()
   610  		pc := funcPC(mapdelete)
   611  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   612  		raceReadObjectPC(t.key, key, callerpc, pc)
   613  	}
   614  	if msanenabled && h != nil {
   615  		msanread(key, t.key.size)
   616  	}
   617  	if h == nil || h.count == 0 {
   618  		return
   619  	}
   620  	if h.flags&hashWriting != 0 {
   621  		throw("concurrent map writes")
   622  	}
   623  
   624  	alg := t.key.alg
   625  	hash := alg.hash(key, uintptr(h.hash0))
   626  
   627  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   628  	// in which case we have not actually done a write (delete).
   629  	h.flags |= hashWriting
   630  
   631  	bucket := hash & bucketMask(h.B)
   632  	if h.growing() {
   633  		growWork(t, h, bucket)
   634  	}
   635  	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
   636  	top := tophash(hash)
   637  search:
   638  	for ; b != nil; b = b.overflow(t) {
   639  		for i := uintptr(0); i < bucketCnt; i++ {
   640  			if b.tophash[i] != top {
   641  				continue
   642  			}
   643  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   644  			k2 := k
   645  			if t.indirectkey {
   646  				k2 = *((*unsafe.Pointer)(k2))
   647  			}
   648  			if !alg.equal(key, k2) {
   649  				continue
   650  			}
   651  			// Only clear key if there are pointers in it.
   652  			if t.indirectkey {
   653  				*(*unsafe.Pointer)(k) = nil
   654  			} else if t.key.kind&kindNoPointers == 0 {
   655  				memclrHasPointers(k, t.key.size)
   656  			}
   657  			// Only clear value if there are pointers in it.
   658  			if t.indirectvalue || t.elem.kind&kindNoPointers == 0 {
   659  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   660  				if t.indirectvalue {
   661  					*(*unsafe.Pointer)(v) = nil
   662  				} else {
   663  					memclrHasPointers(v, t.elem.size)
   664  				}
   665  			}
   666  			b.tophash[i] = empty
   667  			h.count--
   668  			break search
   669  		}
   670  	}
   671  
   672  	if h.flags&hashWriting == 0 {
   673  		throw("concurrent map writes")
   674  	}
   675  	h.flags &^= hashWriting
   676  }
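
        // A plain delete(m, k) is lowered to a call to mapdelete (or to a
        // type-specialized fast variant); deleting a key that is not present is a
        // no-op, as the search loop above simply falls through.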
   677  
   678  // mapiterinit initializes the hiter struct used for ranging over maps.
   679  // The hiter struct pointed to by 'it' is allocated on the stack
   680  // by the compiler's order pass or on the heap by reflect_mapiterinit.
   681  // Both need to have a zeroed hiter since the struct contains pointers.
   682  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   683  	if raceenabled && h != nil {
   684  		callerpc := getcallerpc()
   685  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   686  	}
   687  
   688  	if h == nil || h.count == 0 {
   689  		return
   690  	}
   691  
   692  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   693  		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
   694  	}
   695  	it.t = t
   696  	it.h = h
   697  
   698  	// grab snapshot of bucket state
   699  	it.B = h.B
   700  	it.buckets = h.buckets
   701  	if t.bucket.kind&kindNoPointers != 0 {
   702  		// Allocate the current slice and remember pointers to both current and old.
   703  		// This preserves all relevant overflow buckets alive even if
   704  		// the table grows and/or overflow buckets are added to the table
   705  		// while we are iterating.
   706  		h.createOverflow()
   707  		it.overflow = h.extra.overflow
   708  		it.oldoverflow = h.extra.oldoverflow
   709  	}
   710  
   711  	// decide where to start
   712  	r := uintptr(fastrand())
   713  	if h.B > 31-bucketCntBits {
   714  		r += uintptr(fastrand()) << 31
   715  	}
   716  	it.startBucket = r & bucketMask(h.B)
   717  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
   718  
   719  	// iterator state
   720  	it.bucket = it.startBucket
   721  
   722  	// Remember we have an iterator.
   723  	// Can run concurrently with another mapiterinit().
   724  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   725  		atomic.Or8(&h.flags, iterator|oldIterator)
   726  	}
   727  
   728  	mapiternext(it)
   729  }
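
        // A range loop over a map compiles into calls to mapiterinit and
        // mapiternext. Roughly:
        //
        //	var it hiter
        //	mapiterinit(t, m, &it)
        //	for ; it.key != nil; mapiternext(&it) {
        //		// use it.key and it.value
        //	}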
   730  
   731  func mapiternext(it *hiter) {
   732  	h := it.h
   733  	if raceenabled {
   734  		callerpc := getcallerpc()
   735  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   736  	}
   737  	if h.flags&hashWriting != 0 {
   738  		throw("concurrent map iteration and map write")
   739  	}
   740  	t := it.t
   741  	bucket := it.bucket
   742  	b := it.bptr
   743  	i := it.i
   744  	checkBucket := it.checkBucket
   745  	alg := t.key.alg
   746  
   747  next:
   748  	if b == nil {
   749  		if bucket == it.startBucket && it.wrapped {
   750  			// end of iteration
   751  			it.key = nil
   752  			it.value = nil
   753  			return
   754  		}
   755  		if h.growing() && it.B == h.B {
   756  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   757  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   758  			// bucket hasn't been evacuated) then we need to iterate through the old
   759  			// bucket and only return the ones that will be migrated to this bucket.
   760  			oldbucket := bucket & it.h.oldbucketmask()
   761  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   762  			if !evacuated(b) {
   763  				checkBucket = bucket
   764  			} else {
   765  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   766  				checkBucket = noCheck
   767  			}
   768  		} else {
   769  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   770  			checkBucket = noCheck
   771  		}
   772  		bucket++
   773  		if bucket == bucketShift(it.B) {
   774  			bucket = 0
   775  			it.wrapped = true
   776  		}
   777  		i = 0
   778  	}
   779  	for ; i < bucketCnt; i++ {
   780  		offi := (i + it.offset) & (bucketCnt - 1)
   781  		if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty {
   782  			continue
   783  		}
   784  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   785  		if t.indirectkey {
   786  			k = *((*unsafe.Pointer)(k))
   787  		}
   788  		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
   789  		if checkBucket != noCheck && !h.sameSizeGrow() {
   790  			// Special case: iterator was started during a grow to a larger size
   791  			// and the grow is not done yet. We're working on a bucket whose
   792  			// oldbucket has not been evacuated yet. Or at least, it wasn't
   793  			// evacuated when we started the bucket. So we're iterating
   794  			// through the oldbucket, skipping any keys that will go
   795  			// to the other new bucket (each oldbucket expands to two
   796  			// buckets during a grow).
   797  			if t.reflexivekey || alg.equal(k, k) {
   798  				// If the item in the oldbucket is not destined for
   799  				// the current new bucket in the iteration, skip it.
   800  				hash := alg.hash(k, uintptr(h.hash0))
   801  				if hash&bucketMask(it.B) != checkBucket {
   802  					continue
   803  				}
   804  			} else {
   805  				// Hash isn't repeatable if k != k (NaNs).  We need a
   806  				// repeatable and randomish choice of which direction
   807  				// to send NaNs during evacuation. We'll use the low
   808  				// bit of tophash to decide which way NaNs go.
   809  				// NOTE: this case is why we need two evacuate tophash
   810  				// values, evacuatedX and evacuatedY, that differ in
   811  				// their low bit.
   812  				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   813  					continue
   814  				}
   815  			}
   816  		}
   817  		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
   818  			!(t.reflexivekey || alg.equal(k, k)) {
   819  			// This is the golden data; we can return it.
   820  			// OR
   821  			// key!=key, so the entry can't be deleted or updated, so we can just return it.
   822  			// That's lucky for us because when key!=key we can't look it up successfully.
   823  			it.key = k
   824  			if t.indirectvalue {
   825  				v = *((*unsafe.Pointer)(v))
   826  			}
   827  			it.value = v
   828  		} else {
   829  			// The hash table has grown since the iterator was started.
   830  			// The golden data for this key is now somewhere else.
   831  			// Check the current hash table for the data.
   832  			// This code handles the case where the key
   833  			// has been deleted, updated, or deleted and reinserted.
   834  			// NOTE: we need to regrab the key as it has potentially been
   835  			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   836  			rk, rv := mapaccessK(t, h, k)
   837  			if rk == nil {
   838  				continue // key has been deleted
   839  			}
   840  			it.key = rk
   841  			it.value = rv
   842  		}
   843  		it.bucket = bucket
   844  		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
   845  			it.bptr = b
   846  		}
   847  		it.i = i + 1
   848  		it.checkBucket = checkBucket
   849  		return
   850  	}
   851  	b = b.overflow(t)
   852  	i = 0
   853  	goto next
   854  }
   855  
   856  func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
   857  	base := bucketShift(b)
   858  	nbuckets := base
   859  	// For small b, overflow buckets are unlikely.
   860  	// Avoid the overhead of the calculation.
   861  	if b >= 4 {
   862  		// Add on the estimated number of overflow buckets
   863  		// required to insert the median number of elements
   864  		// used with this value of b.
   865  		nbuckets += bucketShift(b - 4)
   866  		sz := t.bucket.size * nbuckets
   867  		up := roundupsize(sz)
   868  		if up != sz {
   869  			nbuckets = up / t.bucket.size
   870  		}
   871  	}
   872  	buckets = newarray(t.bucket, int(nbuckets))
   873  	if base != nbuckets {
   874  		// We preallocated some overflow buckets.
   875  		// To keep the overhead of tracking these overflow buckets to a minimum,
   876  		// we use the convention that if a preallocated overflow bucket's overflow
   877  		// pointer is nil, then there are more available by bumping the pointer.
   878  		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
   879  		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
   880  		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
   881  		last.setoverflow(t, (*bmap)(buckets))
   882  	}
   883  	return buckets, nextOverflow
   884  }
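
        // For example (ignoring the roundupsize adjustment): b == 6 gives base == 64
        // buckets plus bucketShift(6-4) == 4 preallocated overflow buckets, so 68
        // buckets are allocated in a single array and newoverflow hands the spare
        // four out one at a time via h.extra.nextOverflow.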
   885  
   886  func hashGrow(t *maptype, h *hmap) {
   887  	// If we've hit the load factor, get bigger.
   888  	// Otherwise, there are too many overflow buckets,
   889  	// so keep the same number of buckets and "grow" laterally.
   890  	bigger := uint8(1)
   891  	if !overLoadFactor(h.count+1, h.B) {
   892  		bigger = 0
   893  		h.flags |= sameSizeGrow
   894  	}
   895  	oldbuckets := h.buckets
   896  	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
   897  
   898  	flags := h.flags &^ (iterator | oldIterator)
   899  	if h.flags&iterator != 0 {
   900  		flags |= oldIterator
   901  	}
   902  	// commit the grow (atomic wrt gc)
   903  	h.B += bigger
   904  	h.flags = flags
   905  	h.oldbuckets = oldbuckets
   906  	h.buckets = newbuckets
   907  	h.nevacuate = 0
   908  	h.noverflow = 0
   909  
   910  	if h.extra != nil && h.extra.overflow != nil {
   911  		// Promote current overflow buckets to the old generation.
   912  		if h.extra.oldoverflow != nil {
   913  			throw("oldoverflow is not nil")
   914  		}
   915  		h.extra.oldoverflow = h.extra.overflow
   916  		h.extra.overflow = nil
   917  	}
   918  	if nextOverflow != nil {
   919  		if h.extra == nil {
   920  			h.extra = new(mapextra)
   921  		}
   922  		h.extra.nextOverflow = nextOverflow
   923  	}
   924  
   925  	// the actual copying of the hash table data is done incrementally
   926  	// by growWork() and evacuate().
   927  }
   928  
   929  // overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
   930  func overLoadFactor(count int, B uint8) bool {
   931  	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
   932  }
   933  
   934  // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
   935  // Note that most of these overflow buckets must be in sparse use;
   936  // if use was dense, then we'd have already triggered regular map growth.
   937  func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
   938  	// If the threshold is too low, we do extraneous work.
   939  	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
   940  	// "too many" means (approximately) as many overflow buckets as regular buckets.
   941  	// See incrnoverflow for more details.
   942  	if B > 15 {
   943  		B = 15
   944  	}
   945  	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
   946  	return noverflow >= uint16(1)<<(B&15)
   947  }
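
        // For example: with B == 5 (32 buckets) a same-size grow is triggered once
        // 32 overflow buckets have accumulated; for B > 15 the threshold stays at
        // 1<<15, matching the approximate counting done in incrnoverflow.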
   948  
   949  // growing reports whether h is growing. The growth may be to the same size or bigger.
   950  func (h *hmap) growing() bool {
   951  	return h.oldbuckets != nil
   952  }
   953  
   954  // sameSizeGrow reports whether the current growth is to a map of the same size.
   955  func (h *hmap) sameSizeGrow() bool {
   956  	return h.flags&sameSizeGrow != 0
   957  }
   958  
   959  // noldbuckets calculates the number of buckets prior to the current map growth.
   960  func (h *hmap) noldbuckets() uintptr {
   961  	oldB := h.B
   962  	if !h.sameSizeGrow() {
   963  		oldB--
   964  	}
   965  	return bucketShift(oldB)
   966  }
   967  
   968  // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
   969  func (h *hmap) oldbucketmask() uintptr {
   970  	return h.noldbuckets() - 1
   971  }
   972  
   973  func growWork(t *maptype, h *hmap, bucket uintptr) {
   974  	// make sure we evacuate the oldbucket corresponding
   975  	// to the bucket we're about to use
   976  	evacuate(t, h, bucket&h.oldbucketmask())
   977  
   978  	// evacuate one more oldbucket to make progress on growing
   979  	if h.growing() {
   980  		evacuate(t, h, h.nevacuate)
   981  	}
   982  }
   983  
   984  func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
   985  	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
   986  	return evacuated(b)
   987  }
   988  
   989  // evacDst is an evacuation destination.
   990  type evacDst struct {
   991  	b *bmap          // current destination bucket
   992  	i int            // key/val index into b
   993  	k unsafe.Pointer // pointer to current key storage
   994  	v unsafe.Pointer // pointer to current value storage
   995  }
   996  
   997  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
   998  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   999  	newbit := h.noldbuckets()
  1000  	if !evacuated(b) {
  1001  		// TODO: reuse overflow buckets instead of using new ones, if there
  1002  		// is no iterator using the old buckets.  (If !oldIterator.)
  1003  
  1004  		// xy contains the x and y (low and high) evacuation destinations.
  1005  		var xy [2]evacDst
  1006  		x := &xy[0]
  1007  		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
  1008  		x.k = add(unsafe.Pointer(x.b), dataOffset)
  1009  		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
  1010  
  1011  		if !h.sameSizeGrow() {
  1012  			// Only calculate y pointers if we're growing bigger.
  1013  			// Otherwise GC can see bad pointers.
  1014  			y := &xy[1]
  1015  			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
  1016  			y.k = add(unsafe.Pointer(y.b), dataOffset)
  1017  			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
  1018  		}
  1019  
  1020  		for ; b != nil; b = b.overflow(t) {
  1021  			k := add(unsafe.Pointer(b), dataOffset)
  1022  			v := add(k, bucketCnt*uintptr(t.keysize))
  1023  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
  1024  				top := b.tophash[i]
  1025  				if top == empty {
  1026  					b.tophash[i] = evacuatedEmpty
  1027  					continue
  1028  				}
  1029  				if top < minTopHash {
  1030  					throw("bad map state")
  1031  				}
  1032  				k2 := k
  1033  				if t.indirectkey {
  1034  					k2 = *((*unsafe.Pointer)(k2))
  1035  				}
  1036  				var useY uint8
  1037  				if !h.sameSizeGrow() {
  1038  					// Compute hash to make our evacuation decision (whether we need
  1039  					// to send this key/value to bucket x or bucket y).
  1040  					hash := t.key.alg.hash(k2, uintptr(h.hash0))
  1041  					if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
  1042  						// If key != key (NaNs), then the hash could be (and probably
  1043  						// will be) entirely different from the old hash. Moreover,
  1044  						// it isn't reproducible. Reproducibility is required in the
  1045  						// presence of iterators, as our evacuation decision must
  1046  						// match whatever decision the iterator made.
  1047  						// Fortunately, we have the freedom to send these keys either
  1048  						// way. Also, tophash is meaningless for these kinds of keys.
  1049  						// We let the low bit of tophash drive the evacuation decision.
  1050  						// We recompute a new random tophash for the next level so
  1051  						// these keys will get evenly distributed across all buckets
  1052  						// after multiple grows.
  1053  						useY = top & 1
  1054  						top = tophash(hash)
  1055  					} else {
  1056  						if hash&newbit != 0 {
  1057  							useY = 1
  1058  						}
  1059  					}
  1060  				}
  1061  
  1062  				if evacuatedX+1 != evacuatedY {
  1063  					throw("bad evacuatedN")
  1064  				}
  1065  
  1066  				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
  1067  				dst := &xy[useY]                 // evacuation destination
  1068  
  1069  				if dst.i == bucketCnt {
  1070  					dst.b = h.newoverflow(t, dst.b)
  1071  					dst.i = 0
  1072  					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
  1073  					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
  1074  				}
  1075  				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
  1076  				if t.indirectkey {
  1077  					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
  1078  				} else {
  1079  					typedmemmove(t.key, dst.k, k) // copy value
  1080  				}
  1081  				if t.indirectvalue {
  1082  					*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
  1083  				} else {
  1084  					typedmemmove(t.elem, dst.v, v)
  1085  				}
  1086  				dst.i++
  1087  				// These updates might push these pointers past the end of the
  1088  				// key or value arrays.  That's ok, as we have the overflow pointer
  1089  				// at the end of the bucket to protect against pointing past the
  1090  				// end of the bucket.
  1091  				dst.k = add(dst.k, uintptr(t.keysize))
  1092  				dst.v = add(dst.v, uintptr(t.valuesize))
  1093  			}
  1094  		}
  1095  		// Unlink the overflow buckets & clear key/value to help GC.
  1096  		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
  1097  			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
  1098  			// Preserve b.tophash because the evacuation
  1099  			// state is maintained there.
  1100  			ptr := add(b, dataOffset)
  1101  			n := uintptr(t.bucketsize) - dataOffset
  1102  			memclrHasPointers(ptr, n)
  1103  		}
  1104  	}
  1105  
  1106  	if oldbucket == h.nevacuate {
  1107  		advanceEvacuationMark(h, t, newbit)
  1108  	}
  1109  }
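
        // For example: when a map grows from B == 4 to B == 5, newbit == 16, and
        // the entries of oldbucket 3 are split between new bucket 3 (the X
        // destination, hash&newbit == 0) and new bucket 19 == 3+16 (the Y
        // destination).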
  1110  
  1111  func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
  1112  	h.nevacuate++
  1113  	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
  1114  	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
  1115  	stop := h.nevacuate + 1024
  1116  	if stop > newbit {
  1117  		stop = newbit
  1118  	}
  1119  	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
  1120  		h.nevacuate++
  1121  	}
  1122  	if h.nevacuate == newbit { // newbit == # of oldbuckets
  1123  		// Growing is all done. Free old main bucket array.
  1124  		h.oldbuckets = nil
  1125  		// Can discard old overflow buckets as well.
  1126  		// If they are still referenced by an iterator,
  1127  		// then the iterator holds a pointer to the slice.
  1128  		if h.extra != nil {
  1129  			h.extra.oldoverflow = nil
  1130  		}
  1131  		h.flags &^= sameSizeGrow
  1132  	}
  1133  }
  1134  
  1135  func ismapkey(t *_type) bool {
  1136  	return t.alg.hash != nil
  1137  }
  1138  
  1139  // Reflect stubs. Called from ../reflect/asm_*.s
  1140  
  1141  //go:linkname reflect_makemap reflect.makemap
  1142  func reflect_makemap(t *maptype, cap int) *hmap {
  1143  	// Check invariants and reflect's math.
  1144  	if sz := unsafe.Sizeof(hmap{}); sz != t.hmap.size {
  1145  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
  1146  		throw("bad hmap size")
  1147  	}
  1148  	if !ismapkey(t.key) {
  1149  		throw("runtime.reflect_makemap: unsupported map key type")
  1150  	}
  1151  	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
  1152  		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
  1153  		throw("key size wrong")
  1154  	}
  1155  	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
  1156  		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
  1157  		throw("value size wrong")
  1158  	}
  1159  	if t.key.align > bucketCnt {
  1160  		throw("key align too big")
  1161  	}
  1162  	if t.elem.align > bucketCnt {
  1163  		throw("value align too big")
  1164  	}
  1165  	if t.key.size%uintptr(t.key.align) != 0 {
  1166  		throw("key size not a multiple of key align")
  1167  	}
  1168  	if t.elem.size%uintptr(t.elem.align) != 0 {
  1169  		throw("value size not a multiple of value align")
  1170  	}
  1171  	if bucketCnt < 8 {
  1172  		throw("bucketsize too small for proper alignment")
  1173  	}
  1174  	if dataOffset%uintptr(t.key.align) != 0 {
  1175  		throw("need padding in bucket (key)")
  1176  	}
  1177  	if dataOffset%uintptr(t.elem.align) != 0 {
  1178  		throw("need padding in bucket (value)")
  1179  	}
  1180  
  1181  	return makemap(t, cap, nil)
  1182  }
  1183  
  1184  //go:linkname reflect_mapaccess reflect.mapaccess
  1185  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1186  	val, ok := mapaccess2(t, h, key)
  1187  	if !ok {
  1188  		// reflect wants nil for a missing element
  1189  		val = nil
  1190  	}
  1191  	return val
  1192  }
  1193  
  1194  //go:linkname reflect_mapassign reflect.mapassign
  1195  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
  1196  	p := mapassign(t, h, key)
  1197  	typedmemmove(t.elem, p, val)
  1198  }
  1199  
  1200  //go:linkname reflect_mapdelete reflect.mapdelete
  1201  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1202  	mapdelete(t, h, key)
  1203  }
  1204  
  1205  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1206  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
  1207  	it := new(hiter)
  1208  	mapiterinit(t, h, it)
  1209  	return it
  1210  }
  1211  
  1212  //go:linkname reflect_mapiternext reflect.mapiternext
  1213  func reflect_mapiternext(it *hiter) {
  1214  	mapiternext(it)
  1215  }
  1216  
  1217  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1218  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1219  	return it.key
  1220  }
  1221  
  1222  //go:linkname reflect_maplen reflect.maplen
  1223  func reflect_maplen(h *hmap) int {
  1224  	if h == nil {
  1225  		return 0
  1226  	}
  1227  	if raceenabled {
  1228  		callerpc := getcallerpc()
  1229  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1230  	}
  1231  	return h.count
  1232  }
  1233  
  1234  //go:linkname reflect_ismapkey reflect.ismapkey
  1235  func reflect_ismapkey(t *_type) bool {
  1236  	return ismapkey(t)
  1237  }
  1238  
  1239  const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
  1240  var zeroVal [maxZero]byte