github.com/fenixara/go@v0.0.0-20170127160404-96ea0918e670/src/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)
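
// tophashOf is an illustrative sketch, not part of the runtime: the code
// below inlines this computation wherever a tophash is needed. The top
// byte of a hash distinguishes entries within a bucket; values below
// minTopHash are reserved as markers, so real tophash values are
// shifted up past them.
func tophashOf(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}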

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count     int // # live cells == size of map.  Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
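
// bucketKV is an illustrative sketch, not used by the runtime: given the
// layout described above (tophash array, then bucketCnt keys, then
// bucketCnt values), the i'th key/value pair of a bucket lives at fixed
// offsets from the bucket pointer.
func bucketKV(t *maptype, b *bmap, i uintptr) (k, v unsafe.Pointer) {
	k = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	v = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
	return k, v
}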

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	h.incrnoverflow()
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic(plainError("makemap: size out of range"))
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; overLoadFactor(hint, B); B++ {
	}
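	// For example, a hint of 100 yields B == 4, since loadFactor*2^4 == 104
	// holds 100 entries within the load factor while loadFactor*2^3 == 52
	// does not.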

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large, zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		buckets = newarray(t.bucket, 1<<B)
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0
	h.noverflow = 0

	return h
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapaccessK returns both key and value. Used by the map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return v
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return v, true
}

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	h.flags |= hashWriting

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if !alg.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate {
				typedmemmove(t.key, k, key)
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(val) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectvalue {
		val = *((*unsafe.Pointer)(val))
	}
	return val
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	h.flags |= hashWriting

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			if t.indirectkey {
				*(*unsafe.Pointer)(k) = nil
			} else {
				typedmemclr(t.key, k)
			}
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			if t.indirectvalue {
				*(*unsafe.Pointer)(v) = nil
			} else {
				typedmemclr(t.elem, v)
			}
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck && !h.sameSizeGrow() {
				// Special case: iterator was started during a grow to a larger size
				// and the grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs).  We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
				it.bptr = b
			}
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(int64(h.count), h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets := newarray(t.bucket, 1<<(h.B+bigger))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int64, B uint8) bool {
	// TODO: rewrite to use integer math and comparison?
	return count >= bucketCnt && float32(count) >= loadFactor*float32((uintptr(1)<<B))
}
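
// overLoadFactorInt is an illustrative sketch of the integer-only
// formulation suggested by the TODO above, not used by the runtime. It
// assumes loadFactor == 6.5 == 13/2; since 1<<B is even for B >= 1,
// 13*((1<<B)/2) equals 6.5*(1<<B) exactly, and for B == 0 the
// count >= bucketCnt test dominates either way.
func overLoadFactorInt(count int64, B uint8) bool {
	return count >= bucketCnt && uintptr(count) >= 13*((uintptr(1)<<B)/2)
}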

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B < 16 {
		return noverflow >= uint16(1)<<B
	}
	return noverflow >= 1<<15
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return uintptr(1) << oldB
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		var (
			x, y   *bmap          // current low/high buckets in new map
			xi, yi int            // key/val indices into x and y
			xk, yk unsafe.Pointer // pointers to current x and y key storage
			xv, yv unsafe.Pointer // pointers to current x and y value storage
		)
		x = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		xi = 0
		xk = add(unsafe.Pointer(x), dataOffset)
		xv = add(xk, bucketCnt*uintptr(t.keysize))
		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			yi = 0
			yk = add(unsafe.Pointer(y), dataOffset)
			yv = add(yk, bucketCnt*uintptr(t.keysize))
		}
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				useX := true
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := alg.hash(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 {
						if !t.reflexivekey && !alg.equal(k2, k2) {
							// If key != key (NaNs), then the hash could be (and probably
							// will be) entirely different from the old hash. Moreover,
							// it isn't reproducible. Reproducibility is required in the
							// presence of iterators, as our evacuation decision must
							// match whatever decision the iterator made.
							// Fortunately, we have the freedom to send these keys either
							// way. Also, tophash is meaningless for these kinds of keys.
							// We let the low bit of tophash drive the evacuation decision.
							// We recompute a new random tophash for the next level so
							// these keys will get evenly distributed across all buckets
							// after multiple grows.
							if top&1 != 0 {
								hash |= newbit
							} else {
								hash &^= newbit
							}
							top = uint8(hash >> (sys.PtrSize*8 - 8))
							if top < minTopHash {
								top += minTopHash
							}
						}
					}
					useX = hash&newbit == 0
				}
				if useX {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			if t.bucket.kind&kindNoPointers == 0 {
				memclrHasPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
			} else {
				memclrNoHeapPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
			}
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
			h.flags &^= sameSizeGrow
		}
	}
}
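
// evacDest is an illustrative sketch of the X/Y decision made in
// evacuate above, not a function the runtime uses: during a doubling
// grow, an entry in old bucket oldbucket moves to new bucket oldbucket
// ("X", the first half of the new table) when hash&newbit == 0, and to
// oldbucket+newbit ("Y", the second half) otherwise.
func evacDest(oldbucket, newbit, hash uintptr) uintptr {
	if hash&newbit == 0 {
		return oldbucket
	}
	return oldbucket + newbit
}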

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte