github.com/activestate/go@v0.0.0-20170614201249-0b81c023a722/src/runtime/hashmap.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table. The data is arranged
    10  // into an array of buckets. Each bucket contains up to
    11  // 8 key/value pairs. The low-order bits of the hash are
    12  // used to select a bucket. Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big. Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators remain iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
    32  
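// As a concrete sketch of the scheme above (an editor's illustration using
// names defined later in this file, not an additional runtime API): for a
// map with 2^B buckets, a lookup splits the key's hash into a bucket index
// and a per-cell "tophash" byte roughly like this:
//
//	hash := alg.hash(key, uintptr(h.hash0))
//	bucket := hash & (uintptr(1)<<h.B - 1)    // low-order bits select a bucket
//	top := uint8(hash >> (sys.PtrSize*8 - 8)) // high byte distinguishes cells
//	if top < minTopHash {
//		top += minTopHash // stay clear of the special tophash marks
//	}
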
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space. I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and values)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/value pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows. Typical tables will be somewhat less loaded.
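//
// For example, at loadFactor 6.5 a table with 2^5 = 32 buckets starts to grow
// once an insert finds it already holding 6.5*32 = 208 entries (see
// overLoadFactor below).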
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/sys"
    59  	"unsafe"
    60  )
    61  
    62  const (
    63  	// Maximum number of key/value pairs a bucket can hold.
    64  	bucketCntBits = 3
    65  	bucketCnt     = 1 << bucketCntBits
    66  
    67  	// Maximum average load of a bucket that triggers growth.
    68  	loadFactor = 6.5
    69  
    70  	// Maximum key or value size to keep inline (instead of mallocing per element).
    71  	// Must fit in a uint8.
    72  	// Fast versions cannot handle big values - the cutoff size for
    73  	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
    74  	maxKeySize   = 128
    75  	maxValueSize = 128
    76  
    77  	// data offset should be the size of the bmap struct, but needs to be
    78  	// aligned correctly. For amd64p32 this means 64-bit alignment
    79  	// even though pointers are 32 bit.
    80  	dataOffset = unsafe.Offsetof(struct {
    81  		b bmap
    82  		v int64
    83  	}{}.v)
    84  
    85  	// Possible tophash values. We reserve a few possibilities for special marks.
    86  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    87  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    88  	// during map writes and thus no one else can observe the map during that time).
    89  	empty          = 0 // cell is empty
    90  	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
    91  	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
    92  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    93  	minTopHash     = 4 // minimum tophash for a normal filled cell.
    94  
    95  	// flags
    96  	iterator     = 1 // there may be an iterator using buckets
    97  	oldIterator  = 2 // there may be an iterator using oldbuckets
    98  	hashWriting  = 4 // a goroutine is writing to the map
    99  	sameSizeGrow = 8 // the current map growth is to a new map of the same size
   100  
   101  	// sentinel bucket ID for iterator checks
   102  	noCheck = 1<<(8*sys.PtrSize) - 1
   103  )
   104  
   105  // A header for a Go map.
   106  type hmap struct {
   107  	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
   108  	// ../reflect/type.go. Don't change this structure without also changing that code!
   109  	count     int // # live cells == size of map.  Must be first (used by len() builtin)
   110  	flags     uint8
   111  	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   112  	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
   113  	hash0     uint32 // hash seed
   114  
   115  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   116  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   117  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   118  
   119  	extra *mapextra // optional fields
   120  }
   121  
   122  // mapextra holds fields that are not present on all maps.
   123  type mapextra struct {
   124  	// If both key and value do not contain pointers and are inline, then we mark bucket
   125  	// type as containing no pointers. This avoids scanning such maps.
   126  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
    127  	// alive, we store pointers to all overflow buckets in hmap.extra.overflow.
   128  	// Overflow is used only if key and value do not contain pointers.
   129  	// overflow[0] contains overflow buckets for hmap.buckets.
   130  	// overflow[1] contains overflow buckets for hmap.oldbuckets.
    131  	// The indirection allows us to store a pointer to the slice in hiter.
   132  	overflow [2]*[]*bmap
   133  
   134  	// nextOverflow holds a pointer to a free overflow bucket.
   135  	nextOverflow *bmap
   136  }
   137  
   138  // A bucket for a Go map.
   139  type bmap struct {
   140  	// tophash generally contains the top byte of the hash value
   141  	// for each key in this bucket. If tophash[0] < minTopHash,
   142  	// tophash[0] is a bucket evacuation state instead.
   143  	tophash [bucketCnt]uint8
   144  	// Followed by bucketCnt keys and then bucketCnt values.
   145  	// NOTE: packing all the keys together and then all the values together makes the
   146  	// code a bit more complicated than alternating key/value/key/value/... but it allows
   147  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   148  	// Followed by an overflow pointer.
   149  }
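
// As an illustration of the packed layout described above (not additional
// runtime code), a bucket of a map[int64]int8 is laid out as:
//
//	tophash  [8]uint8  //  8 bytes
//	keys     [8]int64  // 64 bytes
//	values   [8]int8   //  8 bytes
//	overflow *bmap     // pointer-sized
//
// Interleaving key/value pairs instead would require 7 bytes of padding after
// each int8 value to keep the next int64 key aligned.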
   150  
   151  // A hash iteration structure.
   152  // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
   153  // the layout of this structure.
   154  type hiter struct {
   155  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   156  	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   157  	t           *maptype
   158  	h           *hmap
   159  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   160  	bptr        *bmap          // current bucket
   161  	overflow    [2]*[]*bmap    // keeps overflow buckets alive
   162  	startBucket uintptr        // bucket iteration started at
   163  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   164  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   165  	B           uint8
   166  	i           uint8
   167  	bucket      uintptr
   168  	checkBucket uintptr
   169  }
   170  
   171  func evacuated(b *bmap) bool {
   172  	h := b.tophash[0]
   173  	return h > empty && h < minTopHash
   174  }
   175  
   176  func (b *bmap) overflow(t *maptype) *bmap {
   177  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   178  }
   179  
   180  func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
   181  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   182  }
   183  
   184  // incrnoverflow increments h.noverflow.
   185  // noverflow counts the number of overflow buckets.
   186  // This is used to trigger same-size map growth.
   187  // See also tooManyOverflowBuckets.
   188  // To keep hmap small, noverflow is a uint16.
   189  // When there are few buckets, noverflow is an exact count.
   190  // When there are many buckets, noverflow is an approximate count.
   191  func (h *hmap) incrnoverflow() {
   192  	// We trigger same-size map growth if there are
   193  	// as many overflow buckets as buckets.
   194  	// We need to be able to count to 1<<h.B.
   195  	if h.B < 16 {
   196  		h.noverflow++
   197  		return
   198  	}
   199  	// Increment with probability 1/(1<<(h.B-15)).
   200  	// When we reach 1<<15 - 1, we will have approximately
   201  	// as many overflow buckets as buckets.
   202  	mask := uint32(1)<<(h.B-15) - 1
   203  	// Example: if h.B == 18, then mask == 7,
   204  	// and fastrand & 7 == 0 with probability 1/8.
   205  	if fastrand()&mask == 0 {
   206  		h.noverflow++
   207  	}
   208  }
   209  
   210  func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
   211  	var ovf *bmap
   212  	if h.extra != nil && h.extra.nextOverflow != nil {
   213  		// We have preallocated overflow buckets available.
   214  		// See makeBucketArray for more details.
   215  		ovf = h.extra.nextOverflow
   216  		if ovf.overflow(t) == nil {
   217  			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
   218  			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
   219  		} else {
   220  			// This is the last preallocated overflow bucket.
   221  			// Reset the overflow pointer on this bucket,
   222  			// which was set to a non-nil sentinel value.
   223  			ovf.setoverflow(t, nil)
   224  			h.extra.nextOverflow = nil
   225  		}
   226  	} else {
   227  		ovf = (*bmap)(newobject(t.bucket))
   228  	}
   229  	h.incrnoverflow()
   230  	if t.bucket.kind&kindNoPointers != 0 {
   231  		h.createOverflow()
   232  		*h.extra.overflow[0] = append(*h.extra.overflow[0], ovf)
   233  	}
   234  	b.setoverflow(t, ovf)
   235  	return ovf
   236  }
   237  
   238  func (h *hmap) createOverflow() {
   239  	if h.extra == nil {
   240  		h.extra = new(mapextra)
   241  	}
   242  	if h.extra.overflow[0] == nil {
   243  		h.extra.overflow[0] = new([]*bmap)
   244  	}
   245  }
   246  
    247  // makemap implements Go map creation for make(map[k]v, hint).
   248  // If the compiler has determined that the map or the first bucket
   249  // can be created on the stack, h and/or bucket may be non-nil.
   250  // If h != nil, the map can be created directly in h.
   251  // If bucket != nil, bucket can be used as the first bucket.
   252  func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
   253  	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
   254  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
   255  		throw("bad hmap size")
   256  	}
   257  
   258  	if hint < 0 || hint > int64(maxSliceCap(t.bucket.size)) {
   259  		hint = 0
   260  	}
   261  
   262  	if !ismapkey(t.key) {
   263  		throw("runtime.makemap: unsupported map key type")
   264  	}
   265  
   266  	// check compiler's and reflect's math
   267  	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
   268  		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
   269  		throw("key size wrong")
   270  	}
   271  	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
   272  		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
   273  		throw("value size wrong")
   274  	}
   275  
   276  	// invariants we depend on. We should probably check these at compile time
   277  	// somewhere, but for now we'll do it here.
   278  	if t.key.align > bucketCnt {
   279  		throw("key align too big")
   280  	}
   281  	if t.elem.align > bucketCnt {
   282  		throw("value align too big")
   283  	}
   284  	if t.key.size%uintptr(t.key.align) != 0 {
   285  		throw("key size not a multiple of key align")
   286  	}
   287  	if t.elem.size%uintptr(t.elem.align) != 0 {
   288  		throw("value size not a multiple of value align")
   289  	}
   290  	if bucketCnt < 8 {
   291  		throw("bucketsize too small for proper alignment")
   292  	}
   293  	if dataOffset%uintptr(t.key.align) != 0 {
   294  		throw("need padding in bucket (key)")
   295  	}
   296  	if dataOffset%uintptr(t.elem.align) != 0 {
   297  		throw("need padding in bucket (value)")
   298  	}
   299  
   300  	// find size parameter which will hold the requested # of elements
   301  	B := uint8(0)
   302  	for ; overLoadFactor(hint, B); B++ {
   303  	}
   304  
   305  	// allocate initial hash table
    306  	// If B == 0, the buckets field is allocated lazily later (in mapassign).
    307  	// If hint is large, zeroing this memory could take a while.
   308  	buckets := bucket
   309  	var extra *mapextra
   310  	if B != 0 {
   311  		var nextOverflow *bmap
   312  		buckets, nextOverflow = makeBucketArray(t, B)
   313  		if nextOverflow != nil {
   314  			extra = new(mapextra)
   315  			extra.nextOverflow = nextOverflow
   316  		}
   317  	}
   318  
   319  	// initialize Hmap
   320  	if h == nil {
   321  		h = (*hmap)(newobject(t.hmap))
   322  	}
   323  	h.count = 0
   324  	h.B = B
   325  	h.extra = extra
   326  	h.flags = 0
   327  	h.hash0 = fastrand()
   328  	h.buckets = buckets
   329  	h.oldbuckets = nil
   330  	h.nevacuate = 0
   331  	h.noverflow = 0
   332  
   333  	return h
   334  }
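
// Worked example (illustrative only): make(map[string]int, 100) reaches
// makemap with hint == 100. The loop above stops at the first B for which
// overLoadFactor(100, B) is false, i.e. B == 4 (100 < 6.5*16 == 104), so the
// map starts with 16 buckets and can hold up to 104 entries before growing.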
   335  
   336  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   337  // it will return a reference to the zero object for the value type if
   338  // the key is not in the map.
   339  // NOTE: The returned pointer may keep the whole map live, so don't
   340  // hold onto it for very long.
   341  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   342  	if raceenabled && h != nil {
   343  		callerpc := getcallerpc(unsafe.Pointer(&t))
   344  		pc := funcPC(mapaccess1)
   345  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   346  		raceReadObjectPC(t.key, key, callerpc, pc)
   347  	}
   348  	if msanenabled && h != nil {
   349  		msanread(key, t.key.size)
   350  	}
   351  	if h == nil || h.count == 0 {
   352  		return unsafe.Pointer(&zeroVal[0])
   353  	}
   354  	if h.flags&hashWriting != 0 {
   355  		throw("concurrent map read and map write")
   356  	}
   357  	alg := t.key.alg
   358  	hash := alg.hash(key, uintptr(h.hash0))
   359  	m := uintptr(1)<<h.B - 1
   360  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   361  	if c := h.oldbuckets; c != nil {
   362  		if !h.sameSizeGrow() {
   363  			// There used to be half as many buckets; mask down one more power of two.
   364  			m >>= 1
   365  		}
   366  		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
   367  		if !evacuated(oldb) {
   368  			b = oldb
   369  		}
   370  	}
   371  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   372  	if top < minTopHash {
   373  		top += minTopHash
   374  	}
   375  	for {
   376  		for i := uintptr(0); i < bucketCnt; i++ {
   377  			if b.tophash[i] != top {
   378  				continue
   379  			}
   380  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   381  			if t.indirectkey {
   382  				k = *((*unsafe.Pointer)(k))
   383  			}
   384  			if alg.equal(key, k) {
   385  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   386  				if t.indirectvalue {
   387  					v = *((*unsafe.Pointer)(v))
   388  				}
   389  				return v
   390  			}
   391  		}
   392  		b = b.overflow(t)
   393  		if b == nil {
   394  			return unsafe.Pointer(&zeroVal[0])
   395  		}
   396  	}
   397  }
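
// At the language level, a one-result lookup such as
//
//	n := m["absent"] // n == 0 even though the key is missing
//
// compiles to a mapaccess1 call (or a specialized fast variant); the zero
// value comes from the &zeroVal[0] return above rather than from a nil pointer.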
   398  
   399  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   400  	if raceenabled && h != nil {
   401  		callerpc := getcallerpc(unsafe.Pointer(&t))
   402  		pc := funcPC(mapaccess2)
   403  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   404  		raceReadObjectPC(t.key, key, callerpc, pc)
   405  	}
   406  	if msanenabled && h != nil {
   407  		msanread(key, t.key.size)
   408  	}
   409  	if h == nil || h.count == 0 {
   410  		return unsafe.Pointer(&zeroVal[0]), false
   411  	}
   412  	if h.flags&hashWriting != 0 {
   413  		throw("concurrent map read and map write")
   414  	}
   415  	alg := t.key.alg
   416  	hash := alg.hash(key, uintptr(h.hash0))
   417  	m := uintptr(1)<<h.B - 1
   418  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   419  	if c := h.oldbuckets; c != nil {
   420  		if !h.sameSizeGrow() {
   421  			// There used to be half as many buckets; mask down one more power of two.
   422  			m >>= 1
   423  		}
   424  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   425  		if !evacuated(oldb) {
   426  			b = oldb
   427  		}
   428  	}
   429  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   430  	if top < minTopHash {
   431  		top += minTopHash
   432  	}
   433  	for {
   434  		for i := uintptr(0); i < bucketCnt; i++ {
   435  			if b.tophash[i] != top {
   436  				continue
   437  			}
   438  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   439  			if t.indirectkey {
   440  				k = *((*unsafe.Pointer)(k))
   441  			}
   442  			if alg.equal(key, k) {
   443  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   444  				if t.indirectvalue {
   445  					v = *((*unsafe.Pointer)(v))
   446  				}
   447  				return v, true
   448  			}
   449  		}
   450  		b = b.overflow(t)
   451  		if b == nil {
   452  			return unsafe.Pointer(&zeroVal[0]), false
   453  		}
   454  	}
   455  }
   456  
    457  // mapaccessK returns both key and value. Used by the map iterator.
   458  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   459  	if h == nil || h.count == 0 {
   460  		return nil, nil
   461  	}
   462  	alg := t.key.alg
   463  	hash := alg.hash(key, uintptr(h.hash0))
   464  	m := uintptr(1)<<h.B - 1
   465  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   466  	if c := h.oldbuckets; c != nil {
   467  		if !h.sameSizeGrow() {
   468  			// There used to be half as many buckets; mask down one more power of two.
   469  			m >>= 1
   470  		}
   471  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
   472  		if !evacuated(oldb) {
   473  			b = oldb
   474  		}
   475  	}
   476  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   477  	if top < minTopHash {
   478  		top += minTopHash
   479  	}
   480  	for {
   481  		for i := uintptr(0); i < bucketCnt; i++ {
   482  			if b.tophash[i] != top {
   483  				continue
   484  			}
   485  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   486  			if t.indirectkey {
   487  				k = *((*unsafe.Pointer)(k))
   488  			}
   489  			if alg.equal(key, k) {
   490  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   491  				if t.indirectvalue {
   492  					v = *((*unsafe.Pointer)(v))
   493  				}
   494  				return k, v
   495  			}
   496  		}
   497  		b = b.overflow(t)
   498  		if b == nil {
   499  			return nil, nil
   500  		}
   501  	}
   502  }
   503  
   504  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
   505  	v := mapaccess1(t, h, key)
   506  	if v == unsafe.Pointer(&zeroVal[0]) {
   507  		return zero
   508  	}
   509  	return v
   510  }
   511  
   512  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
   513  	v := mapaccess1(t, h, key)
   514  	if v == unsafe.Pointer(&zeroVal[0]) {
   515  		return zero, false
   516  	}
   517  	return v, true
   518  }
   519  
   520  // Like mapaccess, but allocates a slot for the key if it is not present in the map.
   521  func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   522  	if h == nil {
   523  		panic(plainError("assignment to entry in nil map"))
   524  	}
   525  	if raceenabled {
   526  		callerpc := getcallerpc(unsafe.Pointer(&t))
   527  		pc := funcPC(mapassign)
   528  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   529  		raceReadObjectPC(t.key, key, callerpc, pc)
   530  	}
   531  	if msanenabled {
   532  		msanread(key, t.key.size)
   533  	}
   534  	if h.flags&hashWriting != 0 {
   535  		throw("concurrent map writes")
   536  	}
   537  	alg := t.key.alg
   538  	hash := alg.hash(key, uintptr(h.hash0))
   539  
   540  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   541  	// in which case we have not actually done a write.
   542  	h.flags |= hashWriting
   543  
   544  	if h.buckets == nil {
   545  		h.buckets = newarray(t.bucket, 1)
   546  	}
   547  
   548  again:
   549  	bucket := hash & (uintptr(1)<<h.B - 1)
   550  	if h.growing() {
   551  		growWork(t, h, bucket)
   552  	}
   553  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   554  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   555  	if top < minTopHash {
   556  		top += minTopHash
   557  	}
   558  
   559  	var inserti *uint8
   560  	var insertk unsafe.Pointer
   561  	var val unsafe.Pointer
   562  	for {
   563  		for i := uintptr(0); i < bucketCnt; i++ {
   564  			if b.tophash[i] != top {
   565  				if b.tophash[i] == empty && inserti == nil {
   566  					inserti = &b.tophash[i]
   567  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   568  					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   569  				}
   570  				continue
   571  			}
   572  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   573  			if t.indirectkey {
   574  				k = *((*unsafe.Pointer)(k))
   575  			}
   576  			if !alg.equal(key, k) {
   577  				continue
   578  			}
   579  			// already have a mapping for key. Update it.
   580  			if t.needkeyupdate {
   581  				typedmemmove(t.key, k, key)
   582  			}
   583  			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   584  			goto done
   585  		}
   586  		ovf := b.overflow(t)
   587  		if ovf == nil {
   588  			break
   589  		}
   590  		b = ovf
   591  	}
   592  
   593  	// Did not find mapping for key. Allocate new cell & add entry.
   594  
   595  	// If we hit the max load factor or we have too many overflow buckets,
   596  	// and we're not already in the middle of growing, start growing.
   597  	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
   598  		hashGrow(t, h)
   599  		goto again // Growing the table invalidates everything, so try again
   600  	}
   601  
   602  	if inserti == nil {
   603  		// all current buckets are full, allocate a new one.
   604  		newb := h.newoverflow(t, b)
   605  		inserti = &newb.tophash[0]
   606  		insertk = add(unsafe.Pointer(newb), dataOffset)
   607  		val = add(insertk, bucketCnt*uintptr(t.keysize))
   608  	}
   609  
   610  	// store new key/value at insert position
   611  	if t.indirectkey {
   612  		kmem := newobject(t.key)
   613  		*(*unsafe.Pointer)(insertk) = kmem
   614  		insertk = kmem
   615  	}
   616  	if t.indirectvalue {
   617  		vmem := newobject(t.elem)
   618  		*(*unsafe.Pointer)(val) = vmem
   619  	}
   620  	typedmemmove(t.key, insertk, key)
   621  	*inserti = top
   622  	h.count++
   623  
   624  done:
   625  	if h.flags&hashWriting == 0 {
   626  		throw("concurrent map writes")
   627  	}
   628  	h.flags &^= hashWriting
   629  	if t.indirectvalue {
   630  		val = *((*unsafe.Pointer)(val))
   631  	}
   632  	return val
   633  }
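
// A plain assignment m[k] = v is compiled into, roughly, a mapassign call
// followed by a store through the returned slot (an approximate sketch of
// the generated code, not literal compiler output):
//
//	p := mapassign(t, h, unsafe.Pointer(&k))
//	*(*V)(p) = v // V is the map's value type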
   634  
   635  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   636  	if raceenabled && h != nil {
   637  		callerpc := getcallerpc(unsafe.Pointer(&t))
   638  		pc := funcPC(mapdelete)
   639  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   640  		raceReadObjectPC(t.key, key, callerpc, pc)
   641  	}
   642  	if msanenabled && h != nil {
   643  		msanread(key, t.key.size)
   644  	}
   645  	if h == nil || h.count == 0 {
   646  		return
   647  	}
   648  	if h.flags&hashWriting != 0 {
   649  		throw("concurrent map writes")
   650  	}
   651  
   652  	alg := t.key.alg
   653  	hash := alg.hash(key, uintptr(h.hash0))
   654  
   655  	// Set hashWriting after calling alg.hash, since alg.hash may panic,
   656  	// in which case we have not actually done a write (delete).
   657  	h.flags |= hashWriting
   658  
   659  	bucket := hash & (uintptr(1)<<h.B - 1)
   660  	if h.growing() {
   661  		growWork(t, h, bucket)
   662  	}
   663  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   664  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   665  	if top < minTopHash {
   666  		top += minTopHash
   667  	}
   668  	for {
   669  		for i := uintptr(0); i < bucketCnt; i++ {
   670  			if b.tophash[i] != top {
   671  				continue
   672  			}
   673  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   674  			k2 := k
   675  			if t.indirectkey {
   676  				k2 = *((*unsafe.Pointer)(k2))
   677  			}
   678  			if !alg.equal(key, k2) {
   679  				continue
   680  			}
   681  			if t.indirectkey {
   682  				*(*unsafe.Pointer)(k) = nil
   683  			} else {
   684  				typedmemclr(t.key, k)
   685  			}
   686  			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
   687  			if t.indirectvalue {
   688  				*(*unsafe.Pointer)(v) = nil
   689  			} else {
   690  				typedmemclr(t.elem, v)
   691  			}
   692  			b.tophash[i] = empty
   693  			h.count--
   694  			goto done
   695  		}
   696  		b = b.overflow(t)
   697  		if b == nil {
   698  			goto done
   699  		}
   700  	}
   701  
   702  done:
   703  	if h.flags&hashWriting == 0 {
   704  		throw("concurrent map writes")
   705  	}
   706  	h.flags &^= hashWriting
   707  }
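
// The built-in delete(m, k) lowers to, roughly, mapdelete(t, m, &k).
// Deleting from a nil or empty map is a no-op (the early return above);
// it never panics.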
   708  
   709  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   710  	// Clear pointer fields so garbage collector does not complain.
   711  	it.key = nil
   712  	it.value = nil
   713  	it.t = nil
   714  	it.h = nil
   715  	it.buckets = nil
   716  	it.bptr = nil
   717  	it.overflow[0] = nil
   718  	it.overflow[1] = nil
   719  
   720  	if raceenabled && h != nil {
   721  		callerpc := getcallerpc(unsafe.Pointer(&t))
   722  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   723  	}
   724  
   725  	if h == nil || h.count == 0 {
   726  		it.key = nil
   727  		it.value = nil
   728  		return
   729  	}
   730  
   731  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   732  		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
   733  	}
   734  	it.t = t
   735  	it.h = h
   736  
   737  	// grab snapshot of bucket state
   738  	it.B = h.B
   739  	it.buckets = h.buckets
   740  	if t.bucket.kind&kindNoPointers != 0 {
   741  		// Allocate the current slice and remember pointers to both current and old.
    742  		// This keeps all relevant overflow buckets alive even if
   743  		// the table grows and/or overflow buckets are added to the table
   744  		// while we are iterating.
   745  		h.createOverflow()
   746  		it.overflow = h.extra.overflow
   747  	}
   748  
   749  	// decide where to start
   750  	r := uintptr(fastrand())
   751  	if h.B > 31-bucketCntBits {
   752  		r += uintptr(fastrand()) << 31
   753  	}
   754  	it.startBucket = r & (uintptr(1)<<h.B - 1)
   755  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
   756  
   757  	// iterator state
   758  	it.bucket = it.startBucket
   759  	it.wrapped = false
   760  	it.bptr = nil
   761  
   762  	// Remember we have an iterator.
    763  	// Can run concurrently with another mapiterinit().
   764  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   765  		atomic.Or8(&h.flags, iterator|oldIterator)
   766  	}
   767  
   768  	mapiternext(it)
   769  }
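
// The random startBucket and offset chosen above are why map iteration order
// is deliberately unspecified and can differ between range loops:
//
//	for k, v := range m {
//		... // entries may arrive in a different order on each iteration
//	}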
   770  
   771  func mapiternext(it *hiter) {
   772  	h := it.h
   773  	if raceenabled {
   774  		callerpc := getcallerpc(unsafe.Pointer(&it))
   775  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   776  	}
   777  	if h.flags&hashWriting != 0 {
   778  		throw("concurrent map iteration and map write")
   779  	}
   780  	t := it.t
   781  	bucket := it.bucket
   782  	b := it.bptr
   783  	i := it.i
   784  	checkBucket := it.checkBucket
   785  	alg := t.key.alg
   786  
   787  next:
   788  	if b == nil {
   789  		if bucket == it.startBucket && it.wrapped {
   790  			// end of iteration
   791  			it.key = nil
   792  			it.value = nil
   793  			return
   794  		}
   795  		if h.growing() && it.B == h.B {
   796  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   797  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   798  			// bucket hasn't been evacuated) then we need to iterate through the old
   799  			// bucket and only return the ones that will be migrated to this bucket.
   800  			oldbucket := bucket & it.h.oldbucketmask()
   801  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   802  			if !evacuated(b) {
   803  				checkBucket = bucket
   804  			} else {
   805  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   806  				checkBucket = noCheck
   807  			}
   808  		} else {
   809  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   810  			checkBucket = noCheck
   811  		}
   812  		bucket++
   813  		if bucket == uintptr(1)<<it.B {
   814  			bucket = 0
   815  			it.wrapped = true
   816  		}
   817  		i = 0
   818  	}
   819  	for ; i < bucketCnt; i++ {
   820  		offi := (i + it.offset) & (bucketCnt - 1)
   821  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   822  		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
   823  		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
   824  			if checkBucket != noCheck && !h.sameSizeGrow() {
   825  				// Special case: iterator was started during a grow to a larger size
   826  				// and the grow is not done yet. We're working on a bucket whose
   827  				// oldbucket has not been evacuated yet. Or at least, it wasn't
   828  				// evacuated when we started the bucket. So we're iterating
   829  				// through the oldbucket, skipping any keys that will go
   830  				// to the other new bucket (each oldbucket expands to two
   831  				// buckets during a grow).
   832  				k2 := k
   833  				if t.indirectkey {
   834  					k2 = *((*unsafe.Pointer)(k2))
   835  				}
   836  				if t.reflexivekey || alg.equal(k2, k2) {
   837  					// If the item in the oldbucket is not destined for
   838  					// the current new bucket in the iteration, skip it.
   839  					hash := alg.hash(k2, uintptr(h.hash0))
   840  					if hash&(uintptr(1)<<it.B-1) != checkBucket {
   841  						continue
   842  					}
   843  				} else {
   844  					// Hash isn't repeatable if k != k (NaNs).  We need a
   845  					// repeatable and randomish choice of which direction
   846  					// to send NaNs during evacuation. We'll use the low
   847  					// bit of tophash to decide which way NaNs go.
   848  					// NOTE: this case is why we need two evacuate tophash
   849  					// values, evacuatedX and evacuatedY, that differ in
   850  					// their low bit.
   851  					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   852  						continue
   853  					}
   854  				}
   855  			}
   856  			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
   857  				// this is the golden data, we can return it.
   858  				if t.indirectkey {
   859  					k = *((*unsafe.Pointer)(k))
   860  				}
   861  				it.key = k
   862  				if t.indirectvalue {
   863  					v = *((*unsafe.Pointer)(v))
   864  				}
   865  				it.value = v
   866  			} else {
   867  				// The hash table has grown since the iterator was started.
   868  				// The golden data for this key is now somewhere else.
   869  				k2 := k
   870  				if t.indirectkey {
   871  					k2 = *((*unsafe.Pointer)(k2))
   872  				}
   873  				if t.reflexivekey || alg.equal(k2, k2) {
   874  					// Check the current hash table for the data.
   875  					// This code handles the case where the key
   876  					// has been deleted, updated, or deleted and reinserted.
   877  					// NOTE: we need to regrab the key as it has potentially been
   878  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   879  					rk, rv := mapaccessK(t, h, k2)
   880  					if rk == nil {
   881  						continue // key has been deleted
   882  					}
   883  					it.key = rk
   884  					it.value = rv
   885  				} else {
   886  					// if key!=key then the entry can't be deleted or
   887  					// updated, so we can just return it. That's lucky for
   888  					// us because when key!=key we can't look it up
   889  					// successfully in the current table.
   890  					it.key = k2
   891  					if t.indirectvalue {
   892  						v = *((*unsafe.Pointer)(v))
   893  					}
   894  					it.value = v
   895  				}
   896  			}
   897  			it.bucket = bucket
   898  			if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
   899  				it.bptr = b
   900  			}
   901  			it.i = i + 1
   902  			it.checkBucket = checkBucket
   903  			return
   904  		}
   905  	}
   906  	b = b.overflow(t)
   907  	i = 0
   908  	goto next
   909  }
   910  
   911  func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
   912  	base := uintptr(1 << b)
   913  	nbuckets := base
   914  	// For small b, overflow buckets are unlikely.
   915  	// Avoid the overhead of the calculation.
   916  	if b >= 4 {
   917  		// Add on the estimated number of overflow buckets
   918  		// required to insert the median number of elements
   919  		// used with this value of b.
   920  		nbuckets += 1 << (b - 4)
   921  		sz := t.bucket.size * nbuckets
   922  		up := roundupsize(sz)
   923  		if up != sz {
   924  			nbuckets = up / t.bucket.size
   925  		}
   926  	}
   927  	buckets = newarray(t.bucket, int(nbuckets))
   928  	if base != nbuckets {
   929  		// We preallocated some overflow buckets.
   930  		// To keep the overhead of tracking these overflow buckets to a minimum,
   931  		// we use the convention that if a preallocated overflow bucket's overflow
   932  		// pointer is nil, then there are more available by bumping the pointer.
   933  		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
   934  		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
   935  		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
   936  		last.setoverflow(t, (*bmap)(buckets))
   937  	}
   938  	return buckets, nextOverflow
   939  }
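
// Worked example (the exact count depends on roundupsize and the bucket
// size): with b == 6, base is 64 buckets and 1<<(6-4) == 4 extra overflow
// buckets are requested, so nbuckets starts at 68 and is then rounded up so
// the allocation fills out its size class. The preallocated overflow buckets
// sit at the tail of the same array and are handed out one at a time by
// newoverflow via h.extra.nextOverflow.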
   940  
   941  func hashGrow(t *maptype, h *hmap) {
   942  	// If we've hit the load factor, get bigger.
   943  	// Otherwise, there are too many overflow buckets,
   944  	// so keep the same number of buckets and "grow" laterally.
   945  	bigger := uint8(1)
   946  	if !overLoadFactor(int64(h.count), h.B) {
   947  		bigger = 0
   948  		h.flags |= sameSizeGrow
   949  	}
   950  	oldbuckets := h.buckets
   951  	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)
   952  
   953  	flags := h.flags &^ (iterator | oldIterator)
   954  	if h.flags&iterator != 0 {
   955  		flags |= oldIterator
   956  	}
   957  	// commit the grow (atomic wrt gc)
   958  	h.B += bigger
   959  	h.flags = flags
   960  	h.oldbuckets = oldbuckets
   961  	h.buckets = newbuckets
   962  	h.nevacuate = 0
   963  	h.noverflow = 0
   964  
   965  	if h.extra != nil && h.extra.overflow[0] != nil {
   966  		// Promote current overflow buckets to the old generation.
   967  		if h.extra.overflow[1] != nil {
   968  			throw("overflow is not nil")
   969  		}
   970  		h.extra.overflow[1] = h.extra.overflow[0]
   971  		h.extra.overflow[0] = nil
   972  	}
   973  	if nextOverflow != nil {
   974  		if h.extra == nil {
   975  			h.extra = new(mapextra)
   976  		}
   977  		h.extra.nextOverflow = nextOverflow
   978  	}
   979  
   980  	// the actual copying of the hash table data is done incrementally
   981  	// by growWork() and evacuate().
   982  }
   983  
    984  // overLoadFactor reports whether placing count items in 1<<B buckets puts the map over loadFactor.
   985  func overLoadFactor(count int64, B uint8) bool {
   986  	// TODO: rewrite to use integer math and comparison?
   987  	return count >= bucketCnt && float32(count) >= loadFactor*float32((uint64(1)<<B))
   988  }
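
// For example, overLoadFactor(13, 1) is true (13 >= bucketCnt and
// 13 >= 6.5*2), so a 2-bucket map already holding 13 entries grows on the
// next insert, while overLoadFactor(7, 0) is false: maps with fewer than
// bucketCnt entries fit in a single bucket and never trigger growth.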
   989  
    990  // tooManyOverflowBuckets reports whether having noverflow overflow buckets is too many for a map with 1<<B buckets.
   991  // Note that most of these overflow buckets must be in sparse use;
   992  // if use was dense, then we'd have already triggered regular map growth.
   993  func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
   994  	// If the threshold is too low, we do extraneous work.
   995  	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
   996  	// "too many" means (approximately) as many overflow buckets as regular buckets.
   997  	// See incrnoverflow for more details.
   998  	if B < 16 {
   999  		return noverflow >= uint16(1)<<B
  1000  	}
  1001  	return noverflow >= 1<<15
  1002  }
  1003  
  1004  // growing reports whether h is growing. The growth may be to the same size or bigger.
  1005  func (h *hmap) growing() bool {
  1006  	return h.oldbuckets != nil
  1007  }
  1008  
  1009  // sameSizeGrow reports whether the current growth is to a map of the same size.
  1010  func (h *hmap) sameSizeGrow() bool {
  1011  	return h.flags&sameSizeGrow != 0
  1012  }
  1013  
  1014  // noldbuckets calculates the number of buckets prior to the current map growth.
  1015  func (h *hmap) noldbuckets() uintptr {
  1016  	oldB := h.B
  1017  	if !h.sameSizeGrow() {
  1018  		oldB--
  1019  	}
  1020  	return uintptr(1) << oldB
  1021  }
  1022  
  1023  // oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
  1024  func (h *hmap) oldbucketmask() uintptr {
  1025  	return h.noldbuckets() - 1
  1026  }
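
// For example, during a doubling grow with h.B == 6 the old table had
// 2^5 == 32 buckets, so oldbucketmask() is 31 and new bucket 37 draws its
// entries from old bucket 37&31 == 5.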
  1027  
  1028  func growWork(t *maptype, h *hmap, bucket uintptr) {
  1029  	// make sure we evacuate the oldbucket corresponding
  1030  	// to the bucket we're about to use
  1031  	evacuate(t, h, bucket&h.oldbucketmask())
  1032  
  1033  	// evacuate one more oldbucket to make progress on growing
  1034  	if h.growing() {
  1035  		evacuate(t, h, h.nevacuate)
  1036  	}
  1037  }
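
// Note that each write evacuates at most two old buckets: the one the write
// itself needs and one more selected by h.nevacuate, so the cost of a grow is
// amortized across subsequent writes rather than paid all at once.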
  1038  
  1039  func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
  1040  	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
  1041  	return evacuated(b)
  1042  }
  1043  
  1044  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
  1045  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
  1046  	newbit := h.noldbuckets()
  1047  	alg := t.key.alg
  1048  	if !evacuated(b) {
  1049  		// TODO: reuse overflow buckets instead of using new ones, if there
  1050  		// is no iterator using the old buckets.  (If !oldIterator.)
  1051  
  1052  		var (
  1053  			x, y   *bmap          // current low/high buckets in new map
  1054  			xi, yi int            // key/val indices into x and y
  1055  			xk, yk unsafe.Pointer // pointers to current x and y key storage
  1056  			xv, yv unsafe.Pointer // pointers to current x and y value storage
  1057  		)
  1058  		x = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
  1059  		xi = 0
  1060  		xk = add(unsafe.Pointer(x), dataOffset)
  1061  		xv = add(xk, bucketCnt*uintptr(t.keysize))
  1062  		if !h.sameSizeGrow() {
  1063  			// Only calculate y pointers if we're growing bigger.
  1064  			// Otherwise GC can see bad pointers.
  1065  			y = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
  1066  			yi = 0
  1067  			yk = add(unsafe.Pointer(y), dataOffset)
  1068  			yv = add(yk, bucketCnt*uintptr(t.keysize))
  1069  		}
  1070  		for ; b != nil; b = b.overflow(t) {
  1071  			k := add(unsafe.Pointer(b), dataOffset)
  1072  			v := add(k, bucketCnt*uintptr(t.keysize))
  1073  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
  1074  				top := b.tophash[i]
  1075  				if top == empty {
  1076  					b.tophash[i] = evacuatedEmpty
  1077  					continue
  1078  				}
  1079  				if top < minTopHash {
  1080  					throw("bad map state")
  1081  				}
  1082  				k2 := k
  1083  				if t.indirectkey {
  1084  					k2 = *((*unsafe.Pointer)(k2))
  1085  				}
  1086  				useX := true
  1087  				if !h.sameSizeGrow() {
  1088  					// Compute hash to make our evacuation decision (whether we need
  1089  					// to send this key/value to bucket x or bucket y).
  1090  					hash := alg.hash(k2, uintptr(h.hash0))
  1091  					if h.flags&iterator != 0 {
  1092  						if !t.reflexivekey && !alg.equal(k2, k2) {
  1093  							// If key != key (NaNs), then the hash could be (and probably
  1094  							// will be) entirely different from the old hash. Moreover,
  1095  							// it isn't reproducible. Reproducibility is required in the
  1096  							// presence of iterators, as our evacuation decision must
  1097  							// match whatever decision the iterator made.
  1098  							// Fortunately, we have the freedom to send these keys either
  1099  							// way. Also, tophash is meaningless for these kinds of keys.
  1100  							// We let the low bit of tophash drive the evacuation decision.
  1101  							// We recompute a new random tophash for the next level so
  1102  							// these keys will get evenly distributed across all buckets
  1103  							// after multiple grows.
  1104  							if top&1 != 0 {
  1105  								hash |= newbit
  1106  							} else {
  1107  								hash &^= newbit
  1108  							}
  1109  							top = uint8(hash >> (sys.PtrSize*8 - 8))
  1110  							if top < minTopHash {
  1111  								top += minTopHash
  1112  							}
  1113  						}
  1114  					}
  1115  					useX = hash&newbit == 0
  1116  				}
  1117  				if useX {
  1118  					b.tophash[i] = evacuatedX
  1119  					if xi == bucketCnt {
  1120  						newx := h.newoverflow(t, x)
  1121  						x = newx
  1122  						xi = 0
  1123  						xk = add(unsafe.Pointer(x), dataOffset)
  1124  						xv = add(xk, bucketCnt*uintptr(t.keysize))
  1125  					}
  1126  					x.tophash[xi] = top
  1127  					if t.indirectkey {
  1128  						*(*unsafe.Pointer)(xk) = k2 // copy pointer
  1129  					} else {
  1130  						typedmemmove(t.key, xk, k) // copy value
  1131  					}
  1132  					if t.indirectvalue {
  1133  						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
  1134  					} else {
  1135  						typedmemmove(t.elem, xv, v)
  1136  					}
  1137  					xi++
  1138  					xk = add(xk, uintptr(t.keysize))
  1139  					xv = add(xv, uintptr(t.valuesize))
  1140  				} else {
  1141  					b.tophash[i] = evacuatedY
  1142  					if yi == bucketCnt {
  1143  						newy := h.newoverflow(t, y)
  1144  						y = newy
  1145  						yi = 0
  1146  						yk = add(unsafe.Pointer(y), dataOffset)
  1147  						yv = add(yk, bucketCnt*uintptr(t.keysize))
  1148  					}
  1149  					y.tophash[yi] = top
  1150  					if t.indirectkey {
  1151  						*(*unsafe.Pointer)(yk) = k2
  1152  					} else {
  1153  						typedmemmove(t.key, yk, k)
  1154  					}
  1155  					if t.indirectvalue {
  1156  						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
  1157  					} else {
  1158  						typedmemmove(t.elem, yv, v)
  1159  					}
  1160  					yi++
  1161  					yk = add(yk, uintptr(t.keysize))
  1162  					yv = add(yv, uintptr(t.valuesize))
  1163  				}
  1164  			}
  1165  		}
  1166  		// Unlink the overflow buckets & clear key/value to help GC.
  1167  		if h.flags&oldIterator == 0 {
  1168  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
  1169  			// Preserve b.tophash because the evacuation
  1170  			// state is maintained there.
  1171  			if t.bucket.kind&kindNoPointers == 0 {
  1172  				memclrHasPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
  1173  			} else {
  1174  				memclrNoHeapPointers(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
  1175  			}
  1176  		}
  1177  	}
  1178  
  1179  	// Advance evacuation mark
  1180  	if oldbucket == h.nevacuate {
  1181  		h.nevacuate = oldbucket + 1
  1182  		// Experiments suggest that 1024 is overkill by at least an order of magnitude.
  1183  		// Put it in there as a safeguard anyway, to ensure O(1) behavior.
  1184  		stop := h.nevacuate + 1024
  1185  		if stop > newbit {
  1186  			stop = newbit
  1187  		}
  1188  		for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
  1189  			h.nevacuate++
  1190  		}
  1191  		if h.nevacuate == newbit { // newbit == # of oldbuckets
  1192  			// Growing is all done. Free old main bucket array.
  1193  			h.oldbuckets = nil
  1194  			// Can discard old overflow buckets as well.
  1195  			// If they are still referenced by an iterator,
   1196  			// then the iterator holds a pointer to the slice.
  1197  			if h.extra != nil {
  1198  				h.extra.overflow[1] = nil
  1199  			}
  1200  			h.flags &^= sameSizeGrow
  1201  		}
  1202  	}
  1203  }
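
// For example, with newbit == 32 an entry from old bucket 5 stays in new
// bucket 5 (marked evacuatedX) when hash&32 == 0 and moves to new bucket
// 5+32 == 37 (marked evacuatedY) otherwise, matching the extra hash bit the
// doubled table uses to select buckets.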
  1204  
  1205  func ismapkey(t *_type) bool {
  1206  	return t.alg.hash != nil
  1207  }
  1208  
  1209  // Reflect stubs. Called from ../reflect/asm_*.s
  1210  
  1211  //go:linkname reflect_makemap reflect.makemap
  1212  func reflect_makemap(t *maptype, cap int) *hmap {
  1213  	return makemap(t, int64(cap), nil, nil)
  1214  }
  1215  
  1216  //go:linkname reflect_mapaccess reflect.mapaccess
  1217  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1218  	val, ok := mapaccess2(t, h, key)
  1219  	if !ok {
  1220  		// reflect wants nil for a missing element
  1221  		val = nil
  1222  	}
  1223  	return val
  1224  }
  1225  
  1226  //go:linkname reflect_mapassign reflect.mapassign
  1227  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
  1228  	p := mapassign(t, h, key)
  1229  	typedmemmove(t.elem, p, val)
  1230  }
  1231  
  1232  //go:linkname reflect_mapdelete reflect.mapdelete
  1233  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1234  	mapdelete(t, h, key)
  1235  }
  1236  
  1237  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1238  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
  1239  	it := new(hiter)
  1240  	mapiterinit(t, h, it)
  1241  	return it
  1242  }
  1243  
  1244  //go:linkname reflect_mapiternext reflect.mapiternext
  1245  func reflect_mapiternext(it *hiter) {
  1246  	mapiternext(it)
  1247  }
  1248  
  1249  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1250  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1251  	return it.key
  1252  }
  1253  
  1254  //go:linkname reflect_maplen reflect.maplen
  1255  func reflect_maplen(h *hmap) int {
  1256  	if h == nil {
  1257  		return 0
  1258  	}
  1259  	if raceenabled {
  1260  		callerpc := getcallerpc(unsafe.Pointer(&h))
  1261  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1262  	}
  1263  	return h.count
  1264  }
  1265  
  1266  //go:linkname reflect_ismapkey reflect.ismapkey
  1267  func reflect_ismapkey(t *_type) bool {
  1268  	return ismapkey(t)
  1269  }
  1270  
  1271  const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
  1272  var zeroVal [maxZero]byte