github.com/c0deoo1/golang1.5@v0.0.0-20220525150107-c87c805d4593/src/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table.  The data is arranged
// into an array of buckets.  Each bucket contains up to
// 8 key/value pairs.  The low-order bits of the hash are
// used to select a bucket.  Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big.  Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

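// As an illustrative sketch only (it mirrors the lookup code further down,
// and is not an extra API): on a 64-bit platform a lookup computes the hash,
// picks a bucket from the low-order bits, and derives the tophash byte from
// the high-order bits:
//
//	hash := alg.hash(key, uintptr(h.hash0))
//	bucket := hash & (uintptr(1)<<h.B - 1) // low-order B bits select the bucket
//	top := uint8(hash >> (ptrSize*8 - 8))  // high-order byte distinguishes entries
//	if top < minTopHash {
//		top += minTopHash // keep clear of the reserved marker values below
//	}
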
// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space.  I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows.  Typical tables will be somewhat less loaded.
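//
// For example, at loadFactor 6.5 a map with B = 5 (32 buckets) is grown by
// the insert that would push it past 6.5 * 32 = 208 entries; a rough sketch
// of the trigger checked in mapassign1 below:
//
//	if float32(h.count) >= loadFactor*float32(uintptr(1)<<h.B) && h.count >= bucketCnt {
//		hashGrow(t, h)
//	}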

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly.  For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values.  We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go.  Don't change this structure without also changing that code!
	count int // # live cells == size of map.  Must be first (used by len() builtin)
	flags uint8
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	hash0 uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}
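
// For example (an illustrative case, not extra machinery): a map[int64]int64
// keeps both keys and values inline with no pointers, so its bucket type is
// marked kindNoPointers and is never scanned by the GC. Every overflow bucket
// such a map allocates is therefore also appended to h.overflow[0] (see
// setoverflow below) so that the garbage collector keeps it alive.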

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
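
// As a concrete illustration of that layout choice (a sketch, not a real
// declaration): for map[int64]int8 on a 64-bit platform, one bucket holds
//
//	tophash  [8]uint8  // 8 bytes
//	keys     [8]int64  // 64 bytes, all keys packed together
//	values   [8]int8   // 8 bytes, all values packed together
//	overflow *bmap     // stored at the very end of the bucket
//
// whereas an interleaved key/value layout would need 7 padding bytes after
// each int8 value to realign the next int64 key.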

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize))
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}

// makemap implements a Go map creation make(map[k]v, hint)
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on.  We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// make sure zero of element type is available.
	mapzero(t.elem)

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}
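	// For instance (illustrative arithmetic only): with hint = 100 the loop
	// stops at B = 4, because 6.5*2^3 = 52 < 100 but 6.5*2^4 = 104 >= 100,
	// so the map starts with 16 buckets and room for ~104 entries before growing.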

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large, zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
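
// (For orientation: the compiler lowers a plain `v := m[k]` to a call to
// mapaccess1 above, and the comma-ok form `v, ok := m[k]` to mapaccess2
// below; both return a pointer into the table or to the value type's shared
// zero object rather than copying the element.)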

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}

// returns both key and value.  Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key.  Update it.
			typedmemmove(t.key, k2, key)
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key.  Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}
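
// (Similarly for orientation: an assignment `m[k] = v` compiles to a
// mapassign1 call, and the built-in `delete(m, k)` compiles to mapdelete below.)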

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomicor8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet.  We're working on a bucket whose
				// oldbucket has not been evacuated yet.  Or at least, it wasn't
				// evacuated when we started the bucket.  So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs).  We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation.  We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it.  That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}
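
// (Illustrative aside on the NaN cases above: in a map[float64]int, a key
// stored as math.NaN() satisfies k != k, so a lookup can never find it again;
// iteration is the only way such an entry is ever observed, which is why the
// iterator and evacuate must agree on where NaN keys go during a grow.)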

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}
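
// As a worked example (numbers only, no new logic): growing from B = 2 to
// B = 3 makes newbit = 4 in evacuate below, and old bucket 1 is split
// between new bucket 1 ("X", for keys with hash&newbit == 0) and new bucket
// 5 = 1+newbit ("Y", for keys with hash&newbit != 0), matching the x/y
// destinations computed there.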

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash.  Moreover,
						// it isn't reproducible.  Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way.  Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done.  Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
		}
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs.  Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

var zerobuf struct {
	lock mutex
	p    *byte
	size uintptr
}

var zerotiny [1024]byte

// mapzero ensures that t.zero points at a zero value for type t.
// Types known to the compiler are in read-only memory and all point
// to a single zero in the bss of a large enough size.
// Types allocated by package reflect are in writable memory and
// start out with zero set to nil; we initialize those on demand.
func mapzero(t *_type) {
	// On ARM, atomicloadp is implemented as xadd(p, 0),
	// so we cannot use atomicloadp on read-only memory.
	// Check whether the pointer is in the heap; if not, it's not writable
	// so the zero value must already be set.
	if GOARCH == "arm" && !inheap(uintptr(unsafe.Pointer(t))) {
		if t.zero == nil {
			print("runtime: map element ", *t._string, " missing zero value\n")
			throw("mapzero")
		}
		return
	}

	// Already done?
	// Check without lock, so must use atomicload to sync with atomicstore in allocation case below.
	if atomicloadp(unsafe.Pointer(&t.zero)) != nil {
		return
	}

	// Small enough for static buffer?
	if t.size <= uintptr(len(zerotiny)) {
		atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(&zerotiny[0]))
		return
	}

	// Use allocated buffer.
	lock(&zerobuf.lock)
	if zerobuf.size < t.size {
		if zerobuf.size == 0 {
			zerobuf.size = 4 * 1024
		}
		for zerobuf.size < t.size {
			zerobuf.size *= 2
			if zerobuf.size == 0 {
				// need >2GB zero on 32-bit machine
				throw("map element too large")
			}
		}
		zerobuf.p = (*byte)(persistentalloc(zerobuf.size, 64, &memstats.other_sys))
	}
	atomicstorep(unsafe.Pointer(&t.zero), unsafe.Pointer(zerobuf.p))
	unlock(&zerobuf.lock)
}