github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/hashmap.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table.  The data is arranged
    10  // into an array of buckets.  Each bucket contains up to
    11  // 8 key/value pairs.  The low-order bits of the hash are
    12  // used to select a bucket.  Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big.  Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    29  // growing the table, iterators continue iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
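        //
        // As an illustrative sketch of that split (not code used by the
        // runtime): on a 64-bit system with B = 5 there are 1<<5 = 32 buckets,
        // and a lookup of a key whose hash is 0xa1b2c3d4e5f67788 computes
        //
        //	bucket := hash & (uintptr(1)<<5 - 1)      // low 5 bits  -> bucket 8
        //	top := uint8(hash >> (sys.PtrSize*8 - 8)) // top 8 bits  -> tophash 0xa1
        //
        // The bucket index selects the bucket (and, during a grow, the
        // corresponding old bucket); the top byte is compared against the
        // per-slot tophash array so that most non-matching slots are skipped
        // without a full key comparison.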
    32  
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space.  I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and values)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/value pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows.  Typical tables will be somewhat less loaded.
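        //
        // As a worked example of the trigger (using the loadFactor constant of
        // 6.5 declared below): a table with B = 5 has 32 buckets and 32*8 = 256
        // slots, and mapassign1 starts a grow once the element count reaches
        // 6.5*32 = 208, i.e. at roughly 81% slot utilization -- the 6.5 row of
        // the table above.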
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/sys"
    59  	"unsafe"
    60  )
    61  
    62  const (
    63  	// Maximum number of key/value pairs a bucket can hold.
    64  	bucketCntBits = 3
    65  	bucketCnt     = 1 << bucketCntBits
    66  
    67  	// Maximum average load of a bucket that triggers growth.
    68  	loadFactor = 6.5
    69  
    70  	// Maximum key or value size to keep inline (instead of mallocing per element).
    71  	// Must fit in a uint8.
    72  	// Fast versions cannot handle big values - the cutoff size for
    73  	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
    74  	maxKeySize   = 128
    75  	maxValueSize = 128
    76  
    77  	// data offset should be the size of the bmap struct, but needs to be
    78  	// aligned correctly.  For amd64p32 this means 64-bit alignment
    79  	// even though pointers are 32 bit.
    80  	dataOffset = unsafe.Offsetof(struct {
    81  		b bmap
    82  		v int64
    83  	}{}.v)
    84  
    85  	// Possible tophash values.  We reserve a few possibilities for special marks.
    86  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    87  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    88  	// during map writes and thus no one else can observe the map during that time).
    89  	empty          = 0 // cell is empty
    90  	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
    91  	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
    92  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    93  	minTopHash     = 4 // minimum tophash for a normal filled cell.
    94  
    95  	// flags
    96  	iterator    = 1 // there may be an iterator using buckets
    97  	oldIterator = 2 // there may be an iterator using oldbuckets
    98  
    99  	// sentinel bucket ID for iterator checks
   100  	noCheck = 1<<(8*sys.PtrSize) - 1
   101  )
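
        // The tophash byte stored for a live cell is derived from the top 8
        // bits of the key's hash, bumped past the reserved marker values above
        // when it would collide with them.  The same few lines recur in the
        // access, assign, delete and evacuate paths below; as a sketch:
        //
        //	top := uint8(hash >> (sys.PtrSize*8 - 8))
        //	if top < minTopHash {
        //		top += minTopHash // don't collide with empty/evacuated markers
        //	}
        //
        // so every stored tophash of a filled cell is >= minTopHash, leaving
        // values 0-3 free to mean empty/evacuatedEmpty/evacuatedX/evacuatedY.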
   102  
   103  // A header for a Go map.
   104  type hmap struct {
   105  	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
   106  	// ../reflect/type.go.  Don't change this structure without also changing that code!
   107  	count int // # live cells == size of map.  Must be first (used by len() builtin)
   108  	flags uint8
   109  	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   110  	hash0 uint32 // hash seed
   111  
   112  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   113  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   114  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   115  
   116  	// If both key and value do not contain pointers and are inline, then we mark bucket
   117  	// type as containing no pointers. This avoids scanning such maps.
   118  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   119  	// alive, we store pointers to all overflow buckets in hmap.overflow.
   120  	// Overflow is used only if key and value do not contain pointers.
   121  	// overflow[0] contains overflow buckets for hmap.buckets.
   122  	// overflow[1] contains overflow buckets for hmap.oldbuckets.
   123  	// The first indirection allows us to reduce static size of hmap.
   124  // The second indirection allows us to store a pointer to the slice in hiter.
   125  	overflow *[2]*[]*bmap
   126  }
   127  
   128  // A bucket for a Go map.
   129  type bmap struct {
   130  	tophash [bucketCnt]uint8
   131  	// Followed by bucketCnt keys and then bucketCnt values.
   132  	// NOTE: packing all the keys together and then all the values together makes the
   133  	// code a bit more complicated than alternating key/value/key/value/... but it allows
   134  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   135  	// Followed by an overflow pointer.
   136  }
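
        // Concretely, the layout above puts the i'th key and value of a bucket
        // b at fixed offsets.  A sketch mirroring the pointer arithmetic used
        // throughout this file:
        //
        //	ki := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
        //	vi := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
        //	ovf := *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
        //
        // i.e. 8 tophash bytes, then 8 keys, then 8 values, then the overflow
        // pointer in the final word of the bucket (see the overflow and
        // setoverflow methods below).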
   137  
   138  // A hash iteration structure.
   139  // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
   140  // the layout of this structure.
   141  type hiter struct {
   142  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   143  	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   144  	t           *maptype
   145  	h           *hmap
   146  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   147  	bptr        *bmap          // current bucket
   148  	overflow    [2]*[]*bmap    // keeps overflow buckets alive
   149  	startBucket uintptr        // bucket iteration started at
   150  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   151  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   152  	B           uint8
   153  	i           uint8
   154  	bucket      uintptr
   155  	checkBucket uintptr
   156  }
   157  
   158  func evacuated(b *bmap) bool {
   159  	h := b.tophash[0]
   160  	return h > empty && h < minTopHash
   161  }
   162  
   163  func (b *bmap) overflow(t *maptype) *bmap {
   164  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   165  }
   166  
   167  func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
   168  	if t.bucket.kind&kindNoPointers != 0 {
   169  		h.createOverflow()
   170  		*h.overflow[0] = append(*h.overflow[0], ovf)
   171  	}
   172  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   173  }
   174  
   175  func (h *hmap) createOverflow() {
   176  	if h.overflow == nil {
   177  		h.overflow = new([2]*[]*bmap)
   178  	}
   179  	if h.overflow[0] == nil {
   180  		h.overflow[0] = new([]*bmap)
   181  	}
   182  }
   183  
   184  // makemap implements a Go map creation make(map[k]v, hint)
   185  // If the compiler has determined that the map or the first bucket
   186  // can be created on the stack, h and/or bucket may be non-nil.
   187  // If h != nil, the map can be created directly in h.
   188  // If bucket != nil, bucket can be used as the first bucket.
   189  func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
   190  	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
   191  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
   192  		throw("bad hmap size")
   193  	}
   194  
   195  	if hint < 0 || int64(int32(hint)) != hint {
   196  		panic("makemap: size out of range")
   197  		// TODO: make hint an int, then none of this nonsense
   198  	}
   199  
   200  	if !ismapkey(t.key) {
   201  		throw("runtime.makemap: unsupported map key type")
   202  	}
   203  
   204  	// check compiler's and reflect's math
   205  	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
   206  		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
   207  		throw("key size wrong")
   208  	}
   209  	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
   210  		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
   211  		throw("value size wrong")
   212  	}
   213  
   214  	// invariants we depend on.  We should probably check these at compile time
   215  	// somewhere, but for now we'll do it here.
   216  	if t.key.align > bucketCnt {
   217  		throw("key align too big")
   218  	}
   219  	if t.elem.align > bucketCnt {
   220  		throw("value align too big")
   221  	}
   222  	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
   223  		throw("key size not a multiple of key align")
   224  	}
   225  	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
   226  		throw("value size not a multiple of value align")
   227  	}
   228  	if bucketCnt < 8 {
   229  		throw("bucketsize too small for proper alignment")
   230  	}
   231  	if dataOffset%uintptr(t.key.align) != 0 {
   232  		throw("need padding in bucket (key)")
   233  	}
   234  	if dataOffset%uintptr(t.elem.align) != 0 {
   235  		throw("need padding in bucket (value)")
   236  	}
   237  
   238  	// make sure zeroptr is large enough
   239  	mapzero(t.elem)
   240  
   241  	// find size parameter which will hold the requested # of elements
   242  	B := uint8(0)
   243  	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
   244  	}
   245  
   246  	// allocate initial hash table
   247  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
   248  	// If hint is large, zeroing this memory could take a while.
   249  	buckets := bucket
   250  	if B != 0 {
   251  		buckets = newarray(t.bucket, uintptr(1)<<B)
   252  	}
   253  
   254  	// initialize Hmap
   255  	if h == nil {
   256  		h = (*hmap)(newobject(t.hmap))
   257  	}
   258  	h.count = 0
   259  	h.B = B
   260  	h.flags = 0
   261  	h.hash0 = fastrand1()
   262  	h.buckets = buckets
   263  	h.oldbuckets = nil
   264  	h.nevacuate = 0
   265  
   266  	return h
   267  }
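
        // Worked example of the B selection above: for a hypothetical
        // make(map[int]int, 100), the loop rejects B = 0..3 (capacities 6.5,
        // 13, 26 and 52 are all below 100) and stops at B = 4, giving 16
        // buckets with room for up to 6.5*16 = 104 elements before the first
        // grow.  Hints of 8 or fewer leave B = 0, in which case the single
        // bucket may be allocated lazily by the first mapassign1.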
   268  
   269  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   270  // it will return a reference to the zero object for the value type if
   271  // the key is not in the map.
   272  // NOTE: The returned pointer may keep the whole map live, so don't
   273  // hold onto it for very long.
   274  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   275  	if raceenabled && h != nil {
   276  		callerpc := getcallerpc(unsafe.Pointer(&t))
   277  		pc := funcPC(mapaccess1)
   278  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   279  		raceReadObjectPC(t.key, key, callerpc, pc)
   280  	}
   281  	if msanenabled && h != nil {
   282  		msanread(key, t.key.size)
   283  	}
   284  	if h == nil || h.count == 0 {
   285  		return atomic.Loadp(unsafe.Pointer(&zeroptr))
   286  	}
   287  	alg := t.key.alg
   288  	hash := alg.hash(key, uintptr(h.hash0))
   289  	m := uintptr(1)<<h.B - 1
   290  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   291  	if c := h.oldbuckets; c != nil {
   292  		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
   293  		if !evacuated(oldb) {
   294  			b = oldb
   295  		}
   296  	}
   297  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   298  	if top < minTopHash {
   299  		top += minTopHash
   300  	}
   301  	for {
   302  		for i := uintptr(0); i < bucketCnt; i++ {
   303  			if b.tophash[i] != top {
   304  				continue
   305  			}
   306  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   307  			if t.indirectkey {
   308  				k = *((*unsafe.Pointer)(k))
   309  			}
   310  			if alg.equal(key, k) {
   311  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   312  				if t.indirectvalue {
   313  					v = *((*unsafe.Pointer)(v))
   314  				}
   315  				return v
   316  			}
   317  		}
   318  		b = b.overflow(t)
   319  		if b == nil {
   320  			return atomic.Loadp(unsafe.Pointer(&zeroptr))
   321  		}
   322  	}
   323  }
   324  
   325  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   326  	if raceenabled && h != nil {
   327  		callerpc := getcallerpc(unsafe.Pointer(&t))
   328  		pc := funcPC(mapaccess2)
   329  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   330  		raceReadObjectPC(t.key, key, callerpc, pc)
   331  	}
   332  	if msanenabled && h != nil {
   333  		msanread(key, t.key.size)
   334  	}
   335  	if h == nil || h.count == 0 {
   336  		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
   337  	}
   338  	alg := t.key.alg
   339  	hash := alg.hash(key, uintptr(h.hash0))
   340  	m := uintptr(1)<<h.B - 1
   341  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   342  	if c := h.oldbuckets; c != nil {
   343  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   344  		if !evacuated(oldb) {
   345  			b = oldb
   346  		}
   347  	}
   348  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   349  	if top < minTopHash {
   350  		top += minTopHash
   351  	}
   352  	for {
   353  		for i := uintptr(0); i < bucketCnt; i++ {
   354  			if b.tophash[i] != top {
   355  				continue
   356  			}
   357  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   358  			if t.indirectkey {
   359  				k = *((*unsafe.Pointer)(k))
   360  			}
   361  			if alg.equal(key, k) {
   362  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   363  				if t.indirectvalue {
   364  					v = *((*unsafe.Pointer)(v))
   365  				}
   366  				return v, true
   367  			}
   368  		}
   369  		b = b.overflow(t)
   370  		if b == nil {
   371  			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
   372  		}
   373  	}
   374  }
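
        // mapaccess1 and mapaccess2 back the two user-level forms of map
        // lookup.  As a rough sketch of the lowering (the compiler also emits
        // calls to fast-path variants for some key types; see the maxKeySize
        // comment above):
        //
        //	v := m[k]      // mapaccess1: a missing key yields the value type's zero value
        //	v, ok := m[k]  // mapaccess2: additionally reports whether k was present
        //
        // Neither runtime routine ever returns nil; lookups of absent keys are
        // served from the shared zero buffer maintained by mapzero at the
        // bottom of this file.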
   375  
   376  // mapaccessK returns both key and value.  Used by map iterators.
   377  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   378  	if h == nil || h.count == 0 {
   379  		return nil, nil
   380  	}
   381  	alg := t.key.alg
   382  	hash := alg.hash(key, uintptr(h.hash0))
   383  	m := uintptr(1)<<h.B - 1
   384  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   385  	if c := h.oldbuckets; c != nil {
   386  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   387  		if !evacuated(oldb) {
   388  			b = oldb
   389  		}
   390  	}
   391  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   392  	if top < minTopHash {
   393  		top += minTopHash
   394  	}
   395  	for {
   396  		for i := uintptr(0); i < bucketCnt; i++ {
   397  			if b.tophash[i] != top {
   398  				continue
   399  			}
   400  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   401  			if t.indirectkey {
   402  				k = *((*unsafe.Pointer)(k))
   403  			}
   404  			if alg.equal(key, k) {
   405  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   406  				if t.indirectvalue {
   407  					v = *((*unsafe.Pointer)(v))
   408  				}
   409  				return k, v
   410  			}
   411  		}
   412  		b = b.overflow(t)
   413  		if b == nil {
   414  			return nil, nil
   415  		}
   416  	}
   417  }
   418  
   419  func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
   420  	if h == nil {
   421  		panic("assignment to entry in nil map")
   422  	}
   423  	if raceenabled {
   424  		callerpc := getcallerpc(unsafe.Pointer(&t))
   425  		pc := funcPC(mapassign1)
   426  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   427  		raceReadObjectPC(t.key, key, callerpc, pc)
   428  		raceReadObjectPC(t.elem, val, callerpc, pc)
   429  	}
   430  	if msanenabled {
   431  		msanread(key, t.key.size)
   432  		msanread(val, t.elem.size)
   433  	}
   434  
   435  	alg := t.key.alg
   436  	hash := alg.hash(key, uintptr(h.hash0))
   437  
   438  	if h.buckets == nil {
   439  		h.buckets = newarray(t.bucket, 1)
   440  	}
   441  
   442  again:
   443  	bucket := hash & (uintptr(1)<<h.B - 1)
   444  	if h.oldbuckets != nil {
   445  		growWork(t, h, bucket)
   446  	}
   447  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   448  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   449  	if top < minTopHash {
   450  		top += minTopHash
   451  	}
   452  
   453  	var inserti *uint8
   454  	var insertk unsafe.Pointer
   455  	var insertv unsafe.Pointer
   456  	for {
   457  		for i := uintptr(0); i < bucketCnt; i++ {
   458  			if b.tophash[i] != top {
   459  				if b.tophash[i] == empty && inserti == nil {
   460  					inserti = &b.tophash[i]
   461  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   462  					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   463  				}
   464  				continue
   465  			}
   466  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   467  			k2 := k
   468  			if t.indirectkey {
   469  				k2 = *((*unsafe.Pointer)(k2))
   470  			}
   471  			if !alg.equal(key, k2) {
   472  				continue
   473  			}
   474  			// already have a mapping for key.  Update it.
   475  			if t.needkeyupdate {
   476  				typedmemmove(t.key, k2, key)
   477  			}
   478  			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   479  			v2 := v
   480  			if t.indirectvalue {
   481  				v2 = *((*unsafe.Pointer)(v2))
   482  			}
   483  			typedmemmove(t.elem, v2, val)
   484  			return
   485  		}
   486  		ovf := b.overflow(t)
   487  		if ovf == nil {
   488  			break
   489  		}
   490  		b = ovf
   491  	}
   492  
   493  	// did not find mapping for key.  Allocate new cell & add entry.
   494  	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
   495  		hashGrow(t, h)
   496  		goto again // Growing the table invalidates everything, so try again
   497  	}
   498  
   499  	if inserti == nil {
   500  		// all current buckets are full, allocate a new one.
   501  		newb := (*bmap)(newobject(t.bucket))
   502  		h.setoverflow(t, b, newb)
   503  		inserti = &newb.tophash[0]
   504  		insertk = add(unsafe.Pointer(newb), dataOffset)
   505  		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
   506  	}
   507  
   508  	// store new key/value at insert position
   509  	if t.indirectkey {
   510  		kmem := newobject(t.key)
   511  		*(*unsafe.Pointer)(insertk) = kmem
   512  		insertk = kmem
   513  	}
   514  	if t.indirectvalue {
   515  		vmem := newobject(t.elem)
   516  		*(*unsafe.Pointer)(insertv) = vmem
   517  		insertv = vmem
   518  	}
   519  	typedmemmove(t.key, insertk, key)
   520  	typedmemmove(t.elem, insertv, val)
   521  	*inserti = top
   522  	h.count++
   523  }
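
        // At the language level, an assignment m[k] = v is lowered (as a rough
        // sketch; typeof(m) below is illustrative shorthand for the *maptype
        // descriptor, not an actual function) to a call of the form:
        //
        //	mapassign1(typeof(m), m, &k, &v)
        //
        // Note that the key/value slots are only written after the growth
        // check, since hashGrow invalidates inserti/insertk/insertv and forces
        // the "goto again" retry.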
   524  
   525  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   526  	if raceenabled && h != nil {
   527  		callerpc := getcallerpc(unsafe.Pointer(&t))
   528  		pc := funcPC(mapdelete)
   529  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   530  		raceReadObjectPC(t.key, key, callerpc, pc)
   531  	}
   532  	if msanenabled && h != nil {
   533  		msanread(key, t.key.size)
   534  	}
   535  	if h == nil || h.count == 0 {
   536  		return
   537  	}
   538  	alg := t.key.alg
   539  	hash := alg.hash(key, uintptr(h.hash0))
   540  	bucket := hash & (uintptr(1)<<h.B - 1)
   541  	if h.oldbuckets != nil {
   542  		growWork(t, h, bucket)
   543  	}
   544  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   545  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   546  	if top < minTopHash {
   547  		top += minTopHash
   548  	}
   549  	for {
   550  		for i := uintptr(0); i < bucketCnt; i++ {
   551  			if b.tophash[i] != top {
   552  				continue
   553  			}
   554  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   555  			k2 := k
   556  			if t.indirectkey {
   557  				k2 = *((*unsafe.Pointer)(k2))
   558  			}
   559  			if !alg.equal(key, k2) {
   560  				continue
   561  			}
   562  			memclr(k, uintptr(t.keysize))
   563  			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
   564  			memclr(v, uintptr(t.valuesize))
   565  			b.tophash[i] = empty
   566  			h.count--
   567  			return
   568  		}
   569  		b = b.overflow(t)
   570  		if b == nil {
   571  			return
   572  		}
   573  	}
   574  }
   575  
   576  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   577  	// Clear pointer fields so garbage collector does not complain.
   578  	it.key = nil
   579  	it.value = nil
   580  	it.t = nil
   581  	it.h = nil
   582  	it.buckets = nil
   583  	it.bptr = nil
   584  	it.overflow[0] = nil
   585  	it.overflow[1] = nil
   586  
   587  	if raceenabled && h != nil {
   588  		callerpc := getcallerpc(unsafe.Pointer(&t))
   589  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   590  	}
   591  
   592  	if h == nil || h.count == 0 {
   593  		it.key = nil
   594  		it.value = nil
   595  		return
   596  	}
   597  
   598  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   599  		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
   600  	}
   601  	it.t = t
   602  	it.h = h
   603  
   604  	// grab snapshot of bucket state
   605  	it.B = h.B
   606  	it.buckets = h.buckets
   607  	if t.bucket.kind&kindNoPointers != 0 {
   608  		// Allocate the current slice and remember pointers to both current and old.
   609  		// This keeps all relevant overflow buckets alive even if
   610  		// the table grows and/or overflow buckets are added to the table
   611  		// while we are iterating.
   612  		h.createOverflow()
   613  		it.overflow = *h.overflow
   614  	}
   615  
   616  	// decide where to start
   617  	r := uintptr(fastrand1())
   618  	if h.B > 31-bucketCntBits {
   619  		r += uintptr(fastrand1()) << 31
   620  	}
   621  	it.startBucket = r & (uintptr(1)<<h.B - 1)
   622  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
   623  
   624  	// iterator state
   625  	it.bucket = it.startBucket
   626  	it.wrapped = false
   627  	it.bptr = nil
   628  
   629  	// Remember we have an iterator.
   630  	// Can run concurrently with another mapiterinit().
   631  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   632  		atomic.Or8(&h.flags, iterator|oldIterator)
   633  	}
   634  
   635  	mapiternext(it)
   636  }
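
        // Because startBucket and offset are seeded from fastrand1 above,
        // iteration order is intentionally randomized from one range loop to
        // the next.  A user-level sketch of the visible effect:
        //
        //	m := map[string]int{"a": 1, "b": 2, "c": 3}
        //	for k := range m { println(k) } // e.g. b, c, a
        //	for k := range m { println(k) } // possibly c, a, b -- any order is legal
        //
        // so programs must not rely on a particular traversal order.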
   637  
   638  func mapiternext(it *hiter) {
   639  	h := it.h
   640  	if raceenabled {
   641  		callerpc := getcallerpc(unsafe.Pointer(&it))
   642  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   643  	}
   644  	t := it.t
   645  	bucket := it.bucket
   646  	b := it.bptr
   647  	i := it.i
   648  	checkBucket := it.checkBucket
   649  	alg := t.key.alg
   650  
   651  next:
   652  	if b == nil {
   653  		if bucket == it.startBucket && it.wrapped {
   654  			// end of iteration
   655  			it.key = nil
   656  			it.value = nil
   657  			return
   658  		}
   659  		if h.oldbuckets != nil && it.B == h.B {
   660  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   661  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   662  			// bucket hasn't been evacuated) then we need to iterate through the old
   663  			// bucket and only return the ones that will be migrated to this bucket.
   664  			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
   665  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   666  			if !evacuated(b) {
   667  				checkBucket = bucket
   668  			} else {
   669  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   670  				checkBucket = noCheck
   671  			}
   672  		} else {
   673  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   674  			checkBucket = noCheck
   675  		}
   676  		bucket++
   677  		if bucket == uintptr(1)<<it.B {
   678  			bucket = 0
   679  			it.wrapped = true
   680  		}
   681  		i = 0
   682  	}
   683  	for ; i < bucketCnt; i++ {
   684  		offi := (i + it.offset) & (bucketCnt - 1)
   685  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   686  		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
   687  		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
   688  			if checkBucket != noCheck {
   689  				// Special case: iterator was started during a grow and the
   690  				// grow is not done yet.  We're working on a bucket whose
   691  				// oldbucket has not been evacuated yet.  Or at least, it wasn't
   692  				// evacuated when we started the bucket.  So we're iterating
   693  				// through the oldbucket, skipping any keys that will go
   694  				// to the other new bucket (each oldbucket expands to two
   695  				// buckets during a grow).
   696  				k2 := k
   697  				if t.indirectkey {
   698  					k2 = *((*unsafe.Pointer)(k2))
   699  				}
   700  				if t.reflexivekey || alg.equal(k2, k2) {
   701  					// If the item in the oldbucket is not destined for
   702  					// the current new bucket in the iteration, skip it.
   703  					hash := alg.hash(k2, uintptr(h.hash0))
   704  					if hash&(uintptr(1)<<it.B-1) != checkBucket {
   705  						continue
   706  					}
   707  				} else {
   708  					// Hash isn't repeatable if k != k (NaNs).  We need a
   709  					// repeatable and randomish choice of which direction
   710  					// to send NaNs during evacuation.  We'll use the low
   711  					// bit of tophash to decide which way NaNs go.
   712  					// NOTE: this case is why we need two evacuate tophash
   713  					// values, evacuatedX and evacuatedY, that differ in
   714  					// their low bit.
   715  					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   716  						continue
   717  					}
   718  				}
   719  			}
   720  			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
   721  				// this is the golden data, we can return it.
   722  				if t.indirectkey {
   723  					k = *((*unsafe.Pointer)(k))
   724  				}
   725  				it.key = k
   726  				if t.indirectvalue {
   727  					v = *((*unsafe.Pointer)(v))
   728  				}
   729  				it.value = v
   730  			} else {
   731  				// The hash table has grown since the iterator was started.
   732  				// The golden data for this key is now somewhere else.
   733  				k2 := k
   734  				if t.indirectkey {
   735  					k2 = *((*unsafe.Pointer)(k2))
   736  				}
   737  				if t.reflexivekey || alg.equal(k2, k2) {
   738  					// Check the current hash table for the data.
   739  					// This code handles the case where the key
   740  					// has been deleted, updated, or deleted and reinserted.
   741  					// NOTE: we need to regrab the key as it has potentially been
   742  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   743  					rk, rv := mapaccessK(t, h, k2)
   744  					if rk == nil {
   745  						continue // key has been deleted
   746  					}
   747  					it.key = rk
   748  					it.value = rv
   749  				} else {
   750  					// if key!=key then the entry can't be deleted or
   751  					// updated, so we can just return it.  That's lucky for
   752  					// us because when key!=key we can't look it up
   753  					// successfully in the current table.
   754  					it.key = k2
   755  					if t.indirectvalue {
   756  						v = *((*unsafe.Pointer)(v))
   757  					}
   758  					it.value = v
   759  				}
   760  			}
   761  			it.bucket = bucket
   762  			it.bptr = b
   763  			it.i = i + 1
   764  			it.checkBucket = checkBucket
   765  			return
   766  		}
   767  	}
   768  	b = b.overflow(t)
   769  	i = 0
   770  	goto next
   771  }
   772  
   773  func hashGrow(t *maptype, h *hmap) {
   774  	if h.oldbuckets != nil {
   775  		throw("evacuation not done in time")
   776  	}
   777  	oldbuckets := h.buckets
   778  	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
   779  	flags := h.flags &^ (iterator | oldIterator)
   780  	if h.flags&iterator != 0 {
   781  		flags |= oldIterator
   782  	}
   783  	// commit the grow (atomic wrt gc)
   784  	h.B++
   785  	h.flags = flags
   786  	h.oldbuckets = oldbuckets
   787  	h.buckets = newbuckets
   788  	h.nevacuate = 0
   789  
   790  	if h.overflow != nil {
   791  		// Promote current overflow buckets to the old generation.
   792  		if h.overflow[1] != nil {
   793  			throw("overflow is not nil")
   794  		}
   795  		h.overflow[1] = h.overflow[0]
   796  		h.overflow[0] = nil
   797  	}
   798  
   799  	// the actual copying of the hash table data is done incrementally
   800  	// by growWork() and evacuate().
   801  }
   802  
   803  func growWork(t *maptype, h *hmap, bucket uintptr) {
   804  	noldbuckets := uintptr(1) << (h.B - 1)
   805  
   806  	// make sure we evacuate the oldbucket corresponding
   807  	// to the bucket we're about to use
   808  	evacuate(t, h, bucket&(noldbuckets-1))
   809  
   810  	// evacuate one more oldbucket to make progress on growing
   811  	if h.oldbuckets != nil {
   812  		evacuate(t, h, h.nevacuate)
   813  	}
   814  }
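
        // As a rough sketch of the pacing: every mapassign1 or mapdelete call
        // made while h.oldbuckets != nil runs growWork, which evacuates at most
        // two old buckets (the one backing the bucket being written, plus the
        // one at h.nevacuate).  A grow from B = 5 therefore has 32 old buckets
        // to drain and completes after on the order of 32 writes, at which
        // point evacuate sets h.oldbuckets back to nil.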
   815  
   816  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
   817  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   818  	newbit := uintptr(1) << (h.B - 1)
   819  	alg := t.key.alg
   820  	if !evacuated(b) {
   821  		// TODO: reuse overflow buckets instead of using new ones, if there
   822  		// is no iterator using the old buckets.  (If !oldIterator.)
   823  
   824  		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
   825  		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
   826  		xi := 0
   827  		yi := 0
   828  		xk := add(unsafe.Pointer(x), dataOffset)
   829  		yk := add(unsafe.Pointer(y), dataOffset)
   830  		xv := add(xk, bucketCnt*uintptr(t.keysize))
   831  		yv := add(yk, bucketCnt*uintptr(t.keysize))
   832  		for ; b != nil; b = b.overflow(t) {
   833  			k := add(unsafe.Pointer(b), dataOffset)
   834  			v := add(k, bucketCnt*uintptr(t.keysize))
   835  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
   836  				top := b.tophash[i]
   837  				if top == empty {
   838  					b.tophash[i] = evacuatedEmpty
   839  					continue
   840  				}
   841  				if top < minTopHash {
   842  					throw("bad map state")
   843  				}
   844  				k2 := k
   845  				if t.indirectkey {
   846  					k2 = *((*unsafe.Pointer)(k2))
   847  				}
   848  				// Compute hash to make our evacuation decision (whether we need
   849  				// to send this key/value to bucket x or bucket y).
   850  				hash := alg.hash(k2, uintptr(h.hash0))
   851  				if h.flags&iterator != 0 {
   852  					if !t.reflexivekey && !alg.equal(k2, k2) {
   853  						// If key != key (NaNs), then the hash could be (and probably
   854  						// will be) entirely different from the old hash.  Moreover,
   855  						// it isn't reproducible.  Reproducibility is required in the
   856  						// presence of iterators, as our evacuation decision must
   857  						// match whatever decision the iterator made.
   858  						// Fortunately, we have the freedom to send these keys either
   859  						// way.  Also, tophash is meaningless for these kinds of keys.
   860  						// We let the low bit of tophash drive the evacuation decision.
   861  						// We recompute a new random tophash for the next level so
   862  						// these keys will get evenly distributed across all buckets
   863  						// after multiple grows.
   864  						if (top & 1) != 0 {
   865  							hash |= newbit
   866  						} else {
   867  							hash &^= newbit
   868  						}
   869  						top = uint8(hash >> (sys.PtrSize*8 - 8))
   870  						if top < minTopHash {
   871  							top += minTopHash
   872  						}
   873  					}
   874  				}
   875  				if (hash & newbit) == 0 {
   876  					b.tophash[i] = evacuatedX
   877  					if xi == bucketCnt {
   878  						newx := (*bmap)(newobject(t.bucket))
   879  						h.setoverflow(t, x, newx)
   880  						x = newx
   881  						xi = 0
   882  						xk = add(unsafe.Pointer(x), dataOffset)
   883  						xv = add(xk, bucketCnt*uintptr(t.keysize))
   884  					}
   885  					x.tophash[xi] = top
   886  					if t.indirectkey {
   887  						*(*unsafe.Pointer)(xk) = k2 // copy pointer
   888  					} else {
   889  						typedmemmove(t.key, xk, k) // copy value
   890  					}
   891  					if t.indirectvalue {
   892  						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
   893  					} else {
   894  						typedmemmove(t.elem, xv, v)
   895  					}
   896  					xi++
   897  					xk = add(xk, uintptr(t.keysize))
   898  					xv = add(xv, uintptr(t.valuesize))
   899  				} else {
   900  					b.tophash[i] = evacuatedY
   901  					if yi == bucketCnt {
   902  						newy := (*bmap)(newobject(t.bucket))
   903  						h.setoverflow(t, y, newy)
   904  						y = newy
   905  						yi = 0
   906  						yk = add(unsafe.Pointer(y), dataOffset)
   907  						yv = add(yk, bucketCnt*uintptr(t.keysize))
   908  					}
   909  					y.tophash[yi] = top
   910  					if t.indirectkey {
   911  						*(*unsafe.Pointer)(yk) = k2
   912  					} else {
   913  						typedmemmove(t.key, yk, k)
   914  					}
   915  					if t.indirectvalue {
   916  						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
   917  					} else {
   918  						typedmemmove(t.elem, yv, v)
   919  					}
   920  					yi++
   921  					yk = add(yk, uintptr(t.keysize))
   922  					yv = add(yv, uintptr(t.valuesize))
   923  				}
   924  			}
   925  		}
   926  		// Unlink the overflow buckets & clear key/value to help GC.
   927  		if h.flags&oldIterator == 0 {
   928  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   929  			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
   930  		}
   931  	}
   932  
   933  	// Advance evacuation mark
   934  	if oldbucket == h.nevacuate {
   935  		h.nevacuate = oldbucket + 1
   936  		if oldbucket+1 == newbit { // newbit == # of oldbuckets
   937  			// Growing is all done.  Free old main bucket array.
   938  			h.oldbuckets = nil
   939  			// Can discard old overflow buckets as well.
   940  			// If they are still referenced by an iterator,
   941  			// then the iterator holds a pointer to the slice.
   942  			if h.overflow != nil {
   943  				h.overflow[1] = nil
   944  			}
   945  		}
   946  	}
   947  }
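
        // Worked example of the X/Y split: when growing from B = 2 (4 buckets)
        // to B = 3, newbit is 4, so every entry of old bucket 1 is copied
        // either to new bucket 1 (hash&newbit == 0, slot marked evacuatedX) or
        // to new bucket 1+newbit = 5 (hash&newbit != 0, marked evacuatedY).
        // An iterator started before the grow applies the same hash&newbit
        // test in mapiternext (or the low bit of tophash for keys where
        // key != key) to decide which entries of a not-yet-evacuated old
        // bucket belong to the new bucket it is walking.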
   948  
   949  func ismapkey(t *_type) bool {
   950  	return t.alg.hash != nil
   951  }
   952  
   953  // Reflect stubs.  Called from ../reflect/asm_*.s
   954  
   955  //go:linkname reflect_makemap reflect.makemap
   956  func reflect_makemap(t *maptype) *hmap {
   957  	return makemap(t, 0, nil, nil)
   958  }
   959  
   960  //go:linkname reflect_mapaccess reflect.mapaccess
   961  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   962  	val, ok := mapaccess2(t, h, key)
   963  	if !ok {
   964  		// reflect wants nil for a missing element
   965  		val = nil
   966  	}
   967  	return val
   968  }
   969  
   970  //go:linkname reflect_mapassign reflect.mapassign
   971  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
   972  	mapassign1(t, h, key, val)
   973  }
   974  
   975  //go:linkname reflect_mapdelete reflect.mapdelete
   976  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   977  	mapdelete(t, h, key)
   978  }
   979  
   980  //go:linkname reflect_mapiterinit reflect.mapiterinit
   981  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
   982  	it := new(hiter)
   983  	mapiterinit(t, h, it)
   984  	return it
   985  }
   986  
   987  //go:linkname reflect_mapiternext reflect.mapiternext
   988  func reflect_mapiternext(it *hiter) {
   989  	mapiternext(it)
   990  }
   991  
   992  //go:linkname reflect_mapiterkey reflect.mapiterkey
   993  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
   994  	return it.key
   995  }
   996  
   997  //go:linkname reflect_maplen reflect.maplen
   998  func reflect_maplen(h *hmap) int {
   999  	if h == nil {
  1000  		return 0
  1001  	}
  1002  	if raceenabled {
  1003  		callerpc := getcallerpc(unsafe.Pointer(&h))
  1004  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1005  	}
  1006  	return h.count
  1007  }
  1008  
  1009  //go:linkname reflect_ismapkey reflect.ismapkey
  1010  func reflect_ismapkey(t *_type) bool {
  1011  	return ismapkey(t)
  1012  }
  1013  
  1014  var zerolock mutex
  1015  
  1016  const initialZeroSize = 1024
  1017  
  1018  var zeroinitial [initialZeroSize]byte
  1019  
  1020  // All accesses to zeroptr and zerosize must be atomic so that they
  1021  // can be accessed without locks in the common case.
  1022  var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial)
  1023  var zerosize uintptr = initialZeroSize
  1024  
  1025  // mapzero ensures that zeroptr points to a buffer large enough to
  1026  // serve as the zero value for t.
  1027  func mapzero(t *_type) {
  1028  	// Is the type small enough for the existing buffer?
  1029  	cursize := uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
  1030  	if t.size <= cursize {
  1031  		return
  1032  	}
  1033  
  1034  	// Allocate a new buffer.
  1035  	lock(&zerolock)
  1036  	cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
  1037  	if cursize < t.size {
  1038  		for cursize < t.size {
  1039  			cursize *= 2
  1040  			if cursize == 0 {
  1041  				// need >2GB zero on 32-bit machine
  1042  				throw("map element too large")
  1043  			}
  1044  		}
  1045  		atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
  1046  		atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
  1047  	}
  1048  	unlock(&zerolock)
  1049  }