github.com/brownsys/tracing-framework-go@v0.0.0-20161210174012-0542a62412fe/go/darwin_amd64/src/runtime/hashmap.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table. The data is arranged
    10  // into an array of buckets. Each bucket contains up to
    11  // 8 key/value pairs. The low-order bits of the hash are
    12  // used to select a bucket. Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big. Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators continue iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
    32  
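         // As a hedged illustrative sketch (not part of the runtime's own logic),
         // the bucket-selection math described above looks like:
         //
         //	hash := alg.hash(key, seed)          // full hash of the key
         //	bucket := hash & (1<<B - 1)          // low-order bits pick the bucket
         //	top := uint8(hash >> (ptrBits - 8))  // high-order byte kept per entry
         //
         // Top bytes below minTopHash are bumped up to leave room for the special
         // marker values. Only entries whose stored top byte matches are compared
         // in full, so most misses are rejected with a single byte comparison.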
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space. I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and values)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/value pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows. Typical tables will be somewhat less loaded.
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/sys"
    59  	"unsafe"
    60  )
    61  
    62  const (
    63  	// Maximum number of key/value pairs a bucket can hold.
    64  	bucketCntBits = 3
    65  	bucketCnt     = 1 << bucketCntBits
    66  
    67  	// Maximum average load of a bucket that triggers growth.
    68  	loadFactor = 6.5
    69  
    70  	// Maximum key or value size to keep inline (instead of mallocing per element).
    71  	// Must fit in a uint8.
    72  	// Fast versions cannot handle big values - the cutoff size for
    73  	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
    74  	maxKeySize   = 128
    75  	maxValueSize = 128
    76  
    77  	// data offset should be the size of the bmap struct, but needs to be
    78  	// aligned correctly. For amd64p32 this means 64-bit alignment
    79  	// even though pointers are 32 bit.
    80  	dataOffset = unsafe.Offsetof(struct {
    81  		b bmap
    82  		v int64
    83  	}{}.v)
    84  
    85  	// Possible tophash values. We reserve a few possibilities for special marks.
    86  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    87  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    88  	// during map writes and thus no one else can observe the map during that time).
    89  	empty          = 0 // cell is empty
    90  	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
    91  	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
    92  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    93  	minTopHash     = 4 // minimum tophash for a normal filled cell.
    94  
    95  	// flags
    96  	iterator    = 1 // there may be an iterator using buckets
    97  	oldIterator = 2 // there may be an iterator using oldbuckets
    98  	hashWriting = 4 // a goroutine is writing to the map
    99  
   100  	// sentinel bucket ID for iterator checks
   101  	noCheck = 1<<(8*sys.PtrSize) - 1
   102  )
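
         // exampleTopHash is a hypothetical helper added here only as an
         // illustrative sketch; the runtime computes this inline instead.
         // It shows how the top 8 bits of a hash become a tophash value,
         // bumping small values past the reserved evacuation markers.
         func exampleTopHash(hash uintptr) uint8 {
         	top := uint8(hash >> (sys.PtrSize*8 - 8)) // keep the high byte
         	if top < minTopHash {
         		top += minTopHash // reserve 0..3 for empty/evacuated marks
         	}
         	return top
         }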
   103  
   104  // A header for a Go map.
   105  type hmap struct {
   106  	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
   107  	// ../reflect/type.go. Don't change this structure without also changing that code!
   108  	count int // # live cells == size of map.  Must be first (used by len() builtin)
   109  	flags uint8
   110  	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   111  	hash0 uint32 // hash seed
   112  
   113  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   114  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   115  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   116  
   117  	// If both key and value do not contain pointers and are inline, then we mark bucket
   118  	// type as containing no pointers. This avoids scanning such maps.
   119  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   120  	// alive, we store pointers to all overflow buckets in hmap.overflow.
   121  	// Overflow is used only if key and value do not contain pointers.
   122  	// overflow[0] contains overflow buckets for hmap.buckets.
   123  	// overflow[1] contains overflow buckets for hmap.oldbuckets.
   124  	// The first indirection allows us to reduce the static size of hmap.
   125  	// The second indirection allows us to store a pointer to the slice in hiter.
   126  	overflow *[2]*[]*bmap
   127  }
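
         // exampleMaxCount is a hypothetical sketch (not used by the runtime)
         // of the capacity implied by B: a map grows once its count exceeds
         // loadFactor entries per bucket on average, i.e. loadFactor * 2^B.
         func exampleMaxCount(B uint8) float32 {
         	return loadFactor * float32(uintptr(1)<<B) // e.g. B=5 -> 6.5*32 = 208
         }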
   128  
   129  // A bucket for a Go map.
   130  type bmap struct {
   131  	tophash [bucketCnt]uint8
   132  	// Followed by bucketCnt keys and then bucketCnt values.
   133  	// NOTE: packing all the keys together and then all the values together makes the
   134  	// code a bit more complicated than alternating key/value/key/value/... but it allows
   135  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   136  	// Followed by an overflow pointer.
   137  }
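
         // For example (sketch only): in a bucket of map[int64]int8, the layout
         // is 8 tophash bytes, then 8 int64 keys, then 8 int8 values, then the
         // overflow pointer. Interleaving key/value pairs instead would cost
         // 7 bytes of padding after every int8 value.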
   138  
   139  // A hash iteration structure.
   140  // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
   141  // the layout of this structure.
   142  type hiter struct {
   143  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   144  	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   145  	t           *maptype
   146  	h           *hmap
   147  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   148  	bptr        *bmap          // current bucket
   149  	overflow    [2]*[]*bmap    // keeps overflow buckets alive
   150  	startBucket uintptr        // bucket iteration started at
   151  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   152  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   153  	B           uint8
   154  	i           uint8
   155  	bucket      uintptr
   156  	checkBucket uintptr
   157  }
   158  
   159  func evacuated(b *bmap) bool {
   160  	h := b.tophash[0]
   161  	return h > empty && h < minTopHash
   162  }
   163  
   164  func (b *bmap) overflow(t *maptype) *bmap {
   165  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   166  }
   167  
   168  func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
   169  	if t.bucket.kind&kindNoPointers != 0 {
   170  		h.createOverflow()
   171  		*h.overflow[0] = append(*h.overflow[0], ovf)
   172  	}
   173  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   174  }
   175  
   176  func (h *hmap) createOverflow() {
   177  	if h.overflow == nil {
   178  		h.overflow = new([2]*[]*bmap)
   179  	}
   180  	if h.overflow[0] == nil {
   181  		h.overflow[0] = new([]*bmap)
   182  	}
   183  }
   184  
   185  // makemap implements Go map creation: make(map[k]v, hint).
   186  // If the compiler has determined that the map or the first bucket
   187  // can be created on the stack, h and/or bucket may be non-nil.
   188  // If h != nil, the map can be created directly in h.
   189  // If bucket != nil, bucket can be used as the first bucket.
   190  func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
   191  	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != t.hmap.size {
   192  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
   193  		throw("bad hmap size")
   194  	}
   195  
   196  	if hint < 0 || int64(int32(hint)) != hint {
   197  		panic(plainError("makemap: size out of range"))
   198  		// TODO: make hint an int, then none of this nonsense
   199  	}
   200  
   201  	if !ismapkey(t.key) {
   202  		throw("runtime.makemap: unsupported map key type")
   203  	}
   204  
   205  	// check compiler's and reflect's math
   206  	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
   207  		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
   208  		throw("key size wrong")
   209  	}
   210  	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
   211  		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
   212  		throw("value size wrong")
   213  	}
   214  
   215  	// invariants we depend on. We should probably check these at compile time
   216  	// somewhere, but for now we'll do it here.
   217  	if t.key.align > bucketCnt {
   218  		throw("key align too big")
   219  	}
   220  	if t.elem.align > bucketCnt {
   221  		throw("value align too big")
   222  	}
   223  	if t.key.size%uintptr(t.key.align) != 0 {
   224  		throw("key size not a multiple of key align")
   225  	}
   226  	if t.elem.size%uintptr(t.elem.align) != 0 {
   227  		throw("value size not a multiple of value align")
   228  	}
   229  	if bucketCnt < 8 {
   230  		throw("bucketsize too small for proper alignment")
   231  	}
   232  	if dataOffset%uintptr(t.key.align) != 0 {
   233  		throw("need padding in bucket (key)")
   234  	}
   235  	if dataOffset%uintptr(t.elem.align) != 0 {
   236  		throw("need padding in bucket (value)")
   237  	}
   238  
   239  	// find the size parameter B which will hold the requested # of elements
   240  	B := uint8(0)
   241  	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
   242  	}
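         	// Worked example (illustrative only): for hint = 100, B stops at 4,
         	// since 6.5*2^3 = 52 < 100 but 6.5*2^4 = 104 >= 100, giving 16 buckets.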
   243  
   244  	// allocate initial hash table
   245  	// if B == 0, the buckets field is allocated lazily later (in mapassign)
   246  	// If hint is large, zeroing this memory could take a while.
   247  	buckets := bucket
   248  	if B != 0 {
   249  		buckets = newarray(t.bucket, 1<<B)
   250  	}
   251  
   252  	// initialize Hmap
   253  	if h == nil {
   254  		h = (*hmap)(newobject(t.hmap))
   255  	}
   256  	h.count = 0
   257  	h.B = B
   258  	h.flags = 0
   259  	h.hash0 = fastrand1()
   260  	h.buckets = buckets
   261  	h.oldbuckets = nil
   262  	h.nevacuate = 0
   263  
   264  	return h
   265  }
   266  
   267  // mapaccess1 returns a pointer to h[key].  It never returns nil; instead
   268  // it returns a reference to the zero object for the value type if
   269  // the key is not in the map.
   270  // NOTE: The returned pointer may keep the whole map live, so don't
   271  // hold onto it for very long.
   272  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   273  	if raceenabled && h != nil {
   274  		callerpc := getcallerpc(unsafe.Pointer(&t))
   275  		pc := funcPC(mapaccess1)
   276  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   277  		raceReadObjectPC(t.key, key, callerpc, pc)
   278  	}
   279  	if msanenabled && h != nil {
   280  		msanread(key, t.key.size)
   281  	}
   282  	if h == nil || h.count == 0 {
   283  		return unsafe.Pointer(&zeroVal[0])
   284  	}
   285  	if h.flags&hashWriting != 0 {
   286  		throw("concurrent map read and map write")
   287  	}
   288  	alg := t.key.alg
   289  	hash := alg.hash(key, uintptr(h.hash0))
   290  	m := uintptr(1)<<h.B - 1
   291  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   292  	if c := h.oldbuckets; c != nil {
   293  		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
   294  		if !evacuated(oldb) {
   295  			b = oldb
   296  		}
   297  	}
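         	// Note (sketch): m>>1 is the mask for the old, half-sized table. For
         	// example, with B=5 (m=31) a lookup of new bucket 23 falls back to old
         	// bucket 23&15 = 7 until that bucket has been evacuated.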
   298  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   299  	if top < minTopHash {
   300  		top += minTopHash
   301  	}
   302  	for {
   303  		for i := uintptr(0); i < bucketCnt; i++ {
   304  			if b.tophash[i] != top {
   305  				continue
   306  			}
   307  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   308  			if t.indirectkey {
   309  				k = *((*unsafe.Pointer)(k))
   310  			}
   311  			if alg.equal(key, k) {
   312  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   313  				if t.indirectvalue {
   314  					v = *((*unsafe.Pointer)(v))
   315  				}
   316  				return v
   317  			}
   318  		}
   319  		b = b.overflow(t)
   320  		if b == nil {
   321  			return unsafe.Pointer(&zeroVal[0])
   322  		}
   323  	}
   324  }
   325  
   326  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   327  	if raceenabled && h != nil {
   328  		callerpc := getcallerpc(unsafe.Pointer(&t))
   329  		pc := funcPC(mapaccess2)
   330  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   331  		raceReadObjectPC(t.key, key, callerpc, pc)
   332  	}
   333  	if msanenabled && h != nil {
   334  		msanread(key, t.key.size)
   335  	}
   336  	if h == nil || h.count == 0 {
   337  		return unsafe.Pointer(&zeroVal[0]), false
   338  	}
   339  	if h.flags&hashWriting != 0 {
   340  		throw("concurrent map read and map write")
   341  	}
   342  	alg := t.key.alg
   343  	hash := alg.hash(key, uintptr(h.hash0))
   344  	m := uintptr(1)<<h.B - 1
   345  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   346  	if c := h.oldbuckets; c != nil {
   347  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   348  		if !evacuated(oldb) {
   349  			b = oldb
   350  		}
   351  	}
   352  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   353  	if top < minTopHash {
   354  		top += minTopHash
   355  	}
   356  	for {
   357  		for i := uintptr(0); i < bucketCnt; i++ {
   358  			if b.tophash[i] != top {
   359  				continue
   360  			}
   361  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   362  			if t.indirectkey {
   363  				k = *((*unsafe.Pointer)(k))
   364  			}
   365  			if alg.equal(key, k) {
   366  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   367  				if t.indirectvalue {
   368  					v = *((*unsafe.Pointer)(v))
   369  				}
   370  				return v, true
   371  			}
   372  		}
   373  		b = b.overflow(t)
   374  		if b == nil {
   375  			return unsafe.Pointer(&zeroVal[0]), false
   376  		}
   377  	}
   378  }
   379  
   380  // mapaccessK returns both key and value. It is used by the map iterator.
   381  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   382  	if h == nil || h.count == 0 {
   383  		return nil, nil
   384  	}
   385  	if h.flags&hashWriting != 0 {
   386  		throw("concurrent map read and map write")
   387  	}
   388  	alg := t.key.alg
   389  	hash := alg.hash(key, uintptr(h.hash0))
   390  	m := uintptr(1)<<h.B - 1
   391  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   392  	if c := h.oldbuckets; c != nil {
   393  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   394  		if !evacuated(oldb) {
   395  			b = oldb
   396  		}
   397  	}
   398  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   399  	if top < minTopHash {
   400  		top += minTopHash
   401  	}
   402  	for {
   403  		for i := uintptr(0); i < bucketCnt; i++ {
   404  			if b.tophash[i] != top {
   405  				continue
   406  			}
   407  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   408  			if t.indirectkey {
   409  				k = *((*unsafe.Pointer)(k))
   410  			}
   411  			if alg.equal(key, k) {
   412  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   413  				if t.indirectvalue {
   414  					v = *((*unsafe.Pointer)(v))
   415  				}
   416  				return k, v
   417  			}
   418  		}
   419  		b = b.overflow(t)
   420  		if b == nil {
   421  			return nil, nil
   422  		}
   423  	}
   424  }
   425  
   426  func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
   427  	v := mapaccess1(t, h, key)
   428  	if v == unsafe.Pointer(&zeroVal[0]) {
   429  		return zero
   430  	}
   431  	return v
   432  }
   433  
   434  func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
   435  	v := mapaccess1(t, h, key)
   436  	if v == unsafe.Pointer(&zeroVal[0]) {
   437  		return zero, false
   438  	}
   439  	return v, true
   440  }
   441  
   442  func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
   443  	if h == nil {
   444  		panic(plainError("assignment to entry in nil map"))
   445  	}
   446  	if raceenabled {
   447  		callerpc := getcallerpc(unsafe.Pointer(&t))
   448  		pc := funcPC(mapassign1)
   449  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   450  		raceReadObjectPC(t.key, key, callerpc, pc)
   451  		raceReadObjectPC(t.elem, val, callerpc, pc)
   452  	}
   453  	if msanenabled {
   454  		msanread(key, t.key.size)
   455  		msanread(val, t.elem.size)
   456  	}
   457  	if h.flags&hashWriting != 0 {
   458  		throw("concurrent map writes")
   459  	}
   460  	h.flags |= hashWriting
   461  
   462  	alg := t.key.alg
   463  	hash := alg.hash(key, uintptr(h.hash0))
   464  
   465  	if h.buckets == nil {
   466  		h.buckets = newarray(t.bucket, 1)
   467  	}
   468  
   469  again:
   470  	bucket := hash & (uintptr(1)<<h.B - 1)
   471  	if h.oldbuckets != nil {
   472  		growWork(t, h, bucket)
   473  	}
   474  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   475  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   476  	if top < minTopHash {
   477  		top += minTopHash
   478  	}
   479  
   480  	var inserti *uint8
   481  	var insertk unsafe.Pointer
   482  	var insertv unsafe.Pointer
   483  	for {
   484  		for i := uintptr(0); i < bucketCnt; i++ {
   485  			if b.tophash[i] != top {
   486  				if b.tophash[i] == empty && inserti == nil {
   487  					inserti = &b.tophash[i]
   488  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   489  					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   490  				}
   491  				continue
   492  			}
   493  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   494  			k2 := k
   495  			if t.indirectkey {
   496  				k2 = *((*unsafe.Pointer)(k2))
   497  			}
   498  			if !alg.equal(key, k2) {
   499  				continue
   500  			}
   501  			// already have a mapping for key. Update it.
   502  			if t.needkeyupdate {
   503  				typedmemmove(t.key, k2, key)
   504  			}
   505  			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   506  			v2 := v
   507  			if t.indirectvalue {
   508  				v2 = *((*unsafe.Pointer)(v2))
   509  			}
   510  			typedmemmove(t.elem, v2, val)
   511  			goto done
   512  		}
   513  		ovf := b.overflow(t)
   514  		if ovf == nil {
   515  			break
   516  		}
   517  		b = ovf
   518  	}
   519  
   520  	// did not find mapping for key. Allocate new cell & add entry.
   521  	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
   522  		hashGrow(t, h)
   523  		goto again // Growing the table invalidates everything, so try again
   524  	}
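         	// Worked example (illustrative only): with B=5 the map has 32 buckets
         	// and grows once count reaches 6.5*32 = 208 live entries.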
   525  
   526  	if inserti == nil {
   527  		// all current buckets are full, allocate a new one.
   528  		newb := (*bmap)(newobject(t.bucket))
   529  		h.setoverflow(t, b, newb)
   530  		inserti = &newb.tophash[0]
   531  		insertk = add(unsafe.Pointer(newb), dataOffset)
   532  		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
   533  	}
   534  
   535  	// store new key/value at insert position
   536  	if t.indirectkey {
   537  		kmem := newobject(t.key)
   538  		*(*unsafe.Pointer)(insertk) = kmem
   539  		insertk = kmem
   540  	}
   541  	if t.indirectvalue {
   542  		vmem := newobject(t.elem)
   543  		*(*unsafe.Pointer)(insertv) = vmem
   544  		insertv = vmem
   545  	}
   546  	typedmemmove(t.key, insertk, key)
   547  	typedmemmove(t.elem, insertv, val)
   548  	*inserti = top
   549  	h.count++
   550  
   551  done:
   552  	if h.flags&hashWriting == 0 {
   553  		throw("concurrent map writes")
   554  	}
   555  	h.flags &^= hashWriting
   556  }
   557  
   558  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   559  	if raceenabled && h != nil {
   560  		callerpc := getcallerpc(unsafe.Pointer(&t))
   561  		pc := funcPC(mapdelete)
   562  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   563  		raceReadObjectPC(t.key, key, callerpc, pc)
   564  	}
   565  	if msanenabled && h != nil {
   566  		msanread(key, t.key.size)
   567  	}
   568  	if h == nil || h.count == 0 {
   569  		return
   570  	}
   571  	if h.flags&hashWriting != 0 {
   572  		throw("concurrent map writes")
   573  	}
   574  	h.flags |= hashWriting
   575  
   576  	alg := t.key.alg
   577  	hash := alg.hash(key, uintptr(h.hash0))
   578  	bucket := hash & (uintptr(1)<<h.B - 1)
   579  	if h.oldbuckets != nil {
   580  		growWork(t, h, bucket)
   581  	}
   582  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   583  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   584  	if top < minTopHash {
   585  		top += minTopHash
   586  	}
   587  	for {
   588  		for i := uintptr(0); i < bucketCnt; i++ {
   589  			if b.tophash[i] != top {
   590  				continue
   591  			}
   592  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   593  			k2 := k
   594  			if t.indirectkey {
   595  				k2 = *((*unsafe.Pointer)(k2))
   596  			}
   597  			if !alg.equal(key, k2) {
   598  				continue
   599  			}
   600  			memclr(k, uintptr(t.keysize))
   601  			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
   602  			memclr(v, uintptr(t.valuesize))
   603  			b.tophash[i] = empty
   604  			h.count--
   605  			goto done
   606  		}
   607  		b = b.overflow(t)
   608  		if b == nil {
   609  			goto done
   610  		}
   611  	}
   612  
   613  done:
   614  	if h.flags&hashWriting == 0 {
   615  		throw("concurrent map writes")
   616  	}
   617  	h.flags &^= hashWriting
   618  }
   619  
   620  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   621  	// Clear pointer fields so garbage collector does not complain.
   622  	it.key = nil
   623  	it.value = nil
   624  	it.t = nil
   625  	it.h = nil
   626  	it.buckets = nil
   627  	it.bptr = nil
   628  	it.overflow[0] = nil
   629  	it.overflow[1] = nil
   630  
   631  	if raceenabled && h != nil {
   632  		callerpc := getcallerpc(unsafe.Pointer(&t))
   633  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   634  	}
   635  
   636  	if h == nil || h.count == 0 {
   637  		it.key = nil
   638  		it.value = nil
   639  		return
   640  	}
   641  
   642  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   643  		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
   644  	}
   645  	it.t = t
   646  	it.h = h
   647  
   648  	// grab snapshot of bucket state
   649  	it.B = h.B
   650  	it.buckets = h.buckets
   651  	if t.bucket.kind&kindNoPointers != 0 {
   652  		// Allocate the current slice and remember pointers to both current and old.
   653  		// This preserves all relevant overflow buckets alive even if
   654  		// the table grows and/or overflow buckets are added to the table
   655  		// while we are iterating.
   656  		h.createOverflow()
   657  		it.overflow = *h.overflow
   658  	}
   659  
   660  	// decide where to start
   661  	r := uintptr(fastrand1())
   662  	if h.B > 31-bucketCntBits {
   663  		r += uintptr(fastrand1()) << 31
   664  	}
   665  	it.startBucket = r & (uintptr(1)<<h.B - 1)
   666  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
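         	// Sketch of the randomization above: the low B bits of r choose the
         	// starting bucket and the next bucketCntBits bits choose the starting
         	// slot, e.g. with B=4, startBucket = r&15 and offset = (r>>4)&7.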
   667  
   668  	// iterator state
   669  	it.bucket = it.startBucket
   670  	it.wrapped = false
   671  	it.bptr = nil
   672  
   673  	// Remember we have an iterator.
   674  	// Can run concurrently with another mapiterinit().
   675  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   676  		atomic.Or8(&h.flags, iterator|oldIterator)
   677  	}
   678  
   679  	mapiternext(it)
   680  }
   681  
   682  func mapiternext(it *hiter) {
   683  	h := it.h
   684  	if raceenabled {
   685  		callerpc := getcallerpc(unsafe.Pointer(&it))
   686  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   687  	}
   688  	t := it.t
   689  	bucket := it.bucket
   690  	b := it.bptr
   691  	i := it.i
   692  	checkBucket := it.checkBucket
   693  	alg := t.key.alg
   694  
   695  next:
   696  	if b == nil {
   697  		if bucket == it.startBucket && it.wrapped {
   698  			// end of iteration
   699  			it.key = nil
   700  			it.value = nil
   701  			return
   702  		}
   703  		if h.oldbuckets != nil && it.B == h.B {
   704  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   705  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   706  			// bucket hasn't been evacuated) then we need to iterate through the old
   707  			// bucket and only return the ones that will be migrated to this bucket.
   708  			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
   709  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   710  			if !evacuated(b) {
   711  				checkBucket = bucket
   712  			} else {
   713  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   714  				checkBucket = noCheck
   715  			}
   716  		} else {
   717  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   718  			checkBucket = noCheck
   719  		}
   720  		bucket++
   721  		if bucket == uintptr(1)<<it.B {
   722  			bucket = 0
   723  			it.wrapped = true
   724  		}
   725  		i = 0
   726  	}
   727  	for ; i < bucketCnt; i++ {
   728  		offi := (i + it.offset) & (bucketCnt - 1)
   729  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   730  		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
   731  		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
   732  			if checkBucket != noCheck {
   733  				// Special case: iterator was started during a grow and the
   734  				// grow is not done yet. We're working on a bucket whose
   735  				// oldbucket has not been evacuated yet. Or at least, it wasn't
   736  				// evacuated when we started the bucket. So we're iterating
   737  				// through the oldbucket, skipping any keys that will go
   738  				// to the other new bucket (each oldbucket expands to two
   739  				// buckets during a grow).
   740  				k2 := k
   741  				if t.indirectkey {
   742  					k2 = *((*unsafe.Pointer)(k2))
   743  				}
   744  				if t.reflexivekey || alg.equal(k2, k2) {
   745  					// If the item in the oldbucket is not destined for
   746  					// the current new bucket in the iteration, skip it.
   747  					hash := alg.hash(k2, uintptr(h.hash0))
   748  					if hash&(uintptr(1)<<it.B-1) != checkBucket {
   749  						continue
   750  					}
   751  				} else {
   752  					// Hash isn't repeatable if k != k (NaNs).  We need a
   753  					// repeatable and randomish choice of which direction
   754  					// to send NaNs during evacuation. We'll use the low
   755  					// bit of tophash to decide which way NaNs go.
   756  					// NOTE: this case is why we need two evacuate tophash
   757  					// values, evacuatedX and evacuatedY, that differ in
   758  					// their low bit.
   759  					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   760  						continue
   761  					}
   762  				}
   763  			}
   764  			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
   765  				// this is the golden data, we can return it.
   766  				if t.indirectkey {
   767  					k = *((*unsafe.Pointer)(k))
   768  				}
   769  				it.key = k
   770  				if t.indirectvalue {
   771  					v = *((*unsafe.Pointer)(v))
   772  				}
   773  				it.value = v
   774  			} else {
   775  				// The hash table has grown since the iterator was started.
   776  				// The golden data for this key is now somewhere else.
   777  				k2 := k
   778  				if t.indirectkey {
   779  					k2 = *((*unsafe.Pointer)(k2))
   780  				}
   781  				if t.reflexivekey || alg.equal(k2, k2) {
   782  					// Check the current hash table for the data.
   783  					// This code handles the case where the key
   784  					// has been deleted, updated, or deleted and reinserted.
   785  					// NOTE: we need to regrab the key as it has potentially been
   786  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   787  					rk, rv := mapaccessK(t, h, k2)
   788  					if rk == nil {
   789  						continue // key has been deleted
   790  					}
   791  					it.key = rk
   792  					it.value = rv
   793  				} else {
   794  					// if key!=key then the entry can't be deleted or
   795  					// updated, so we can just return it. That's lucky for
   796  					// us because when key!=key we can't look it up
   797  					// successfully in the current table.
   798  					it.key = k2
   799  					if t.indirectvalue {
   800  						v = *((*unsafe.Pointer)(v))
   801  					}
   802  					it.value = v
   803  				}
   804  			}
   805  			it.bucket = bucket
   806  			if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
   807  				it.bptr = b
   808  			}
   809  			it.i = i + 1
   810  			it.checkBucket = checkBucket
   811  			return
   812  		}
   813  	}
   814  	b = b.overflow(t)
   815  	i = 0
   816  	goto next
   817  }
   818  
   819  func hashGrow(t *maptype, h *hmap) {
   820  	if h.oldbuckets != nil {
   821  		throw("evacuation not done in time")
   822  	}
   823  	oldbuckets := h.buckets
   824  	newbuckets := newarray(t.bucket, 1<<(h.B+1))
   825  	flags := h.flags &^ (iterator | oldIterator)
   826  	if h.flags&iterator != 0 {
   827  		flags |= oldIterator
   828  	}
   829  	// commit the grow (atomic wrt gc)
   830  	h.B++
   831  	h.flags = flags
   832  	h.oldbuckets = oldbuckets
   833  	h.buckets = newbuckets
   834  	h.nevacuate = 0
   835  
   836  	if h.overflow != nil {
   837  		// Promote current overflow buckets to the old generation.
   838  		if h.overflow[1] != nil {
   839  			throw("overflow is not nil")
   840  		}
   841  		h.overflow[1] = h.overflow[0]
   842  		h.overflow[0] = nil
   843  	}
   844  
   845  	// the actual copying of the hash table data is done incrementally
   846  	// by growWork() and evacuate().
   847  }
   848  
   849  func growWork(t *maptype, h *hmap, bucket uintptr) {
   850  	noldbuckets := uintptr(1) << (h.B - 1)
   851  
   852  	// make sure we evacuate the oldbucket corresponding
   853  	// to the bucket we're about to use
   854  	evacuate(t, h, bucket&(noldbuckets-1))
   855  
   856  	// evacuate one more oldbucket to make progress on growing
   857  	if h.oldbuckets != nil {
   858  		evacuate(t, h, h.nevacuate)
   859  	}
   860  }
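
         // Illustrative note (not runtime code): during a grow from 2^(B-1) to
         // 2^B buckets, each old bucket i splits into new buckets i (the "X"
         // half) and i+noldbuckets (the "Y" half). With B=5, a write to bucket
         // 23 first evacuates old bucket 23&15 = 7.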
   861  
   862  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
   863  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   864  	newbit := uintptr(1) << (h.B - 1)
   865  	alg := t.key.alg
   866  	if !evacuated(b) {
   867  		// TODO: reuse overflow buckets instead of using new ones, if there
   868  		// is no iterator using the old buckets.  (If !oldIterator.)
   869  
   870  		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
   871  		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
   872  		xi := 0
   873  		yi := 0
   874  		xk := add(unsafe.Pointer(x), dataOffset)
   875  		yk := add(unsafe.Pointer(y), dataOffset)
   876  		xv := add(xk, bucketCnt*uintptr(t.keysize))
   877  		yv := add(yk, bucketCnt*uintptr(t.keysize))
   878  		for ; b != nil; b = b.overflow(t) {
   879  			k := add(unsafe.Pointer(b), dataOffset)
   880  			v := add(k, bucketCnt*uintptr(t.keysize))
   881  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
   882  				top := b.tophash[i]
   883  				if top == empty {
   884  					b.tophash[i] = evacuatedEmpty
   885  					continue
   886  				}
   887  				if top < minTopHash {
   888  					throw("bad map state")
   889  				}
   890  				k2 := k
   891  				if t.indirectkey {
   892  					k2 = *((*unsafe.Pointer)(k2))
   893  				}
   894  				// Compute hash to make our evacuation decision (whether we need
   895  				// to send this key/value to bucket x or bucket y).
   896  				hash := alg.hash(k2, uintptr(h.hash0))
   897  				if h.flags&iterator != 0 {
   898  					if !t.reflexivekey && !alg.equal(k2, k2) {
   899  						// If key != key (NaNs), then the hash could be (and probably
   900  						// will be) entirely different from the old hash. Moreover,
   901  						// it isn't reproducible. Reproducibility is required in the
   902  						// presence of iterators, as our evacuation decision must
   903  						// match whatever decision the iterator made.
   904  						// Fortunately, we have the freedom to send these keys either
   905  						// way. Also, tophash is meaningless for these kinds of keys.
   906  						// We let the low bit of tophash drive the evacuation decision.
   907  						// We recompute a new random tophash for the next level so
   908  						// these keys will get evenly distributed across all buckets
   909  						// after multiple grows.
   910  						if (top & 1) != 0 {
   911  							hash |= newbit
   912  						} else {
   913  							hash &^= newbit
   914  						}
   915  						top = uint8(hash >> (sys.PtrSize*8 - 8))
   916  						if top < minTopHash {
   917  							top += minTopHash
   918  						}
   919  					}
   920  				}
   921  				if (hash & newbit) == 0 {
   922  					b.tophash[i] = evacuatedX
   923  					if xi == bucketCnt {
   924  						newx := (*bmap)(newobject(t.bucket))
   925  						h.setoverflow(t, x, newx)
   926  						x = newx
   927  						xi = 0
   928  						xk = add(unsafe.Pointer(x), dataOffset)
   929  						xv = add(xk, bucketCnt*uintptr(t.keysize))
   930  					}
   931  					x.tophash[xi] = top
   932  					if t.indirectkey {
   933  						*(*unsafe.Pointer)(xk) = k2 // copy pointer
   934  					} else {
   935  						typedmemmove(t.key, xk, k) // copy value
   936  					}
   937  					if t.indirectvalue {
   938  						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
   939  					} else {
   940  						typedmemmove(t.elem, xv, v)
   941  					}
   942  					xi++
   943  					xk = add(xk, uintptr(t.keysize))
   944  					xv = add(xv, uintptr(t.valuesize))
   945  				} else {
   946  					b.tophash[i] = evacuatedY
   947  					if yi == bucketCnt {
   948  						newy := (*bmap)(newobject(t.bucket))
   949  						h.setoverflow(t, y, newy)
   950  						y = newy
   951  						yi = 0
   952  						yk = add(unsafe.Pointer(y), dataOffset)
   953  						yv = add(yk, bucketCnt*uintptr(t.keysize))
   954  					}
   955  					y.tophash[yi] = top
   956  					if t.indirectkey {
   957  						*(*unsafe.Pointer)(yk) = k2
   958  					} else {
   959  						typedmemmove(t.key, yk, k)
   960  					}
   961  					if t.indirectvalue {
   962  						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
   963  					} else {
   964  						typedmemmove(t.elem, yv, v)
   965  					}
   966  					yi++
   967  					yk = add(yk, uintptr(t.keysize))
   968  					yv = add(yv, uintptr(t.valuesize))
   969  				}
   970  			}
   971  		}
   972  		// Unlink the overflow buckets & clear key/value to help GC.
   973  		if h.flags&oldIterator == 0 {
   974  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   975  			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
   976  		}
   977  	}
   978  
   979  	// Advance evacuation mark
   980  	if oldbucket == h.nevacuate {
   981  		h.nevacuate = oldbucket + 1
   982  		if oldbucket+1 == newbit { // newbit == # of oldbuckets
   983  			// Growing is all done. Free old main bucket array.
   984  			h.oldbuckets = nil
   985  			// Can discard old overflow buckets as well.
   986  			// If they are still referenced by an iterator,
   987  			// then the iterator holds a pointer to the slice.
   988  			if h.overflow != nil {
   989  				h.overflow[1] = nil
   990  			}
   991  		}
   992  	}
   993  }
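
         // exampleEvacDir is a hypothetical sketch (not used by the runtime) of
         // the X/Y decision made in evacuate: entries whose hash has the newbit
         // clear keep their bucket index, the rest move up by newbit.
         func exampleEvacDir(hash, oldbucket, newbit uintptr) uintptr {
         	if hash&newbit == 0 {
         		return oldbucket // stays in the first half ("X")
         	}
         	return oldbucket + newbit // moves to the second half ("Y")
         }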
   994  
   995  func ismapkey(t *_type) bool {
   996  	return t.alg.hash != nil
   997  }
   998  
   999  // Reflect stubs. Called from ../reflect/asm_*.s
  1000  
  1001  //go:linkname reflect_makemap reflect.makemap
  1002  func reflect_makemap(t *maptype) *hmap {
  1003  	return makemap(t, 0, nil, nil)
  1004  }
  1005  
  1006  //go:linkname reflect_mapaccess reflect.mapaccess
  1007  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
  1008  	val, ok := mapaccess2(t, h, key)
  1009  	if !ok {
  1010  		// reflect wants nil for a missing element
  1011  		val = nil
  1012  	}
  1013  	return val
  1014  }
  1015  
  1016  //go:linkname reflect_mapassign reflect.mapassign
  1017  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
  1018  	mapassign1(t, h, key, val)
  1019  }
  1020  
  1021  //go:linkname reflect_mapdelete reflect.mapdelete
  1022  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1023  	mapdelete(t, h, key)
  1024  }
  1025  
  1026  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1027  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
  1028  	it := new(hiter)
  1029  	mapiterinit(t, h, it)
  1030  	return it
  1031  }
  1032  
  1033  //go:linkname reflect_mapiternext reflect.mapiternext
  1034  func reflect_mapiternext(it *hiter) {
  1035  	mapiternext(it)
  1036  }
  1037  
  1038  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1039  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1040  	return it.key
  1041  }
  1042  
  1043  //go:linkname reflect_maplen reflect.maplen
  1044  func reflect_maplen(h *hmap) int {
  1045  	if h == nil {
  1046  		return 0
  1047  	}
  1048  	if raceenabled {
  1049  		callerpc := getcallerpc(unsafe.Pointer(&h))
  1050  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1051  	}
  1052  	return h.count
  1053  }
  1054  
  1055  //go:linkname reflect_ismapkey reflect.ismapkey
  1056  func reflect_ismapkey(t *_type) bool {
  1057  	return ismapkey(t)
  1058  }
  1059  
  1060  const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
  1061  var zeroVal [maxZero]byte