github.com/aloncn/graphics-go@v0.0.1/src/runtime/hashmap.go

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  // This file contains the implementation of Go's map type.
     8  //
     9  // A map is just a hash table.  The data is arranged
    10  // into an array of buckets.  Each bucket contains up to
    11  // 8 key/value pairs.  The low-order bits of the hash are
    12  // used to select a bucket.  Each bucket contains a few
    13  // high-order bits of each hash to distinguish the entries
    14  // within a single bucket.
    15  //
    16  // If more than 8 keys hash to a bucket, we chain on
    17  // extra buckets.
    18  //
    19  // When the hashtable grows, we allocate a new array
    20  // of buckets twice as big.  Buckets are incrementally
    21  // copied from the old bucket array to the new bucket array.
    22  //
    23  // Map iterators walk through the array of buckets and
    24  // return the keys in walk order (bucket #, then overflow
    25  // chain order, then bucket index).  To maintain iteration
    26  // semantics, we never move keys within their bucket (if
    27  // we did, keys might be returned 0 or 2 times).  When
    28  // growing the table, iterators keep iterating through the
    29  // old table and must check the new table if the bucket
    30  // they are iterating through has been moved ("evacuated")
    31  // to the new table.
    32  
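// Illustrative sketch, not part of the runtime itself: how a lookup splits a
// hash value into a bucket index and a tophash byte, using the same
// expressions the functions below use. exampleBucketAndTop is a hypothetical
// name; the real code inlines this logic.
//
//	func exampleBucketAndTop(hash uintptr, B uint8) (bucket uintptr, top uint8) {
//		bucket = hash & (uintptr(1)<<B - 1)      // low-order B bits select the bucket
//		top = uint8(hash >> (sys.PtrSize*8 - 8)) // top 8 bits distinguish entries in a bucket
//		if top < minTopHash {
//			top += minTopHash // small values are reserved for special marks
//		}
//		return bucket, top
//	}
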
    33  // Picking loadFactor: too large and we have lots of overflow
    34  // buckets, too small and we waste a lot of space.  I wrote
    35  // a simple program to check some stats for different loads:
    36  // (64-bit, 8 byte keys and values)
    37  //  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
    38  //        4.00         2.13        20.77         3.00         4.00
    39  //        4.50         4.05        17.30         3.25         4.50
    40  //        5.00         6.85        14.77         3.50         5.00
    41  //        5.50        10.55        12.94         3.75         5.50
    42  //        6.00        15.27        11.67         4.00         6.00
    43  //        6.50        20.90        10.79         4.25         6.50
    44  //        7.00        27.14        10.15         4.50         7.00
    45  //        7.50        34.03         9.73         4.75         7.50
    46  //        8.00        41.10         9.40         5.00         8.00
    47  //
    48  // %overflow   = percentage of buckets which have an overflow bucket
    49  // bytes/entry = overhead bytes used per key/value pair
    50  // hitprobe    = # of entries to check when looking up a present key
    51  // missprobe   = # of entries to check when looking up an absent key
    52  //
    53  // Keep in mind this data is for maximally loaded tables, i.e. just
    54  // before the table grows.  Typical tables will be somewhat less loaded.
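//
// As a worked example of the load factor chosen below: a table with B = 5 has
// 1<<5 = 32 buckets and room for 6.5*32 = 208 entries; the insert that would
// make it 209 grows the table to 64 buckets first.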
    55  
    56  import (
    57  	"runtime/internal/atomic"
    58  	"runtime/internal/sys"
    59  	"unsafe"
    60  )
    61  
    62  const (
    63  	// Maximum number of key/value pairs a bucket can hold.
    64  	bucketCntBits = 3
    65  	bucketCnt     = 1 << bucketCntBits
    66  
    67  	// Maximum average load of a bucket that triggers growth.
    68  	loadFactor = 6.5
    69  
    70  	// Maximum key or value size to keep inline (instead of mallocing per element).
    71  	// Must fit in a uint8.
    72  	// Fast versions cannot handle big values - the cutoff size for
    73  	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
    74  	maxKeySize   = 128
    75  	maxValueSize = 128
    76  
    77  	// data offset should be the size of the bmap struct, but needs to be
    78  	// aligned correctly.  For amd64p32 this means 64-bit alignment
    79  	// even though pointers are 32 bit.
    80  	dataOffset = unsafe.Offsetof(struct {
    81  		b bmap
    82  		v int64
    83  	}{}.v)
    84  
    85  	// Possible tophash values.  We reserve a few possibilities for special marks.
    86  	// Each bucket (including its overflow buckets, if any) will have either all or none of its
    87  	// entries in the evacuated* states (except during the evacuate() method, which only happens
    88  	// during map writes and thus no one else can observe the map during that time).
    89  	empty          = 0 // cell is empty
    90  	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
    91  	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
    92  	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
    93  	minTopHash     = 4 // minimum tophash for a normal filled cell.
    94  
    95  	// flags
    96  	iterator    = 1 // there may be an iterator using buckets
    97  	oldIterator = 2 // there may be an iterator using oldbuckets
    98  	hashWriting = 4 // a goroutine is writing to the map
    99  
   100  	// sentinel bucket ID for iterator checks
   101  	noCheck = 1<<(8*sys.PtrSize) - 1
   102  )
   103  
   104  // A header for a Go map.
   105  type hmap struct {
   106  	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
   107  	// ../reflect/type.go.  Don't change this structure without also changing that code!
   108  	count int // # live cells == size of map.  Must be first (used by len() builtin)
   109  	flags uint8
   110  	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
   111  	hash0 uint32 // hash seed
   112  
   113  	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
   114  	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
   115  	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
   116  
   117  	// If both key and value do not contain pointers and are inline, then we mark the bucket
   118  	// type as containing no pointers. This avoids scanning such maps.
   119  	// However, bmap.overflow is a pointer. In order to keep overflow buckets
   120  	// alive, we store pointers to all overflow buckets in hmap.overflow.
   121  	// Overflow is used only if key and value do not contain pointers.
   122  	// overflow[0] contains overflow buckets for hmap.buckets.
   123  	// overflow[1] contains overflow buckets for hmap.oldbuckets.
   124  	// The first indirection allows us to reduce the static size of hmap.
   125  	// The second indirection allows us to store a pointer to the slice in hiter.
   126  	overflow *[2]*[]*bmap
   127  }
   128  
   129  // A bucket for a Go map.
   130  type bmap struct {
   131  	tophash [bucketCnt]uint8
   132  	// Followed by bucketCnt keys and then bucketCnt values.
   133  	// NOTE: packing all the keys together and then all the values together makes the
   134  	// code a bit more complicated than alternating key/value/key/value/... but it allows
   135  	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
   136  	// Followed by an overflow pointer.
   137  }
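
// exampleKeyAt and exampleValueAt are hypothetical helpers, sketched here to
// show how the layout described above is addressed; the functions below
// compute these offsets inline. Keys start at dataOffset, values start after
// all bucketCnt keys.
func exampleKeyAt(t *maptype, b *bmap, i uintptr) unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
}

func exampleValueAt(t *maptype, b *bmap, i uintptr) unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
}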
   138  
   139  // A hash iteration structure.
   140  // If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
   141  // the layout of this structure.
   142  type hiter struct {
   143  	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
   144  	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
   145  	t           *maptype
   146  	h           *hmap
   147  	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
   148  	bptr        *bmap          // current bucket
   149  	overflow    [2]*[]*bmap    // keeps overflow buckets alive
   150  	startBucket uintptr        // bucket iteration started at
   151  	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
   152  	wrapped     bool           // already wrapped around from end of bucket array to beginning
   153  	B           uint8
   154  	i           uint8
   155  	bucket      uintptr
   156  	checkBucket uintptr
   157  }
   158  
   159  func evacuated(b *bmap) bool {
   160  	h := b.tophash[0]
   161  	return h > empty && h < minTopHash
   162  }
   163  
   164  func (b *bmap) overflow(t *maptype) *bmap {
   165  	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
   166  }
   167  
   168  func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
   169  	if t.bucket.kind&kindNoPointers != 0 {
   170  		h.createOverflow()
   171  		*h.overflow[0] = append(*h.overflow[0], ovf)
   172  	}
   173  	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
   174  }
   175  
   176  func (h *hmap) createOverflow() {
   177  	if h.overflow == nil {
   178  		h.overflow = new([2]*[]*bmap)
   179  	}
   180  	if h.overflow[0] == nil {
   181  		h.overflow[0] = new([]*bmap)
   182  	}
   183  }
   184  
   185  // makemap implements Go map creation for make(map[k]v, hint).
   186  // If the compiler has determined that the map or the first bucket
   187  // can be created on the stack, h and/or bucket may be non-nil.
   188  // If h != nil, the map can be created directly in h.
   189  // If bucket != nil, bucket can be used as the first bucket.
   190  func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
   191  	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
   192  		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
   193  		throw("bad hmap size")
   194  	}
   195  
   196  	if hint < 0 || int64(int32(hint)) != hint {
   197  		panic("makemap: size out of range")
   198  		// TODO: make hint an int, then none of this nonsense
   199  	}
   200  
   201  	if !ismapkey(t.key) {
   202  		throw("runtime.makemap: unsupported map key type")
   203  	}
   204  
   205  	// check compiler's and reflect's math
   206  	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
   207  		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
   208  		throw("key size wrong")
   209  	}
   210  	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
   211  		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
   212  		throw("value size wrong")
   213  	}
   214  
   215  	// invariants we depend on.  We should probably check these at compile time
   216  	// somewhere, but for now we'll do it here.
   217  	if t.key.align > bucketCnt {
   218  		throw("key align too big")
   219  	}
   220  	if t.elem.align > bucketCnt {
   221  		throw("value align too big")
   222  	}
   223  	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
   224  		throw("key size not a multiple of key align")
   225  	}
   226  	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
   227  		throw("value size not a multiple of value align")
   228  	}
   229  	if bucketCnt < 8 {
   230  		throw("bucketsize too small for proper alignment")
   231  	}
   232  	if dataOffset%uintptr(t.key.align) != 0 {
   233  		throw("need padding in bucket (key)")
   234  	}
   235  	if dataOffset%uintptr(t.elem.align) != 0 {
   236  		throw("need padding in bucket (value)")
   237  	}
   238  
   239  	// make sure zeroptr is large enough
   240  	mapzero(t.elem)
   241  
   242  	// find size parameter which will hold the requested # of elements
   243  	B := uint8(0)
   244  	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
   245  	}
   246  
   247  	// allocate initial hash table
   248  	// If B == 0, the buckets field is allocated lazily later (in mapassign1).
   249  	// If hint is large, zeroing this memory could take a while.
   250  	buckets := bucket
   251  	if B != 0 {
   252  		buckets = newarray(t.bucket, uintptr(1)<<B)
   253  	}
   254  
   255  	// initialize Hmap
   256  	if h == nil {
   257  		h = (*hmap)(newobject(t.hmap))
   258  	}
   259  	h.count = 0
   260  	h.B = B
   261  	h.flags = 0
   262  	h.hash0 = fastrand1()
   263  	h.buckets = buckets
   264  	h.oldbuckets = nil
   265  	h.nevacuate = 0
   266  
   267  	return h
   268  }
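
// A worked example of the sizing loop above: make(map[k]v, 100) stops at
// B = 4, because 100 > 6.5*2^3 = 52 but 100 <= 6.5*2^4 = 104. The map starts
// with 16 buckets and holds up to 104 entries before its first grow.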
   269  
   270  // mapaccess1 returns a pointer to h[key].  Never returns nil, instead
   271  // it will return a reference to the zero object for the value type if
   272  // the key is not in the map.
   273  // NOTE: The returned pointer may keep the whole map live, so don't
   274  // hold onto it for very long.
   275  func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   276  	if raceenabled && h != nil {
   277  		callerpc := getcallerpc(unsafe.Pointer(&t))
   278  		pc := funcPC(mapaccess1)
   279  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   280  		raceReadObjectPC(t.key, key, callerpc, pc)
   281  	}
   282  	if msanenabled && h != nil {
   283  		msanread(key, t.key.size)
   284  	}
   285  	if h == nil || h.count == 0 {
   286  		return atomic.Loadp(unsafe.Pointer(&zeroptr))
   287  	}
   288  	if h.flags&hashWriting != 0 {
   289  		throw("concurrent map read and map write")
   290  	}
   291  	alg := t.key.alg
   292  	hash := alg.hash(key, uintptr(h.hash0))
   293  	m := uintptr(1)<<h.B - 1
   294  	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
   295  	if c := h.oldbuckets; c != nil {
   296  		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
   297  		if !evacuated(oldb) {
   298  			b = oldb
   299  		}
   300  	}
   301  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   302  	if top < minTopHash {
   303  		top += minTopHash
   304  	}
   305  	for {
   306  		for i := uintptr(0); i < bucketCnt; i++ {
   307  			if b.tophash[i] != top {
   308  				continue
   309  			}
   310  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   311  			if t.indirectkey {
   312  				k = *((*unsafe.Pointer)(k))
   313  			}
   314  			if alg.equal(key, k) {
   315  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   316  				if t.indirectvalue {
   317  					v = *((*unsafe.Pointer)(v))
   318  				}
   319  				return v
   320  			}
   321  		}
   322  		b = b.overflow(t)
   323  		if b == nil {
   324  			return atomic.Loadp(unsafe.Pointer(&zeroptr))
   325  		}
   326  	}
   327  }
   328  
   329  func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
   330  	if raceenabled && h != nil {
   331  		callerpc := getcallerpc(unsafe.Pointer(&t))
   332  		pc := funcPC(mapaccess2)
   333  		racereadpc(unsafe.Pointer(h), callerpc, pc)
   334  		raceReadObjectPC(t.key, key, callerpc, pc)
   335  	}
   336  	if msanenabled && h != nil {
   337  		msanread(key, t.key.size)
   338  	}
   339  	if h == nil || h.count == 0 {
   340  		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
   341  	}
   342  	if h.flags&hashWriting != 0 {
   343  		throw("concurrent map read and map write")
   344  	}
   345  	alg := t.key.alg
   346  	hash := alg.hash(key, uintptr(h.hash0))
   347  	m := uintptr(1)<<h.B - 1
   348  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   349  	if c := h.oldbuckets; c != nil {
   350  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   351  		if !evacuated(oldb) {
   352  			b = oldb
   353  		}
   354  	}
   355  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   356  	if top < minTopHash {
   357  		top += minTopHash
   358  	}
   359  	for {
   360  		for i := uintptr(0); i < bucketCnt; i++ {
   361  			if b.tophash[i] != top {
   362  				continue
   363  			}
   364  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   365  			if t.indirectkey {
   366  				k = *((*unsafe.Pointer)(k))
   367  			}
   368  			if alg.equal(key, k) {
   369  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   370  				if t.indirectvalue {
   371  					v = *((*unsafe.Pointer)(v))
   372  				}
   373  				return v, true
   374  			}
   375  		}
   376  		b = b.overflow(t)
   377  		if b == nil {
   378  			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
   379  		}
   380  	}
   381  }
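
// For context, a sketch of how these entry points are reached (assuming the
// gc compiler of this era): the compiler passes the key by address and the
// caller copies through the returned pointer.
//
//	v := m[k]     // lowered to a call to mapaccess1(t, h, &k)
//	v, ok := m[k] // lowered to a call to mapaccess2(t, h, &k)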
   382  
   383  // mapaccessK returns both key and value. Used by map iterators.
   384  func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
   385  	if h == nil || h.count == 0 {
   386  		return nil, nil
   387  	}
   388  	if h.flags&hashWriting != 0 {
   389  		throw("concurrent map read and map write")
   390  	}
   391  	alg := t.key.alg
   392  	hash := alg.hash(key, uintptr(h.hash0))
   393  	m := uintptr(1)<<h.B - 1
   394  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
   395  	if c := h.oldbuckets; c != nil {
   396  		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
   397  		if !evacuated(oldb) {
   398  			b = oldb
   399  		}
   400  	}
   401  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   402  	if top < minTopHash {
   403  		top += minTopHash
   404  	}
   405  	for {
   406  		for i := uintptr(0); i < bucketCnt; i++ {
   407  			if b.tophash[i] != top {
   408  				continue
   409  			}
   410  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   411  			if t.indirectkey {
   412  				k = *((*unsafe.Pointer)(k))
   413  			}
   414  			if alg.equal(key, k) {
   415  				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   416  				if t.indirectvalue {
   417  					v = *((*unsafe.Pointer)(v))
   418  				}
   419  				return k, v
   420  			}
   421  		}
   422  		b = b.overflow(t)
   423  		if b == nil {
   424  			return nil, nil
   425  		}
   426  	}
   427  }
   428  
   429  func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
   430  	if h == nil {
   431  		panic("assignment to entry in nil map")
   432  	}
   433  	if raceenabled {
   434  		callerpc := getcallerpc(unsafe.Pointer(&t))
   435  		pc := funcPC(mapassign1)
   436  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   437  		raceReadObjectPC(t.key, key, callerpc, pc)
   438  		raceReadObjectPC(t.elem, val, callerpc, pc)
   439  	}
   440  	if msanenabled {
   441  		msanread(key, t.key.size)
   442  		msanread(val, t.elem.size)
   443  	}
   444  	if h.flags&hashWriting != 0 {
   445  		throw("concurrent map writes")
   446  	}
   447  	h.flags |= hashWriting
   448  
   449  	alg := t.key.alg
   450  	hash := alg.hash(key, uintptr(h.hash0))
   451  
   452  	if h.buckets == nil {
   453  		h.buckets = newarray(t.bucket, 1)
   454  	}
   455  
   456  again:
   457  	bucket := hash & (uintptr(1)<<h.B - 1)
   458  	if h.oldbuckets != nil {
   459  		growWork(t, h, bucket)
   460  	}
   461  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   462  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   463  	if top < minTopHash {
   464  		top += minTopHash
   465  	}
   466  
   467  	var inserti *uint8
   468  	var insertk unsafe.Pointer
   469  	var insertv unsafe.Pointer
   470  	for {
   471  		for i := uintptr(0); i < bucketCnt; i++ {
   472  			if b.tophash[i] != top {
   473  				if b.tophash[i] == empty && inserti == nil {
   474  					inserti = &b.tophash[i]
   475  					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   476  					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   477  				}
   478  				continue
   479  			}
   480  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   481  			k2 := k
   482  			if t.indirectkey {
   483  				k2 = *((*unsafe.Pointer)(k2))
   484  			}
   485  			if !alg.equal(key, k2) {
   486  				continue
   487  			}
   488  			// already have a mapping for key.  Update it.
   489  			if t.needkeyupdate {
   490  				typedmemmove(t.key, k2, key)
   491  			}
   492  			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
   493  			v2 := v
   494  			if t.indirectvalue {
   495  				v2 = *((*unsafe.Pointer)(v2))
   496  			}
   497  			typedmemmove(t.elem, v2, val)
   498  			goto done
   499  		}
   500  		ovf := b.overflow(t)
   501  		if ovf == nil {
   502  			break
   503  		}
   504  		b = ovf
   505  	}
   506  
   507  	// did not find mapping for key.  Allocate new cell & add entry.
   508  	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
   509  		hashGrow(t, h)
   510  		goto again // Growing the table invalidates everything, so try again
   511  	}
   512  
   513  	if inserti == nil {
   514  		// all current buckets are full, allocate a new one.
   515  		newb := (*bmap)(newobject(t.bucket))
   516  		h.setoverflow(t, b, newb)
   517  		inserti = &newb.tophash[0]
   518  		insertk = add(unsafe.Pointer(newb), dataOffset)
   519  		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
   520  	}
   521  
   522  	// store new key/value at insert position
   523  	if t.indirectkey {
   524  		kmem := newobject(t.key)
   525  		*(*unsafe.Pointer)(insertk) = kmem
   526  		insertk = kmem
   527  	}
   528  	if t.indirectvalue {
   529  		vmem := newobject(t.elem)
   530  		*(*unsafe.Pointer)(insertv) = vmem
   531  		insertv = vmem
   532  	}
   533  	typedmemmove(t.key, insertk, key)
   534  	typedmemmove(t.elem, insertv, val)
   535  	*inserti = top
   536  	h.count++
   537  
   538  done:
   539  	if h.flags&hashWriting == 0 {
   540  		throw("concurrent map writes")
   541  	}
   542  	h.flags &^= hashWriting
   543  }
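
// Similarly, a sketch of how writes reach the functions in this file
// (assuming the gc compiler of this era):
//
//	m[k] = v     // lowered to a call to mapassign1(t, h, &k, &v)
//	delete(m, k) // lowered to a call to mapdelete(t, h, &k)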
   544  
   545  func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
   546  	if raceenabled && h != nil {
   547  		callerpc := getcallerpc(unsafe.Pointer(&t))
   548  		pc := funcPC(mapdelete)
   549  		racewritepc(unsafe.Pointer(h), callerpc, pc)
   550  		raceReadObjectPC(t.key, key, callerpc, pc)
   551  	}
   552  	if msanenabled && h != nil {
   553  		msanread(key, t.key.size)
   554  	}
   555  	if h == nil || h.count == 0 {
   556  		return
   557  	}
   558  	if h.flags&hashWriting != 0 {
   559  		throw("concurrent map writes")
   560  	}
   561  	h.flags |= hashWriting
   562  
   563  	alg := t.key.alg
   564  	hash := alg.hash(key, uintptr(h.hash0))
   565  	bucket := hash & (uintptr(1)<<h.B - 1)
   566  	if h.oldbuckets != nil {
   567  		growWork(t, h, bucket)
   568  	}
   569  	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
   570  	top := uint8(hash >> (sys.PtrSize*8 - 8))
   571  	if top < minTopHash {
   572  		top += minTopHash
   573  	}
   574  	for {
   575  		for i := uintptr(0); i < bucketCnt; i++ {
   576  			if b.tophash[i] != top {
   577  				continue
   578  			}
   579  			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
   580  			k2 := k
   581  			if t.indirectkey {
   582  				k2 = *((*unsafe.Pointer)(k2))
   583  			}
   584  			if !alg.equal(key, k2) {
   585  				continue
   586  			}
   587  			memclr(k, uintptr(t.keysize))
   588  			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
   589  			memclr(v, uintptr(t.valuesize))
   590  			b.tophash[i] = empty
   591  			h.count--
   592  			goto done
   593  		}
   594  		b = b.overflow(t)
   595  		if b == nil {
   596  			goto done
   597  		}
   598  	}
   599  
   600  done:
   601  	if h.flags&hashWriting == 0 {
   602  		throw("concurrent map writes")
   603  	}
   604  	h.flags &^= hashWriting
   605  }
   606  
   607  func mapiterinit(t *maptype, h *hmap, it *hiter) {
   608  	// Clear pointer fields so garbage collector does not complain.
   609  	it.key = nil
   610  	it.value = nil
   611  	it.t = nil
   612  	it.h = nil
   613  	it.buckets = nil
   614  	it.bptr = nil
   615  	it.overflow[0] = nil
   616  	it.overflow[1] = nil
   617  
   618  	if raceenabled && h != nil {
   619  		callerpc := getcallerpc(unsafe.Pointer(&t))
   620  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
   621  	}
   622  
   623  	if h == nil || h.count == 0 {
   624  		it.key = nil
   625  		it.value = nil
   626  		return
   627  	}
   628  
   629  	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
   630  		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
   631  	}
   632  	it.t = t
   633  	it.h = h
   634  
   635  	// grab snapshot of bucket state
   636  	it.B = h.B
   637  	it.buckets = h.buckets
   638  	if t.bucket.kind&kindNoPointers != 0 {
   639  		// Allocate the current slice and remember pointers to both current and old.
   640  		// This keeps all relevant overflow buckets alive even if
   641  		// the table grows and/or overflow buckets are added to the table
   642  		// while we are iterating.
   643  		h.createOverflow()
   644  		it.overflow = *h.overflow
   645  	}
   646  
   647  	// decide where to start
   648  	r := uintptr(fastrand1())
   649  	if h.B > 31-bucketCntBits {
   650  		r += uintptr(fastrand1()) << 31
   651  	}
   652  	it.startBucket = r & (uintptr(1)<<h.B - 1)
   653  	it.offset = uint8(r >> h.B & (bucketCnt - 1))
   654  
   655  	// iterator state
   656  	it.bucket = it.startBucket
   657  	it.wrapped = false
   658  	it.bptr = nil
   659  
   660  	// Remember we have an iterator.
   661  	// Can run concurrently with another mapiterinit().
   662  	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
   663  		atomic.Or8(&h.flags, iterator|oldIterator)
   664  	}
   665  
   666  	mapiternext(it)
   667  }
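
// Because startBucket and offset above are seeded from fastrand1(), a loop
// such as
//
//	for k, v := range m {
//		...
//	}
//
// begins at a random bucket and at a random cell within each bucket, which is
// why map iteration order differs from run to run.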
   668  
   669  func mapiternext(it *hiter) {
   670  	h := it.h
   671  	if raceenabled {
   672  		callerpc := getcallerpc(unsafe.Pointer(&it))
   673  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
   674  	}
   675  	t := it.t
   676  	bucket := it.bucket
   677  	b := it.bptr
   678  	i := it.i
   679  	checkBucket := it.checkBucket
   680  	alg := t.key.alg
   681  
   682  next:
   683  	if b == nil {
   684  		if bucket == it.startBucket && it.wrapped {
   685  			// end of iteration
   686  			it.key = nil
   687  			it.value = nil
   688  			return
   689  		}
   690  		if h.oldbuckets != nil && it.B == h.B {
   691  			// Iterator was started in the middle of a grow, and the grow isn't done yet.
   692  			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
   693  			// bucket hasn't been evacuated) then we need to iterate through the old
   694  			// bucket and only return the ones that will be migrated to this bucket.
   695  			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
   696  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   697  			if !evacuated(b) {
   698  				checkBucket = bucket
   699  			} else {
   700  				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   701  				checkBucket = noCheck
   702  			}
   703  		} else {
   704  			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
   705  			checkBucket = noCheck
   706  		}
   707  		bucket++
   708  		if bucket == uintptr(1)<<it.B {
   709  			bucket = 0
   710  			it.wrapped = true
   711  		}
   712  		i = 0
   713  	}
   714  	for ; i < bucketCnt; i++ {
   715  		offi := (i + it.offset) & (bucketCnt - 1)
   716  		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
   717  		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
   718  		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
   719  			if checkBucket != noCheck {
   720  				// Special case: iterator was started during a grow and the
   721  				// grow is not done yet.  We're working on a bucket whose
   722  				// oldbucket has not been evacuated yet.  Or at least, it wasn't
   723  				// evacuated when we started the bucket.  So we're iterating
   724  				// through the oldbucket, skipping any keys that will go
   725  				// to the other new bucket (each oldbucket expands to two
   726  				// buckets during a grow).
   727  				k2 := k
   728  				if t.indirectkey {
   729  					k2 = *((*unsafe.Pointer)(k2))
   730  				}
   731  				if t.reflexivekey || alg.equal(k2, k2) {
   732  					// If the item in the oldbucket is not destined for
   733  					// the current new bucket in the iteration, skip it.
   734  					hash := alg.hash(k2, uintptr(h.hash0))
   735  					if hash&(uintptr(1)<<it.B-1) != checkBucket {
   736  						continue
   737  					}
   738  				} else {
   739  					// Hash isn't repeatable if k != k (NaNs).  We need a
   740  					// repeatable and randomish choice of which direction
   741  					// to send NaNs during evacuation.  We'll use the low
   742  					// bit of tophash to decide which way NaNs go.
   743  					// NOTE: this case is why we need two evacuate tophash
   744  					// values, evacuatedX and evacuatedY, that differ in
   745  					// their low bit.
   746  					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
   747  						continue
   748  					}
   749  				}
   750  			}
   751  			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
   752  				// this is the golden data, we can return it.
   753  				if t.indirectkey {
   754  					k = *((*unsafe.Pointer)(k))
   755  				}
   756  				it.key = k
   757  				if t.indirectvalue {
   758  					v = *((*unsafe.Pointer)(v))
   759  				}
   760  				it.value = v
   761  			} else {
   762  				// The hash table has grown since the iterator was started.
   763  				// The golden data for this key is now somewhere else.
   764  				k2 := k
   765  				if t.indirectkey {
   766  					k2 = *((*unsafe.Pointer)(k2))
   767  				}
   768  				if t.reflexivekey || alg.equal(k2, k2) {
   769  					// Check the current hash table for the data.
   770  					// This code handles the case where the key
   771  					// has been deleted, updated, or deleted and reinserted.
   772  					// NOTE: we need to regrab the key as it has potentially been
   773  					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
   774  					rk, rv := mapaccessK(t, h, k2)
   775  					if rk == nil {
   776  						continue // key has been deleted
   777  					}
   778  					it.key = rk
   779  					it.value = rv
   780  				} else {
   781  					// if key!=key then the entry can't be deleted or
   782  					// updated, so we can just return it.  That's lucky for
   783  					// us because when key!=key we can't look it up
   784  					// successfully in the current table.
   785  					it.key = k2
   786  					if t.indirectvalue {
   787  						v = *((*unsafe.Pointer)(v))
   788  					}
   789  					it.value = v
   790  				}
   791  			}
   792  			it.bucket = bucket
   793  			it.bptr = b
   794  			it.i = i + 1
   795  			it.checkBucket = checkBucket
   796  			return
   797  		}
   798  	}
   799  	b = b.overflow(t)
   800  	i = 0
   801  	goto next
   802  }
   803  
   804  func hashGrow(t *maptype, h *hmap) {
   805  	if h.oldbuckets != nil {
   806  		throw("evacuation not done in time")
   807  	}
   808  	oldbuckets := h.buckets
   809  	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
   810  	flags := h.flags &^ (iterator | oldIterator)
   811  	if h.flags&iterator != 0 {
   812  		flags |= oldIterator
   813  	}
   814  	// commit the grow (atomic wrt gc)
   815  	h.B++
   816  	h.flags = flags
   817  	h.oldbuckets = oldbuckets
   818  	h.buckets = newbuckets
   819  	h.nevacuate = 0
   820  
   821  	if h.overflow != nil {
   822  		// Promote current overflow buckets to the old generation.
   823  		if h.overflow[1] != nil {
   824  			throw("overflow is not nil")
   825  		}
   826  		h.overflow[1] = h.overflow[0]
   827  		h.overflow[0] = nil
   828  	}
   829  
   830  	// the actual copying of the hash table data is done incrementally
   831  	// by growWork() and evacuate().
   832  }
   833  
   834  func growWork(t *maptype, h *hmap, bucket uintptr) {
   835  	noldbuckets := uintptr(1) << (h.B - 1)
   836  
   837  	// make sure we evacuate the oldbucket corresponding
   838  	// to the bucket we're about to use
   839  	evacuate(t, h, bucket&(noldbuckets-1))
   840  
   841  	// evacuate one more oldbucket to make progress on growing
   842  	if h.oldbuckets != nil {
   843  		evacuate(t, h, h.nevacuate)
   844  	}
   845  }
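
// So while h.oldbuckets != nil, every insert or delete evacuates at most two
// old buckets: the one the write hashes to and the one at h.nevacuate. For
// example, a map growing from B = 5 to B = 6 has 32 old buckets and finishes
// evacuating after at most 32 further writes.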
   846  
   847  func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
   848  	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   849  	newbit := uintptr(1) << (h.B - 1)
   850  	alg := t.key.alg
   851  	if !evacuated(b) {
   852  		// TODO: reuse overflow buckets instead of using new ones, if there
   853  		// is no iterator using the old buckets.  (If !oldIterator.)
   854  
   855  		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
   856  		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
   857  		xi := 0
   858  		yi := 0
   859  		xk := add(unsafe.Pointer(x), dataOffset)
   860  		yk := add(unsafe.Pointer(y), dataOffset)
   861  		xv := add(xk, bucketCnt*uintptr(t.keysize))
   862  		yv := add(yk, bucketCnt*uintptr(t.keysize))
   863  		for ; b != nil; b = b.overflow(t) {
   864  			k := add(unsafe.Pointer(b), dataOffset)
   865  			v := add(k, bucketCnt*uintptr(t.keysize))
   866  			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
   867  				top := b.tophash[i]
   868  				if top == empty {
   869  					b.tophash[i] = evacuatedEmpty
   870  					continue
   871  				}
   872  				if top < minTopHash {
   873  					throw("bad map state")
   874  				}
   875  				k2 := k
   876  				if t.indirectkey {
   877  					k2 = *((*unsafe.Pointer)(k2))
   878  				}
   879  				// Compute hash to make our evacuation decision (whether we need
   880  				// to send this key/value to bucket x or bucket y).
   881  				hash := alg.hash(k2, uintptr(h.hash0))
   882  				if h.flags&iterator != 0 {
   883  					if !t.reflexivekey && !alg.equal(k2, k2) {
   884  						// If key != key (NaNs), then the hash could be (and probably
   885  						// will be) entirely different from the old hash.  Moreover,
   886  						// it isn't reproducible.  Reproducibility is required in the
   887  						// presence of iterators, as our evacuation decision must
   888  						// match whatever decision the iterator made.
   889  						// Fortunately, we have the freedom to send these keys either
   890  						// way.  Also, tophash is meaningless for these kinds of keys.
   891  						// We let the low bit of tophash drive the evacuation decision.
   892  						// We recompute a new random tophash for the next level so
   893  						// these keys will get evenly distributed across all buckets
   894  						// after multiple grows.
   895  						if (top & 1) != 0 {
   896  							hash |= newbit
   897  						} else {
   898  							hash &^= newbit
   899  						}
   900  						top = uint8(hash >> (sys.PtrSize*8 - 8))
   901  						if top < minTopHash {
   902  							top += minTopHash
   903  						}
   904  					}
   905  				}
   906  				if (hash & newbit) == 0 {
   907  					b.tophash[i] = evacuatedX
   908  					if xi == bucketCnt {
   909  						newx := (*bmap)(newobject(t.bucket))
   910  						h.setoverflow(t, x, newx)
   911  						x = newx
   912  						xi = 0
   913  						xk = add(unsafe.Pointer(x), dataOffset)
   914  						xv = add(xk, bucketCnt*uintptr(t.keysize))
   915  					}
   916  					x.tophash[xi] = top
   917  					if t.indirectkey {
   918  						*(*unsafe.Pointer)(xk) = k2 // copy pointer
   919  					} else {
   920  						typedmemmove(t.key, xk, k) // copy value
   921  					}
   922  					if t.indirectvalue {
   923  						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
   924  					} else {
   925  						typedmemmove(t.elem, xv, v)
   926  					}
   927  					xi++
   928  					xk = add(xk, uintptr(t.keysize))
   929  					xv = add(xv, uintptr(t.valuesize))
   930  				} else {
   931  					b.tophash[i] = evacuatedY
   932  					if yi == bucketCnt {
   933  						newy := (*bmap)(newobject(t.bucket))
   934  						h.setoverflow(t, y, newy)
   935  						y = newy
   936  						yi = 0
   937  						yk = add(unsafe.Pointer(y), dataOffset)
   938  						yv = add(yk, bucketCnt*uintptr(t.keysize))
   939  					}
   940  					y.tophash[yi] = top
   941  					if t.indirectkey {
   942  						*(*unsafe.Pointer)(yk) = k2
   943  					} else {
   944  						typedmemmove(t.key, yk, k)
   945  					}
   946  					if t.indirectvalue {
   947  						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
   948  					} else {
   949  						typedmemmove(t.elem, yv, v)
   950  					}
   951  					yi++
   952  					yk = add(yk, uintptr(t.keysize))
   953  					yv = add(yv, uintptr(t.valuesize))
   954  				}
   955  			}
   956  		}
   957  		// Unlink the overflow buckets & clear key/value to help GC.
   958  		if h.flags&oldIterator == 0 {
   959  			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
   960  			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
   961  		}
   962  	}
   963  
   964  	// Advance evacuation mark
   965  	if oldbucket == h.nevacuate {
   966  		h.nevacuate = oldbucket + 1
   967  		if oldbucket+1 == newbit { // newbit == # of oldbuckets
   968  			// Growing is all done.  Free old main bucket array.
   969  			h.oldbuckets = nil
   970  			// Can discard old overflow buckets as well.
   971  			// If they are still referenced by an iterator,
   972  			// then the iterator holds a pointer to the slice.
   973  			if h.overflow != nil {
   974  				h.overflow[1] = nil
   975  			}
   976  		}
   977  	}
   978  }
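
// For illustration: when growing from B = 2 (4 buckets) to B = 3 (8 buckets),
// newbit is 4, so the entries of old bucket 1 are split between new bucket 1
// (the x bucket, hash&newbit == 0) and new bucket 5 (the y bucket,
// hash&newbit != 0).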
   979  
   980  func ismapkey(t *_type) bool {
   981  	return t.alg.hash != nil
   982  }
   983  
   984  // Reflect stubs.  Called from ../reflect/asm_*.s
   985  
   986  //go:linkname reflect_makemap reflect.makemap
   987  func reflect_makemap(t *maptype) *hmap {
   988  	return makemap(t, 0, nil, nil)
   989  }
   990  
   991  //go:linkname reflect_mapaccess reflect.mapaccess
   992  func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
   993  	val, ok := mapaccess2(t, h, key)
   994  	if !ok {
   995  		// reflect wants nil for a missing element
   996  		val = nil
   997  	}
   998  	return val
   999  }
  1000  
  1001  //go:linkname reflect_mapassign reflect.mapassign
  1002  func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
  1003  	mapassign1(t, h, key, val)
  1004  }
  1005  
  1006  //go:linkname reflect_mapdelete reflect.mapdelete
  1007  func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
  1008  	mapdelete(t, h, key)
  1009  }
  1010  
  1011  //go:linkname reflect_mapiterinit reflect.mapiterinit
  1012  func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
  1013  	it := new(hiter)
  1014  	mapiterinit(t, h, it)
  1015  	return it
  1016  }
  1017  
  1018  //go:linkname reflect_mapiternext reflect.mapiternext
  1019  func reflect_mapiternext(it *hiter) {
  1020  	mapiternext(it)
  1021  }
  1022  
  1023  //go:linkname reflect_mapiterkey reflect.mapiterkey
  1024  func reflect_mapiterkey(it *hiter) unsafe.Pointer {
  1025  	return it.key
  1026  }
  1027  
  1028  //go:linkname reflect_maplen reflect.maplen
  1029  func reflect_maplen(h *hmap) int {
  1030  	if h == nil {
  1031  		return 0
  1032  	}
  1033  	if raceenabled {
  1034  		callerpc := getcallerpc(unsafe.Pointer(&h))
  1035  		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
  1036  	}
  1037  	return h.count
  1038  }
  1039  
  1040  //go:linkname reflect_ismapkey reflect.ismapkey
  1041  func reflect_ismapkey(t *_type) bool {
  1042  	return ismapkey(t)
  1043  }
  1044  
  1045  var zerolock mutex
  1046  
  1047  const initialZeroSize = 1024
  1048  
  1049  var zeroinitial [initialZeroSize]byte
  1050  
  1051  // All accesses to zeroptr and zerosize must be atomic so that they
  1052  // can be accessed without locks in the common case.
  1053  var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial)
  1054  var zerosize uintptr = initialZeroSize
  1055  
  1056  // mapzero ensures that zeroptr points to a buffer large enough to
  1057  // serve as the zero value for t.
  1058  func mapzero(t *_type) {
  1059  	// Is the type small enough for existing buffer?
  1060  	// Is the type small enough for the existing buffer?
  1061  	if t.size <= cursize {
  1062  		return
  1063  	}
  1064  
  1065  	// Allocate a new buffer.
  1066  	lock(&zerolock)
  1067  	cursize = uintptr(atomic.Loadp(unsafe.Pointer(&zerosize)))
  1068  	if cursize < t.size {
  1069  		for cursize < t.size {
  1070  			cursize *= 2
  1071  			if cursize == 0 {
  1072  				// need >2GB zero on 32-bit machine
  1073  				throw("map element too large")
  1074  			}
  1075  		}
  1076  		atomic.Storep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
  1077  		atomic.Storep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
  1078  	}
  1079  	unlock(&zerolock)
  1080  }