github.com/hbdrawn/golang@v0.0.0-20141214014649-6b835209aba2/src/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table.  The data is arranged
// into an array of buckets.  Each bucket contains up to
// 8 key/value pairs.  The low-order bits of the hash are
// used to select a bucket.  Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big.  Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators keep iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.
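//
// For example, in a table with B == 5 there are 1<<5 == 32 buckets:
// a key whose hash is h lives in bucket h & (1<<5 - 1), and the top
// 8 bits of h are kept as its "tophash" so most non-matching entries
// can be rejected without comparing full keys.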

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space.  I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//  loadFactor    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows.  Typical tables will be somewhat less loaded.

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/gc/walk.c must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly.  For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values.  We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid.  Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1

	// trigger a garbage collection at every alloc called from this code
	checkgc = false
)
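
// tophashForHash is an illustrative sketch, not part of the map's entry
// points: the functions below inline this exact computation.  It shows
// how the high-order byte of a hash becomes a tophash value: bytes that
// would collide with the marker values above are bumped up past
// minTopHash.
func tophashForHash(hash uintptr) uint8 {
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}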

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go.  Don't change this structure without also changing that code!
	count int // # live cells == size of map.  Must be first (used by len() builtin)
	flags uint32
	hash0 uint32 // hash seed
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
}
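
// bucketMask is another illustrative helper; the code below inlines the
// expression uintptr(1)<<h.B - 1 rather than calling it.  The low h.B
// bits of a hash select one of the 1<<h.B buckets.
func bucketMask(b uint8) uintptr {
	return uintptr(1)<<b - 1
}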

// A bucket for a Go map.
type bmap struct {
	tophash  [bucketCnt]uint8
	overflow *bmap
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
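	// In that example, an alternating layout would need 7 padding bytes
	// after each 1-byte value to realign the next 8-byte key (56 wasted
	// bytes per bucket), while the grouped layout packs 8 keys and then
	// 8 values with no interior padding.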
}

// A hash iteration structure.
// If you modify hiter, also change cmd/gc/reflect.c to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/gc/range.c).
	value       unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func makemap(t *maptype, hint int64) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		gothrow("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		gothrow("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		gothrow("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		gothrow("value size wrong")
	}

	// invariants we depend on.  We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		gothrow("key align too big")
	}
	if t.elem.align > bucketCnt {
		gothrow("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		gothrow("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		gothrow("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		gothrow("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		gothrow("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		gothrow("need padding in bucket (value)")
	}

	// find the smallest size parameter B for which loadFactor*2^B
	// holds the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}
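	// For example, hint == 100 gives B == 4: 16 buckets hold up to
	// loadFactor*16 == 104 entries, while 8 buckets would hold only 52.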

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign1).
	// If hint is large, zeroing this memory could take a while.
	var buckets unsafe.Pointer
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	h := (*hmap)(newobject(t.hmap))
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}

// mapaccess1 returns a pointer to h[key].  Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
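	// If we're in the middle of a grow and the old (half-sized) bucket
	// for this hash hasn't been evacuated yet, it still holds the
	// authoritative data, so read from it instead.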
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}

// mapaccessK returns both key and value.  Used by map iterators.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

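	// Walk the bucket chain looking for the key.  Along the way, remember
	// the first empty cell seen; if the key turns out not to be present
	// anywhere in the chain, that is where the new entry goes.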
	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			// already have a mapping for key.  Update it.
			memmove(k2, key, uintptr(t.key.size))
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			memmove(v2, val, uintptr(t.elem.size))
			return
		}
		if b.overflow == nil {
			break
		}
		b = b.overflow
	}

	// did not find mapping for key.  Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		b.overflow = newb
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	memmove(insertk, key, uintptr(t.key.size))
	memmove(insertv, val, uintptr(t.elem.size))
	*inserti = top
	h.count++
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
		gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))
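	// For example, with B == 2 and bucketCnt == 8, r supplies 2 bits of
	// starting bucket and 3 bits of intra-bucket offset, so iteration
	// order varies from one range loop to the next.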

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	for {
		old := h.flags
		if old == old|iterator|oldIterator {
			break
		}
		if cas(&h.flags, old, old|iterator|oldIterator) {
			break
		}
	}

	mapiternext(it)
}

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := goalg(t.key.alg)

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet.  We're working on a bucket whose
				// oldbucket has not been evacuated yet.  Or at least, it wasn't
				// evacuated when we started the bucket.  So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs).  We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation.  We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it.  That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		gothrow("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := goalg(t.key.alg)
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

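		// Each old bucket splits into two new ones: "x" keeps the same
		// index in the grown array and "y" lives at oldbucket+newbit,
		// i.e. the old index with the new high bit set.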
		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					gothrow("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !alg.equal(k2, k2, uintptr(t.key.size)) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash.  Moreover,
						// it isn't reproducible.  Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way.  Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						x.overflow = newx
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						memmove(xk, k, uintptr(t.key.size)) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(xv, v, uintptr(t.elem.size))
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						y.overflow = newy
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						memmove(yk, k, uintptr(t.key.size))
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(yv, v, uintptr(t.elem.size))
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			b.overflow = nil
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done.  Free old main bucket array.
			h.oldbuckets = nil
		}
	}
}

func ismapkey(t *_type) bool {
	return goalg(t.alg).hash != nil
}

// Reflect stubs.  Called from ../reflect/asm_*.s

func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0)
}

func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}
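
// Note: user Go code never calls the functions above directly.  The
// compiler lowers map operations to them (v := m[k] to mapaccess1,
// v, ok := m[k] to mapaccess2, m[k] = v to mapassign1, delete(m, k) to
// mapdelete, and range loops to mapiterinit/mapiternext), and the
// reflect package reaches the same code through the reflect_* stubs.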