github.com/tidwall/go@v0.0.0-20170415222209-6694a6888b7d/src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

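// mapaccess1_fast32 is a lookup fast path for maps whose keys are 32-bit
// values. Like mapaccess1, it returns a pointer to h[key]; it never returns
// nil and instead returns a pointer to the zero object when the key is absent.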
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

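// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether the
// key was present, for use by the comma-ok form of a map index expression.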
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

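// mapaccess1_fast64 is the 64-bit-key counterpart of mapaccess1_fast32; keys
// are stored inline in the bucket as 8-byte values.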
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

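// mapaccess2_fast64 is like mapaccess1_fast64, but also reports whether the
// key was present.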
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

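// mapaccess1_faststr is a lookup fast path for maps with string keys. For a
// one-bucket map it avoids hashing: short keys are compared directly, while
// long keys are screened by length and by their first and last four bytes
// before a full memequal, falling back to the hashed lookup at dohash only
// when more than one candidate remains.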
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

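// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether the
// key was present.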
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

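// mapassign_fast32 is an assignment fast path for maps with 32-bit keys. Like
// mapassign, it returns a pointer to the value slot for key, inserting the key
// (and growing the table if needed) when it is not already present; the caller
// stores the value through that pointer.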
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	*((*uint32)(insertk)) = key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

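// mapassign_fast64 is like mapassign_fast32, but for 64-bit keys.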
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	*((*uint64)(insertk)) = key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

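// mapassign_faststr is like mapassign_fast32, but for string keys; the key is
// stored inline in the bucket as a two-word string header.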
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

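// mapdelete_fast32 removes key from the map, if present: it zeroes the key
// slot, clears the value with typedmemclr, and marks the cell empty.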
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

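// mapdelete_fast64 is like mapdelete_fast32, but for 64-bit keys.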
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

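// mapdelete_faststr is like mapdelete_fast32, but for string keys; the key's
// string header is cleared with typedmemclr so the backing bytes are no longer
// referenced from the bucket.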
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}