github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/map_faststr.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

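// mapaccess1_faststr is the string-key fast path for map reads. It returns a
// pointer to the element for ky, or a pointer to the zero object (&zeroVal[0])
// if the key is absent; it never returns nil. String keys occupy two words
// (pointer and length), which is why key slots are stepped by 2*sys.PtrSize.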
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
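		// Only the first and last 4 bytes of each stored key are compared
		// inline; at most one surviving candidate is remembered in keymaybe
		// (bucketCnt means "none yet"). If a second candidate appears, the
		// loop gives up and falls back to the full hash lookup at dohash.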
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
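// The dohash path below is the general lookup: it is reached directly when the
// map has more than one bucket (h.B > 0), and via the goto above when the
// one-bucket heuristics cannot decide between candidates.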
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

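// mapaccess2_faststr mirrors mapaccess1_faststr but also reports whether the
// key was present, backing the two-result form of a map read (v, ok := m[k]).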
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || isEmpty(b.tophash[i]) {
					if b.tophash[i] == emptyRest {
						break
					}
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || isEmpty(b.tophash[i]) {
				if b.tophash[i] == emptyRest {
					break
				}
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.elemsize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

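// mapassign_faststr returns a pointer to the element slot for key s, inserting
// a new slot (and possibly growing the map) if the key is not already present.
// The caller is responsible for storing the value through the returned pointer.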
func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&s)
	hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapassign.
	h.flags ^= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

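	// insertb/inserti track the first free cell found while scanning the
	// bucket chain, so a new key can be placed in the earliest empty slot;
	// insertk is the address of that cell's key word pair.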
	var insertb *bmap
	var inserti uintptr
	var insertk unsafe.Pointer

bucketloop:
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if isEmpty(b.tophash[i]) && insertb == nil {
					insertb = b
					inserti = i
				}
				if b.tophash[i] == emptyRest {
					break bucketloop
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			inserti = i
			insertb = b
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if insertb == nil {
		// The current bucket and all the overflow buckets connected to it are full, allocate a new one.
		insertb = h.newoverflow(t, b)
		inserti = 0 // not necessary, but avoids needlessly spilling inserti
	}
	insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks

	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*sys.PtrSize)
	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	h.count++

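// done: whether the key was found or newly inserted, (insertb, inserti) now
// identify its cell; compute the element address from them before returning.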
done:
	elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*sys.PtrSize+inserti*uintptr(t.elemsize))
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return elem
}

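// mapdelete_faststr removes the entry for ky, if present. Deleting the last
// entry also resets the hash seed so that repeated insert/delete cycles do not
// let an attacker keep probing the same seed (see issue 25237 below).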
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling t.hasher for consistency with mapdelete
	h.flags ^= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	bOrig := b
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.elemsize))
			if t.elem.ptrdata != 0 {
				memclrHasPointers(e, t.elem.size)
			} else {
				memclrNoHeapPointers(e, t.elem.size)
			}
			b.tophash[i] = emptyOne
			// If the bucket now ends in a bunch of emptyOne states,
			// change those to emptyRest states.
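			// Because overflow buckets are only linked forward, the backward
			// walk below restarts from bOrig to locate the previous bucket in
			// the chain each time it steps past slot 0.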
			if i == bucketCnt-1 {
				if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest {
					goto notLast
				}
			} else {
				if b.tophash[i+1] != emptyRest {
					goto notLast
				}
			}
			for {
				b.tophash[i] = emptyRest
				if i == 0 {
					if b == bOrig {
						break // beginning of initial bucket, we're done.
					}
					// Find previous bucket, continue at its last entry.
					c := b
					for b = bOrig; b.overflow(t) != c; b = b.overflow(t) {
					}
					i = bucketCnt - 1
				} else {
					i--
				}
				if b.tophash[i] != emptyOne {
					break
				}
			}
		notLast:
			h.count--
			// Reset the hash seed to make it more difficult for attackers to
			// repeatedly trigger hash collisions. See issue 25237.
			if h.count == 0 {
				h.hash0 = fastrand()
			}
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

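// evacuate_faststr copies the live entries of old bucket oldbucket into the
// current bucket array during an in-progress grow, marking each old slot with
// its evacuation state so iterators can follow the move.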
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
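		// During a size-doubling grow, entries whose hash has the newbit bit
		// clear stay at the same bucket index (x); entries with the bit set
		// move to index oldbucket+newbit (y). A same-size grow only uses x.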
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.e = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.e = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			e := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*sys.PtrSize), add(e, uintptr(t.elemsize)) {
				top := b.tophash[i]
				if isEmpty(top) {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/elem to bucket x or bucket y).
					hash := t.hasher(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.e = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.e, e)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or elem arrays.  That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.e = add(dst.e, uintptr(t.elemsize))
			}
		}
		// Unlink the overflow buckets & clear key/elem to help GC.
		if h.flags&oldIterator == 0 && t.bucket.ptrdata != 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}