github.com/aloncn/graphics-go@v0.0.1/src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

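// mapaccess1_fast32 is a specialized variant of mapaccess1 for maps with
// 32-bit keys. It returns a pointer to the value for key, or a pointer to
// the zero object when the key is absent; it never returns nil.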
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table.  No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
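		// The map is growing. The key may not have been evacuated to
		// the new bucket array yet, so look it up in the old (half-size)
		// bucket array if its old bucket has not been evacuated.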
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr))
		}
	}
}

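// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether the
// key was present in the map.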
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table.  No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}

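// mapaccess1_fast64 is the 64-bit-key counterpart of mapaccess1_fast32:
// identical lookup logic, with 8-byte key slots instead of 4-byte ones.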
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table.  No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr))
		}
	}
}

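// mapaccess2_fast64 is like mapaccess1_fast64, but also reports whether the
// key was present in the map.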
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table.  No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}

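// mapaccess1_faststr is a specialized variant of mapaccess1 for string keys.
// For a single-bucket map (h.B == 0) it avoids hashing entirely: short keys
// (< 32 bytes) are compared directly, while long keys are filtered by length
// and by their first and last 4 bytes before any full comparison is done.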
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return atomic.Loadp(unsafe.Pointer(&zeroptr))
		}
		// long key, try not to do more comparisons than necessary
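		// keymaybe tracks the single slot that passed the cheap checks;
		// bucketCnt means no candidate yet. A second candidate jumps to
		// the hashed lookup instead of doing full comparisons.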
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches.  Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return atomic.Loadp(unsafe.Pointer(&zeroptr))
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
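	// Use the top byte of the hash as the per-slot tophash. Values below
	// minTopHash are reserved for slot-state markers (e.g. empty), so
	// real hashes are shifted up out of that range.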
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr))
		}
	}
}

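// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether the
// key was present in the map.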
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches.  Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomic.Loadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}