// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// mapaccess1_fast32 is the lookup fast path for maps with 32-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value (&zeroVal[0]) when h is nil/empty or the key is absent.
// It never returns nil.
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; the entry may still live in its old bucket.
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// A 4-byte key is cheap to compare, so check it before the
			// tophash byte.
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				// Empty slot: any matching key bytes are stale.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether
// the key was present in the map.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; the entry may still live in its old bucket.
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// Key first, tophash second: a 4-byte compare is cheap.
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				// Empty slot: any matching key bytes are stale.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapaccess1_fast64 is the lookup fast path for maps with 64-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value when h is nil/empty or the key is absent. It never returns nil.
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; the entry may still live in its old bucket.
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// Key first, tophash second: an 8-byte compare is cheap.
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				// Empty slot: any matching key bytes are stale.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether
// the key was present in the map.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// Map is growing; the entry may still live in its old bucket.
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// Key first, tophash second: an 8-byte compare is cheap.
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				// Empty slot: any matching key bytes are stale.
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapaccess1_faststr is the lookup fast path for maps with string keys.
// It returns a pointer to the value for ky, or a pointer to the shared
// zero value when h is nil/empty or the key is absent. It never returns nil.
// For a one-bucket table it avoids hashing entirely, using length and
// prefix/suffix byte checks to prune full memequal comparisons.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				// Identical string data pointer is a guaranteed match;
				// otherwise compare the bytes.
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		// keymaybe == bucketCnt means "no candidate slot yet".
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			// Remember this sole candidate; compare its full bytes only
			// after the scan finds no other.
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing; the entry may still live in its old bucket.
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}

// mapaccess2_faststr is like mapaccess1_faststr but also reports whether
// the key was present in the map.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				// Identical string data pointer is a guaranteed match;
				// otherwise compare the bytes.
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		// keymaybe == bucketCnt means "no candidate slot yet".
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			// Remember this sole candidate; compare its full bytes only
			// after the scan finds no other.
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing; the entry may still live in its old bucket.
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}

// mapassign_fast32 is the assignment fast path for maps with 32-bit keys.
// It returns a pointer to the value slot for key, inserting a new entry
// (growing the table first if needed) when the key is not yet present.
// Only the key is stored here; the caller stores the value through the
// returned pointer. Panics on a nil map.
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		// Lazily allocate the initial bucket array.
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	// Remember the first free slot seen, in case the key is not found.
	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			// Key already present; return its existing value slot.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	*((*uint32)(insertk)) = key
	*inserti = top
	h.count++

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapassign_fast64 is the assignment fast path for maps with 64-bit keys.
// It returns a pointer to the value slot for key, inserting a new entry
// (growing the table first if needed) when the key is not yet present.
// Only the key is stored here; the caller stores the value through the
// returned pointer. Panics on a nil map.
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		// Lazily allocate the initial bucket array.
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	// Remember the first free slot seen, in case the key is not found.
	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			// Key already present; return its existing value slot.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	*((*uint64)(insertk)) = key
	*inserti = top
	h.count++

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapassign_faststr is the assignment fast path for maps with string keys.
// It returns a pointer to the value slot for ky, inserting a new entry
// (growing the table first if needed) when the key is not yet present.
// Only the key is stored here; the caller stores the value through the
// returned pointer. Panics on a nil map.
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		// Lazily allocate the initial bucket array.
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	// Remember the first free slot seen, in case the key is not found.
	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapdelete_fast32 removes the entry for key, if present, from a map with
// 32-bit keys. Deleting from a nil or empty map is a no-op.
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			// A plain store clears the key: a uint32 holds no pointers.
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			// The value may contain pointers; clear it with type info.
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_fast64 removes the entry for key, if present, from a map with
// 64-bit keys. Deleting from a nil or empty map is a no-op.
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			// A plain store clears the key: a uint64 holds no pointers.
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			// The value may contain pointers; clear it with type info.
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_faststr removes the entry for ky, if present, from a map with
// string keys. Deleting from a nil or empty map is a no-op.
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Tophash values below minTopHash are reserved as markers; bump the
	// computed byte out of that range.
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// A string key holds a data pointer; clear it with type info.
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			// The value may contain pointers; clear it with type info.
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	// hashWriting must still be set; if not, a concurrent writer cleared it.
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}