github.com/filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

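// Note (added commentary): mapaccess1_fast64 and mapaccess2_fast64 below
// mirror the fast32 routines above for maps with 8-byte keys: the key slots
// are walked with a fixed stride of 8 bytes and compared with a single
// uint64 load instead of going through the generic key-equality function.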
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

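// Note (added commentary): mapaccess1_faststr looks up a string key. For a
// single-bucket map it avoids hashing entirely: short keys (< 32 bytes) are
// compared directly, while long keys are first filtered by length and by
// their first and last 4 bytes, and at most one surviving candidate
// (keymaybe) is ever passed to memequal. If a second candidate survives the
// filters, the code gives up and falls back to the hashed path at dohash.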
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

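// Note (added commentary): mapaccess2_faststr is the comma-ok variant of
// mapaccess1_faststr; the lookup logic is identical, it additionally reports
// whether the key was present.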
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

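// Note (added commentary): the mapassign_fast* routines below insert or
// update a key and return a pointer to the value slot; compiler-generated
// code stores the value through that pointer. They set the hashWriting flag
// for the duration of the write so that concurrent readers and writers can
// be detected, and they may trigger or advance an incremental grow before
// picking the insertion slot.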
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key at insert position
	if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
		writebarrierptr((*uintptr)(insertk), uintptr(key))
	} else {
		*(*uint32)(insertk) = key
	}
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

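// Note (added commentary): mapassign_fast64 is the 8-byte-key analogue of
// mapassign_fast32. The key store at the insert position is the part that
// differs: when the write barrier is enabled and the key type contains a
// pointer, a 64-bit system stores the key through writebarrierptr, while a
// 32-bit system falls back to typedmemmove.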
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key at insert position
	if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
		if sys.PtrSize == 8 {
			writebarrierptr((*uintptr)(insertk), uintptr(key))
		} else {
			// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
			// Give up and call typedmemmove.
			typedmemmove(t.key, insertk, unsafe.Pointer(&key))
		}
	} else {
		*(*uint64)(insertk) = key
	}

	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

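// Note (added commentary): mapassign_faststr follows the same pattern for
// string keys. The key comparison checks the length and the data pointer
// before falling back to memequal, and the stored key is the two-word string
// header (pointer, length) itself.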
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

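// Note (added commentary): the mapdelete_fast* routines below remove a key
// if it is present. They mark the cell's tophash as empty and clear the key
// and value slots only when those slots can contain pointers, so the garbage
// collector does not keep the deleted entry's referents alive.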
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

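// Note (added commentary): mapdelete_faststr uses the tophash byte to filter
// candidates (the integer variants compare the key value directly instead),
// and it always nils out the stored string's data pointer so the backing
// bytes can be collected.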
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

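// Note (added commentary): evacuate_fast32 moves the contents of one old
// bucket (and its overflow chain) into the new bucket array during an
// in-progress grow. For a size-doubling grow each old entry is routed to one
// of two destinations, "x" (same bucket index) or "y" (index + newbit),
// depending on the newly exposed hash bit; for a same-size grow everything
// stays in "x". The old cell's tophash is overwritten with an
// evacuatedX/evacuatedY/evacuatedEmpty marker so that iterators and later
// lookups can tell where the entry went.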
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

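// Note (added commentary): evacuate_fast64 is the same as evacuate_fast32
// but with an 8-byte key stride, and with the pointer-aware key copy
// matching mapassign_fast64.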
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

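// Note (added commentary): evacuate_faststr evacuates buckets of
// string-keyed maps. Its key copy is a plain string-header assignment (the
// compiler inserts any needed write barrier for the data pointer), so no
// explicit writebarrierptr/typedmemmove split is required here.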
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
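
// Note (added commentary): a rough illustration of how these routines are
// reached (not part of any API; the exact lowering is decided by the
// compiler). For maps whose key is a plain 4-byte or 8-byte memory type or a
// string, compiler-generated code calls the *_fast32, *_fast64, or *_faststr
// variant instead of the generic mapaccess/mapassign/mapdelete. For example,
// for m of type map[string]int, roughly:
//
//	v := m["k"]      // ~ mapaccess1_faststr(maptype, h, "k")
//	v, ok := m["k"]  // ~ mapaccess2_faststr(maptype, h, "k")
//	m["k"] = 1       // ~ *(*int)(mapassign_faststr(maptype, h, "k")) = 1
//	delete(m, "k")   // ~ mapdelete_faststr(maptype, h, "k")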