github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/runtime/hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// mapaccess1_fast32 looks up key in h and returns a pointer to its value,
// or a pointer to the zero value if key is not present.
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether key was present.
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if *(*uint32)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccess1_fast64 looks up key in h and returns a pointer to its value,
// or a pointer to the zero value if key is not present.
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2_fast64 is like mapaccess1_fast64 but also reports whether key was present.
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := bucketMask(h.B)
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if *(*uint64)(k) == key && b.tophash[i] != empty {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapaccess1_faststr looks up ky in h and returns a pointer to its value,
// or a pointer to the zero value if ky is not present.
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

// mapaccess2_faststr is like mapaccess1_faststr but also reports whether ky was present.
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
				k := (*stringStruct)(kptr)
				if k.len != key.len || b.tophash[i] == empty {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] == empty {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}

// mapassign_fast32 returns a pointer to the value slot for key in h,
// creating the slot (and growing the map if needed) when key is not already present.
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key at insert position
	if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
		writebarrierptr((*uintptr)(insertk), uintptr(key))
	} else {
		*(*uint32)(insertk) = key
	}

	*inserti = tophash(hash)
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapassign_fast64 is like mapassign_fast32 but for maps with uint64 keys.
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] == empty {
				if inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key at insert position
	if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
		if sys.PtrSize == 8 {
			writebarrierptr((*uintptr)(insertk), uintptr(key))
		} else {
			// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
			// Give up and call typedmemmove.
			typedmemmove(t.key, insertk, unsafe.Pointer(&key))
		}
	} else {
		*(*uint64)(insertk) = key
	}

	*inserti = tophash(hash)
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapassign_faststr is like mapassign_fast32 but for maps with string keys.
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}

// mapdelete_fast32 removes key from h, clearing any pointers in the
// key and value slots so the GC can reclaim them.
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast32(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
			if key != *(*uint32)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_fast64 is like mapdelete_fast32 but for maps with uint64 keys.
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_fast64(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
search:
	for ; b != nil; b = b.overflow(t) {
		for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
			if key != *(*uint64)(k) || b.tophash[i] == empty {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// mapdelete_faststr is like mapdelete_fast32 but for maps with string keys.
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork_faststr(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*sys.PtrSize) {
			k := (*stringStruct)(kptr)
			if k.len != key.len || b.tophash[i] != top {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// Clear key's pointer.
			k.str = nil
			// Only clear value if there are pointers in it.
			if t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				memclrHasPointers(v, t.elem.size)
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}

// growWork_fast32 performs a unit of incremental growth for a uint32-keyed map:
// it evacuates the old bucket corresponding to bucket, plus one more to make progress.
func growWork_fast32(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast32(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast32(t, h, h.nevacuate)
	}
}

// evacuate_fast32 moves the entries of old bucket oldbucket into the new
// bucket array, splitting them between the x (low) and y (high) destinations
// when the table is doubling in size.
func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*4)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*4)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*4)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*4)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
				} else {
					*(*uint32)(dst.k) = *(*uint32)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 4)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

// growWork_fast64 is like growWork_fast32 but for maps with uint64 keys.
func growWork_fast64(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_fast64(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_fast64(t, h, h.nevacuate)
	}
}

// evacuate_fast64 is like evacuate_fast32 but for maps with uint64 keys.
func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*8)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*8)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*8)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*8)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
					if sys.PtrSize == 8 {
						writebarrierptr((*uintptr)(dst.k), *(*uintptr)(k))
					} else {
						// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
						// Give up and call typedmemmove.
						typedmemmove(t.key, dst.k, k)
					}
				} else {
					*(*uint64)(dst.k) = *(*uint64)(k)
				}

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 8)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

// growWork_faststr is like growWork_fast32 but for maps with string keys.
func growWork_faststr(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate_faststr(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate_faststr(t, h, h.nevacuate)
	}
}

// evacuate_faststr is like evacuate_fast32 but for maps with string keys.
func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*2*sys.PtrSize)

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*2*sys.PtrSize)
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k, uintptr(h.hash0))
					if hash&newbit != 0 {
						useY = 1
					}
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check

				// Copy key.
				*(*string)(dst.k) = *(*string)(k)

				typedmemmove(t.elem, dst.v, v)
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, 2*sys.PtrSize)
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}
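
// Illustrative sketch (informal note, not part of the runtime API): user code
// never calls the functions above directly. For maps keyed by uint32, uint64,
// or string, the compiler lowers the built-in map operations to these fast
// variants rather than the generic mapaccess/mapassign/mapdelete routines.
// Roughly, for a string-keyed map:
//
//	m := make(map[string]int)
//	m["k"] = 1      // ~ mapassign_faststr
//	v := m["k"]     // ~ mapaccess1_faststr
//	v, ok := m["k"] // ~ mapaccess2_faststr
//	delete(m, "k")  // ~ mapdelete_faststr
//	_, _ = v, ok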