// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//	loadFactor  %overflow  bytes/entry  hitprobe  missprobe
//	      4.00       2.13        20.77      3.00       4.00
//	      4.50       4.05        17.30      3.25       4.50
//	      5.00       6.85        14.77      3.50       5.00
//	      5.50      10.55        12.94      3.75       5.50
//	      6.00      15.27        11.67      4.00       6.00
//	      6.50      20.90        10.79      4.25       6.50
//	      7.00      27.14        10.15      4.50       7.00
//	      7.50      34.03         9.73      4.75       7.50
//	      8.00      41.10         9.40      5.00       8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.
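// Illustrative sketch only (not part of the runtime): how a hash value is
// split into a bucket index and a tophash byte, as described above. The helper
// names bucketIndexFor and tophashFor are hypothetical; the real computation
// is written out inline in mapaccess1/2, mapassign1, and mapdelete below.
//
//	func bucketIndexFor(hash uintptr, B uint8) uintptr {
//		return hash & (uintptr(1)<<B - 1) // low-order B bits select the bucket
//	}
//
//	func tophashFor(hash uintptr) uint8 {
//		top := uint8(hash >> (ptrSize*8 - 8)) // high-order byte of the hash
//		if top < minTopHash {
//			top += minTopHash // stay clear of the reserved marker values
//		}
//		return top
//	}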
import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/gc/walk.c must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1

	// trigger a garbage collection at every alloc called from this code
	checkgc = false
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint32
	hash0 uint32 // hash seed
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
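// For concreteness, a bucket of a map[int64]int64 on a 64-bit platform is laid
// out roughly like the illustrative (and entirely hypothetical) struct below;
// the runtime never declares such a type and instead computes the offsets from
// dataOffset, t.keysize, and t.valuesize:
//
//	type bucketInt64 struct {
//		tophash  [bucketCnt]uint8 // 8 bytes
//		keys     [bucketCnt]int64 // 64 bytes, all keys packed together
//		values   [bucketCnt]int64 // 64 bytes, all values packed together
//		overflow *bucketInt64     // pointer stored at the very end of the bucket
//	}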
// A hash iteration structure.
// If you modify hiter, also change cmd/gc/reflect.c to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
	value       unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}

func makemap(t *maptype, hint int64) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	var buckets unsafe.Pointer
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	h := (*hmap)(newobject(t.hmap))
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
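// As a worked example of the sizing loop in makemap: a hint of 100 leaves B=4,
// because loadFactor*2^3 = 52 is still below 100 while loadFactor*2^4 = 104 is
// not, so the initial table gets 16 buckets and can hold up to 104 entries
// before the first grow.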
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
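// Roughly speaking, the compiler lowers plain map reads onto the two functions
// above (or onto the specialized fast variants chosen in ../../cmd/gc/walk.c):
// v := m[k] becomes a mapaccess1 call and v, ok := m[k] becomes a mapaccess2
// call. The sketch below is only illustrative; V stands for the map's value
// type and is not a name used anywhere in this file:
//
//	// v := m[k]
//	v := *(*V)(mapaccess1(t, h, unsafe.Pointer(&k)))
//
//	// v, ok := m[k]
//	p, ok := mapaccess2(t, h, unsafe.Pointer(&k))
//	v := *(*V)(p) // zero value when !ok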
// returns both key and value. Used by map iterator.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key. Update it.
			typedmemmove(t.key, k2, key)
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		b.setoverflow(t, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}
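// Worked example of the growth check in mapassign1: with h.B = 5 the map has
// 32 buckets, so inserting a new key once h.count has reached
// loadFactor*32 = 208 calls hashGrow and retries the insertion, doubling the
// table to 64 buckets (h.B = 6).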
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
		throw("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init().
	for {
		old := h.flags
		if old == old|iterator|oldIterator {
			break
		}
		if cas(&h.flags, old, old|iterator|oldIterator) {
			break
		}
	}

	mapiternext(it)
}
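// A range statement over a map compiles into calls to the iterator functions
// above and below: roughly, for k, v := range m initializes an hiter with
// mapiterinit and then calls mapiternext until it.key is nil (see
// ../../cmd/gc/range.c). Because mapiterinit chooses a random startBucket and
// intra-bucket offset, repeated iterations over the same map visit keys in
// different orders. An approximate sketch of the generated loop:
//
//	var it hiter
//	mapiterinit(t, h, &it)
//	for ; it.key != nil; mapiternext(&it) {
//		// loop body sees it.key and it.value
//	}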
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
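// During a grow, each old bucket is split across two buckets of the doubled
// array: old bucket i feeds new bucket i ("X", marked evacuatedX) and new
// bucket i+newbit ("Y", marked evacuatedY), where newbit is the old bucket
// count. For example, when growing from B=2 to B=3, the entries of old
// bucket 1 are redistributed between new buckets 1 and 5, according to bit 2
// of each key's hash.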
func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						x.setoverflow(t, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						y.setoverflow(t, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
		}
	}
}
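// Since each growWork call evacuates the old bucket about to be written plus
// at most one additional bucket at h.nevacuate, a grow started with 2^(B-1)
// old buckets finishes after roughly that many map writes, so the copying cost
// is spread across many mapassign1/mapdelete calls rather than paid all at
// once.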
func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}
// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}