rsc.io/go@v0.0.0-20150416155037-e040fd465409/src/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//
//	loadFactor	%overflow	bytes/entry	hitprobe	missprobe
//	4.00		2.13		20.77		3.00		4.00
//	4.50		4.05		17.30		3.25		4.50
//	5.00		6.85		14.77		3.50		5.00
//	5.50		10.55		12.94		3.75		5.50
//	6.00		15.27		11.67		4.00		6.00
//	6.50		20.90		10.79		4.25		6.50
//	7.00		27.14		10.15		4.50		7.00
//	7.50		34.03		9.73		4.75		7.50
//	8.00		41.10		9.40		5.00		8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)
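
	// As a concrete illustration of the bit split described at the top of
	// this file (a sketch assuming a 64-bit system and B == 4, i.e. 16 buckets):
	//
	//	hash := uintptr(0xfedcba9876543210)
	//	bucket := hash & (1<<4 - 1) // 0x0: low-order bits select the bucket
	//	top := uint8(hash >> 56)    // 0xfe: high-order bits become the tophash byte
	//
	// so a lookup can usually reject a cell by comparing one byte rather
	// than calling the key's equality function.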

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1
)

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint8
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	hash0 uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce the static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
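
// To see why the packed layout wins, consider map[int64]int8 on amd64
// (a sketch; the exact layout is generated by the compiler):
//
//	tophash  [8]uint8 // 8 bytes
//	keys     [8]int64 // 64 bytes
//	values   [8]int8  // 8 bytes
//	overflow *bmap    // 8 bytes, no padding needed
//
// Interleaving key/value pairs would instead cost 7 bytes of padding after
// each int8 value to realign the next int64 key.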

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}
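
// For example, map[int64]int64 has a pointer-free bucket type
// (kindNoPointers), so the GC never scans its buckets and would not see
// the overflow pointer that setoverflow writes at the end of the bucket;
// the append into *h.overflow[0] above is what keeps that overflow bucket
// reachable.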

// makemap implements a Go map creation make(map[k]v, hint)
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
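
// A worked example of the sizing loop above: make(map[int]int, 100)
// reaches makemap with hint == 100. The loop stops at the first B with
// 100 <= 6.5*2^B, which is B == 4, so the map starts with 16 buckets and
// room for up to 104 entries before the load factor forces a grow.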

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
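
// At the language level these two entry points correspond to the two
// forms of map read (caller-side sketch, not part of the runtime):
//
//	v := m[k]     // compiled to mapaccess1: zero value if k is absent
//	v, ok := m[k] // compiled to mapaccess2: ok reports whether k was present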

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}

func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key. Update it.
			typedmemmove(t.key, k2, key)
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}
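
// Concretely, for a map that has reached B == 3 (8 buckets), the load
// check above makes the insert of a new key trigger hashGrow once h.count
// has reached 52 (= 6.5 * 8); the table then doubles to B == 4 and the
// old buckets are evacuated incrementally by later writes.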

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}

func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomicor8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
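
// One visible consequence of the random startBucket/offset chosen above is
// that iteration order differs from run to run (a user-level sketch):
//
//	m := map[string]int{"a": 1, "b": 2, "c": 3}
//	for k := range m {
//		println(k) // keys appear in an unspecified, randomized order
//	}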

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}
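
// The effect of growWork is that every write (assign or delete) during a
// grow evacuates at most two old buckets: the one the write is about to
// touch, plus the next unevacuated bucket tracked by h.nevacuate. So a
// grow is fully paid off after roughly as many map writes as there were
// old buckets.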

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
		}
	}
}
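
// As a worked example of the x/y split: when a map grows from B == 3 to
// B == 4, newbit == 8, and old bucket 5 is split across new buckets 5
// (the x half) and 13 (the y half). Each key goes to y exactly when
// hash&8 != 0, i.e. when its new, wider bucket index no longer equals 5.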

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}
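
// These linknames are how package reflect reaches the runtime map
// implementation; for example, reflect.ValueOf(m).Len() on a map value
// ends up in reflect_maplen above.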