github.com/jonasi/go@v0.0.0-20150930005915-e78e654c1de0/src/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//	loadFactor	%overflow	bytes/entry	hitprobe	missprobe
//	4.00		2.13		20.77		3.00		4.00
//	4.50		4.05		17.30		3.25		4.50
//	5.00		6.85		14.77		3.50		5.00
//	5.50		10.55		12.94		3.75		5.50
//	6.00		15.27		11.67		4.00		6.00
//	6.50		20.90		10.79		4.25		6.50
//	7.00		27.14		10.15		4.50		7.00
//	7.50		34.03		9.73		4.75		7.50
//	8.00		41.10		9.40		5.00		8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth.
	loadFactor = 6.5

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*ptrSize) - 1
)
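
// The constants above are easiest to see in terms of the bit arithmetic that
// the lookup, assignment, and delete paths below repeat inline. The sketch
// below is illustrative only (bucketAndTop is a hypothetical helper, not part
// of this file): the low-order B bits of the hash select a bucket, and the top
// 8 bits become the entry's tophash, bumped past the reserved marker values.
//
//	func bucketAndTop(hash uintptr, B uint8) (bucket uintptr, top uint8) {
//		bucket = hash & (uintptr(1)<<B - 1) // low-order bits pick the bucket
//		top = uint8(hash >> (ptrSize*8 - 8)) // high 8 bits distinguish entries within a bucket
//		if top < minTopHash {
//			top += minTopHash // values 0..3 are reserved for empty/evacuated marks
//		}
//		return
//	}
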
// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count int // # live cells == size of map. Must be first (used by len() builtin)
	flags uint8
	B     uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	hash0 uint32 // hash seed

	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.overflow.
	// Overflow is used only if key and value do not contain pointers.
	// overflow[0] contains overflow buckets for hmap.buckets.
	// overflow[1] contains overflow buckets for hmap.oldbuckets.
	// The first indirection allows us to reduce the static size of hmap.
	// The second indirection allows us to store a pointer to the slice in hiter.
	overflow *[2]*[]*bmap
}

// A bucket for a Go map.
type bmap struct {
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer.
}
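
// To make the NOTE above concrete, here is the rough arithmetic for
// map[int64]int8 on a 64-bit platform (illustrative only, ignoring the
// alignment of the trailing overflow pointer):
//
//	// keys packed together, then values packed together:
//	//   8 tophash bytes + 8*8 key bytes + 8*1 value bytes + 8-byte overflow ptr = 88 bytes
//	// alternating key/value pairs would instead need 7 padding bytes after each
//	// int8 value to realign the next int64 key:
//	//   8 tophash bytes + 8*(8+1+7) bytes + 8-byte overflow ptr = 144 bytes
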
// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    [2]*[]*bmap    // keeps overflow buckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize))
}

func (h *hmap) setoverflow(t *maptype, b, ovf *bmap) {
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.overflow[0] = append(*h.overflow[0], ovf)
	}
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-ptrSize)) = ovf
}

func (h *hmap) createOverflow() {
	if h.overflow == nil {
		h.overflow = new([2]*[]*bmap)
	}
	if h.overflow[0] == nil {
		h.overflow[0] = new([]*bmap)
	}
}

// makemap implements a Go map creation make(map[k]v, hint)
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If bucket != nil, bucket can be used as the first bucket.
func makemap(t *maptype, hint int64, h *hmap, bucket unsafe.Pointer) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
		// TODO: make hint an int, then none of this nonsense
	}

	if !ismapkey(t.key) {
		throw("runtime.makemap: unsupported map key type")
	}

	// check compiler's and reflect's math
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}

	// invariants we depend on. We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	// make sure zeroptr is large enough
	mapzero(t.elem)

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	buckets := bucket
	if B != 0 {
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1()
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0

	return h
}
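
// A worked example of the sizing loop in makemap (illustrative numbers, not
// from the source): for make(map[K]V, 100), the loop stops at the first B with
// 100 <= loadFactor*2^B, i.e. B = 4 (16 buckets, which can hold up to
// 6.5*16 = 104 entries before growth is triggered); B = 3 would allow only
// 6.5*8 = 52. Hints of bucketCnt (8) or fewer leave B = 0 and a single bucket.
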
// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return atomicloadp(unsafe.Pointer(&zeroptr))
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomicloadp(unsafe.Pointer(&zeroptr))
		}
	}
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return atomicloadp(unsafe.Pointer(&zeroptr)), false
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return atomicloadp(unsafe.Pointer(&zeroptr)), false
		}
	}
}

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}
func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate {
				typedmemmove(t.key, k2, key)
			}
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			typedmemmove(t.elem, v2, val)
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// did not find mapping for key. Allocate new cell & add entry.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := (*bmap)(newobject(t.bucket))
		h.setoverflow(t, b, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	typedmemmove(t.key, insertk, key)
	typedmemmove(t.elem, insertv, val)
	*inserti = top
	h.count++
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}
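
// The growth trigger in mapassign1 above is worth a concrete example
// (illustrative numbers only): with B = 3 there are 8 buckets, so the table
// grows once the element count reaches loadFactor*2^B = 6.5*8 = 52 and a new
// key does not find an existing slot. hashGrow then doubles to B = 4 (16
// buckets), and entries are copied over incrementally by growWork/evacuate,
// one or two old buckets per map write, rather than all at once.
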
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields so garbage collector does not complain.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil
	it.overflow[0] = nil
	it.overflow[1] = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	if unsafe.Sizeof(hiter{})/ptrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This keeps all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = *h.overflow
	}

	// decide where to start
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomicor8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
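
// The random startBucket and offset chosen above are why Go map iteration
// order is unspecified and varies between iterations. A user-level
// illustration (not part of the runtime):
//
//	m := map[string]int{"a": 1, "b": 2, "c": 3}
//	for k := range m {
//		println(k) // "a", "b", "c" may come out in a different order on each run
//	}
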
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Special case: iterator was started during a grow and the
				// grow is not done yet. We're working on a bucket whose
				// oldbucket has not been evacuated yet. Or at least, it wasn't
				// evacuated when we started the bucket. So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash := alg.hash(k2, uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs). We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation. We'll use the low
					// bit of tophash to decide which way NaNs go.
					// NOTE: this case is why we need two evacuate tophash
					// values, evacuatedX and evacuatedY, that differ in
					// their low bit.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// this is the golden data, we can return it.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if t.reflexivekey || alg.equal(k2, k2) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key has been deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it. That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		throw("evacuation not done in time")
	}
	oldbuckets := h.buckets
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0

	if h.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.overflow[1] != nil {
			throw("overflow is not nil")
		}
		h.overflow[1] = h.overflow[0]
		h.overflow[0] = nil
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}
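
// During a grow each old bucket splits into two new ones: old bucket i feeds
// new buckets i ("X") and i+newbit ("Y"), where newbit is the old bucket
// count, depending on the newly significant hash bit. Illustrative numbers
// (not from the source): growing from B=3 to B=4, old bucket 5 splits into
// new buckets 5 and 5+8 = 13; a key lands in 13 exactly when hash&8 != 0.
// This is also how the iterator above decides which entries of an unevacuated
// old bucket belong to the new bucket it is currently walking.
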
func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&(noldbuckets-1))

	// evacuate one more oldbucket to make progress on growing
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1)
	alg := t.key.alg
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash := alg.hash(k2, uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !t.reflexivekey && !alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						newx := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, x, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy pointer
					} else {
						typedmemmove(t.key, xk, k) // copy value
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, xv, v)
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						newy := (*bmap)(newobject(t.bucket))
						h.setoverflow(t, y, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						typedmemmove(t.key, yk, k)
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						typedmemmove(t.elem, yv, v)
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance evacuation mark
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit { // newbit == # of oldbuckets
			// Growing is all done. Free old main bucket array.
			h.oldbuckets = nil
			// Can discard old overflow buckets as well.
			// If they are still referenced by an iterator,
			// then the iterator holds a pointer to the slice.
			if h.overflow != nil {
				h.overflow[1] = nil
			}
		}
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}
// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0, nil, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

var zerolock mutex

const initialZeroSize = 1024

var zeroinitial [initialZeroSize]byte

// All accesses to zeroptr and zerosize must be atomic so that they
// can be accessed without locks in the common case.
var zeroptr unsafe.Pointer = unsafe.Pointer(&zeroinitial)
var zerosize uintptr = initialZeroSize

// mapzero ensures that zeroptr points to a buffer large enough to
// serve as the zero value for t.
func mapzero(t *_type) {
	// Is the type small enough for existing buffer?
	cursize := uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
	if t.size <= cursize {
		return
	}

	// Allocate a new buffer.
	lock(&zerolock)
	cursize = uintptr(atomicloadp(unsafe.Pointer(&zerosize)))
	if cursize < t.size {
		for cursize < t.size {
			cursize *= 2
			if cursize == 0 {
				// need >2GB zero on 32-bit machine
				throw("map element too large")
			}
		}
		atomicstorep1(unsafe.Pointer(&zeroptr), persistentalloc(cursize, 64, &memstats.other_sys))
		atomicstorep1(unsafe.Pointer(&zerosize), unsafe.Pointer(cursize))
	}
	unlock(&zerolock)
}