github.com/ltltlt/go-source-code@v0.0.0-20190830023027-95be009773aa/runtime/hashmap.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go's map type.
//
// A map is just a hash table. The data is arranged
// into an array of buckets. Each bucket contains up to
// 8 key/value pairs. The low-order bits of the hash are
// used to select a bucket. Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big. Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index). To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times). When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

// Picking loadFactor: too large and we have lots of overflow
// buckets, too small and we waste a lot of space. I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//	loadFactor	%overflow	bytes/entry	hitprobe	missprobe
//	4.00		2.13		20.77		3.00		4.00
//	4.50		4.05		17.30		3.25		4.50
//	5.00		6.85		14.77		3.50		5.00
//	5.50		10.55		12.94		3.75		5.50
//	6.00		15.27		11.67		4.00		6.00
//	6.50		20.90		10.79		4.25		6.50
//	7.00		27.14		10.15		4.50		7.00
//	7.50		34.03		9.73		4.75		7.50
//	8.00		41.10		9.40		5.00		8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows. Typical tables will be somewhat less loaded.

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth is 6.5.
	// Represent as loadFactorNum/loadFactorDen, to allow integer math.
	loadFactorNum = 13
	loadFactorDen = 2

	// Maximum key or value size to keep inline (instead of mallocing per element).
	// Must fit in a uint8.
	// Fast versions cannot handle big values - the cutoff size for
	// fast versions in ../../cmd/internal/gc/walk.go must be at most this value.
	maxKeySize   = 128
	maxValueSize = 128

	// data offset should be the size of the bmap struct, but needs to be
	// aligned correctly. For amd64p32 this means 64-bit alignment
	// even though pointers are 32 bit.
	// (i.e. the offset of the key/value storage within a bmap)
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. We reserve a few possibilities for special marks.
	// Each bucket (including its overflow buckets, if any) will have either all or none of its
	// entries in the evacuated* states (except during the evacuate() method, which only happens
	// during map writes and thus no one else can observe the map during that time).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
	evacuatedX     = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
	evacuatedY     = 3 // same as above, but evacuated to second half of larger table.
	minTopHash     = 4 // minimum tophash for a normal filled cell.

	// flags
	iterator     = 1 // there may be an iterator using buckets
	oldIterator  = 2 // there may be an iterator using oldbuckets
	hashWriting  = 4 // a goroutine is writing to the map
	sameSizeGrow = 8 // the current map growth is to a new map of the same size

	// sentinel bucket ID for iterator checks
	noCheck = 1<<(8*sys.PtrSize) - 1
)
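
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): shows how a hash value is split by the code below -- the low-order B
// bits pick the bucket, and the high-order 8 bits become the tophash byte that is
// compared before the full key. It relies on the helpers bucketMask and tophash
// defined later in this file.
func hashSplitDemo(hash uintptr, B uint8) (bucketIdx uintptr, top uint8) {
	bucketIdx = hash & bucketMask(B) // low-order B bits select one of the 1<<B buckets
	top = tophash(hash)              // high-order 8 bits distinguish entries within a bucket
	return bucketIdx, top
}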

// A header for a Go map.
type hmap struct {
	// Note: the format of the Hmap is encoded in ../../cmd/internal/gc/reflect.go and
	// ../reflect/type.go. Don't change this structure without also changing that code!
	count     int // # live cells == size of map. Must be first (used by len() builtin)
	flags     uint8
	B         uint8  // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
	noverflow uint16 // approximate number of overflow buckets; see incrnoverflow for details
	hash0     uint32 // hash seed

	// array of bmap, with 2^B elements
	buckets    unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
	oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
	nevacuate  uintptr        // progress counter for evacuation (buckets less than this have been evacuated)

	extra *mapextra // optional fields
}

// mapextra holds fields that are not present on all maps.
type mapextra struct {
	// If both key and value do not contain pointers and are inline, then we mark bucket
	// type as containing no pointers. This avoids scanning such maps.
	// However, bmap.overflow is a pointer. In order to keep overflow buckets
	// alive, we store pointers to all overflow buckets in hmap.extra.overflow and hmap.extra.oldoverflow.
	// overflow and oldoverflow are only used if key and value do not contain pointers.
	// overflow contains overflow buckets for hmap.buckets.
	// oldoverflow contains overflow buckets for hmap.oldbuckets.
	// The indirection allows storing a pointer to the slice in hiter.
	overflow    *[]*bmap
	oldoverflow *[]*bmap

	// nextOverflow holds a pointer to a free overflow bucket.
	nextOverflow *bmap
}

// A bucket for a Go map.
type bmap struct {
	// tophash generally contains the top byte of the hash value
	// for each key in this bucket. If tophash[0] < minTopHash,
	// tophash[0] is a bucket evacuation state instead.
	// (This array stores the top 8 bits of each key's hash; for a normal entry the
	// stored value is never < minTopHash -- minTopHash is added if it would be.
	// If tophash[0] < minTopHash, tophash[0] is the evacuation state of this bucket.)
	tophash [bucketCnt]uint8
	// Followed by bucketCnt keys and then bucketCnt values.
	// NOTE: packing all the keys together and then all the values together makes the
	// code a bit more complicated than alternating key/value/key/value/... but it allows
	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
	// Followed by an overflow pointer, pointing to another bmap
	// (used when this bucket's chain holds more than bucketCnt entries).
}
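
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): given a bucket and the map's type descriptor, compute where key i and
// value i live, mirroring the bmap layout described above (tophash array at the
// front, then bucketCnt keys packed together, then bucketCnt values).
func bucketSlotDemo(t *maptype, b *bmap, i uintptr) (k, v unsafe.Pointer) {
	k = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
	v = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
	return k, v // if t.indirectkey/t.indirectvalue, these slots hold pointers to the data
}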

// A hash iteration structure.
// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
	key         unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
	value       unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket ptr at hash_iter initialization time
	bptr        *bmap          // current bucket
	overflow    *[]*bmap       // keeps overflow buckets of hmap.buckets alive
	oldoverflow *[]*bmap       // keeps overflow buckets of hmap.oldbuckets alive
	startBucket uintptr        // bucket iteration started at
	offset      uint8          // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
	wrapped     bool           // already wrapped around from end of bucket array to beginning
	B           uint8
	i           uint8
	bucket      uintptr
	checkBucket uintptr
}

// bucketShift returns 1<<b, optimized for code generation.
func bucketShift(b uint8) uintptr {
	if sys.GoarchAmd64|sys.GoarchAmd64p32|sys.Goarch386 != 0 {
		b &= sys.PtrSize*8 - 1 // help x86 archs remove shift overflow checks
	}
	return uintptr(1) << b
}

// bucketMask returns 1<<b - 1, optimized for code generation.
func bucketMask(b uint8) uintptr {
	return bucketShift(b) - 1
}

// tophash calculates the tophash value for hash.
// (i.e. takes the top 8 bits of the hash)
func tophash(hash uintptr) uint8 {
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	return top
}

func evacuated(b *bmap) bool {
	h := b.tophash[0]
	return h > empty && h < minTopHash
}

func (b *bmap) overflow(t *maptype) *bmap {
	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize))
}

func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-sys.PtrSize)) = ovf
}

// keys returns a pointer to the key storage of this bucket.
func (b *bmap) keys() unsafe.Pointer {
	return add(unsafe.Pointer(b), dataOffset)
}

// incrnoverflow increments h.noverflow.
// noverflow counts the number of overflow buckets.
// This is used to trigger same-size map growth.
// See also tooManyOverflowBuckets.
// To keep hmap small, noverflow is a uint16.
// When there are few buckets, noverflow is an exact count.
// When there are many buckets, noverflow is an approximate count.
func (h *hmap) incrnoverflow() {
	// We trigger same-size map growth if there are
	// as many overflow buckets as buckets.
	// We need to be able to count to 1<<h.B.
	if h.B < 16 {
		h.noverflow++
		return
	}
	// Increment with probability 1/(1<<(h.B-15)).
	// When we reach 1<<15 - 1, we will have approximately
	// as many overflow buckets as buckets.
	mask := uint32(1)<<(h.B-15) - 1
	// Example: if h.B == 18, then mask == 7,
	// and fastrand & 7 == 0 with probability 1/8.
	if fastrand()&mask == 0 {
		h.noverflow++
	}
}
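
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the sampling rate implied by incrnoverflow above. For B < 16 every new
// overflow bucket is counted; for larger maps only roughly one in 1<<(B-15) is, so
// a noverflow value near 1<<15 corresponds to roughly 1<<B overflow buckets.
func overflowSampleRateDemo(B uint8) uint32 {
	if B < 16 {
		return 1 // exact count: every overflow bucket increments noverflow
	}
	return uint32(1) << (B - 15) // on average, one increment per this many overflow buckets
}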

// Called when a bmap (bucket) has overflowed: attach a new overflow bucket to b.
func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap {
	var ovf *bmap
	if h.extra != nil && h.extra.nextOverflow != nil {
		// We have preallocated overflow buckets available.
		// See makeBucketArray for more details.
		ovf = h.extra.nextOverflow
		if ovf.overflow(t) == nil {
			// We're not at the end of the preallocated overflow buckets. Bump the pointer.
			h.extra.nextOverflow = (*bmap)(add(unsafe.Pointer(ovf), uintptr(t.bucketsize)))
		} else {
			// This is the last preallocated overflow bucket.
			// Reset the overflow pointer on this bucket,
			// which was set to a non-nil sentinel value.
			ovf.setoverflow(t, nil)
			h.extra.nextOverflow = nil
		}
	} else {
		ovf = (*bmap)(newobject(t.bucket))
	}
	h.incrnoverflow()
	if t.bucket.kind&kindNoPointers != 0 {
		h.createOverflow()
		*h.extra.overflow = append(*h.extra.overflow, ovf)
	}
	b.setoverflow(t, ovf)
	return ovf
}

func (h *hmap) createOverflow() {
	if h.extra == nil {
		h.extra = new(mapextra)
	}
	if h.extra.overflow == nil {
		h.extra.overflow = new([]*bmap)
	}
}

func makemap64(t *maptype, hint int64, h *hmap) *hmap {
	if int64(int(hint)) != hint {
		hint = 0
	}
	return makemap(t, int(hint), h)
}

// makemap_small implements Go map creation for make(map[k]v) and
// make(map[k]v, hint) when hint is known to be at most bucketCnt
// at compile time and the map needs to be allocated on the heap.
func makemap_small() *hmap {
	h := new(hmap)
	h.hash0 = fastrand()
	return h
}

// makemap implements Go map creation for make(map[k]v, hint).
// If the compiler has determined that the map or the first bucket
// can be created on the stack, h and/or bucket may be non-nil.
// If h != nil, the map can be created directly in h.
// If h.buckets != nil, bucket pointed to can be used as the first bucket.
func makemap(t *maptype, hint int, h *hmap) *hmap {
	// The size of hmap should be 48 bytes on 64 bit
	// and 28 bytes on 32 bit platforms.
	if sz := unsafe.Sizeof(hmap{}); sz != 8+5*sys.PtrSize {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}

	if hint < 0 || hint > int(maxSliceCap(t.bucket.size)) {
		hint = 0
	}

	// initialize Hmap
	if h == nil {
		h = (*hmap)(newobject(t.hmap))
	}
	h.hash0 = fastrand()

	// find size parameter which will hold the requested # of elements
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	h.B = B

	// allocate initial hash table
	// if B == 0, the buckets field is allocated lazily later (in mapassign)
	// If hint is large zeroing this memory could take a while.
	if h.B != 0 {
		var nextOverflow *bmap
		h.buckets, nextOverflow = makeBucketArray(t, h.B)
		if nextOverflow != nil {
			h.extra = new(mapextra)
			h.extra.nextOverflow = nextOverflow
		}
	}

	return h
}
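
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the B-selection loop from makemap above, pulled out for clarity. It
// returns the smallest B whose 1<<B buckets keep hint elements at or below the
// 6.5 average load factor; e.g. hint=100 gives B=4, since 13*(16/2) = 104 >= 100
// while 13*(8/2) = 52 < 100.
func pickBDemo(hint int) uint8 {
	B := uint8(0)
	for overLoadFactor(hint, B) {
		B++
	}
	return B
}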

// mapaccess1 returns a pointer to h[key]. Never returns nil, instead
// it will return a reference to the zero object for the value type if
// the key is not in the map.
// NOTE: The returned pointer may keep the whole map live, so don't
// hold onto it for very long.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey { // the key is stored indirectly (as a pointer), so dereference it
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0])
}

func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
	}
	return unsafe.Pointer(&zeroVal[0]), false
}
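
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the bucket-selection step shared by the mapaccess variants above. The
// lookup targets the bucket in the current array, but while a grow is in progress
// it falls back to the corresponding old bucket if that one has not been evacuated
// yet, because the data has not moved.
func lookupBucketDemo(t *maptype, h *hmap, hash uintptr) *bmap {
	m := bucketMask(h.B)
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			m >>= 1 // the old array had half as many buckets
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	return b
}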

// returns both key and value. Used by map iterator
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))
	m := bucketMask(h.B)
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := tophash(hash)
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
	}
	return nil, nil
}

// returns zero if the key is not found
func mapaccess1_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) unsafe.Pointer {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero
	}
	return v
}

func mapaccess2_fat(t *maptype, h *hmap, key, zero unsafe.Pointer) (unsafe.Pointer, bool) {
	v := mapaccess1(t, h, key)
	if v == unsafe.Pointer(&zeroVal[0]) {
		return zero, false
	}
	return v, true
}
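
// Illustrative sketch (annotator's addition, not part of the original runtime
// source; user-level code like this would normally live outside the runtime):
// how ordinary map expressions relate to the accessors above. The compiler lowers
// `v := m[k]` to a mapaccess1-style call and `v, ok := m[k]` to a mapaccess2-style
// call (or to the fast/fat variants, depending on the key and value types).
func accessorLoweringDemo(m map[int64]int64, k int64) (int64, bool) {
	v := m[k]      // missing keys yield the zero value (backed by zeroVal for small values)
	v2, ok := m[k] // ok reports whether the key was present
	_ = v
	return v2, ok
}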

// Like mapaccess, but allocates a slot for the key if it is not present in the map.
func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(mapassign)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled {
		msanread(key, t.key.size)
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
	}

again:
	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := tophash(hash)

	var inserti *uint8         // insert position (tophash)
	var insertk unsafe.Pointer // insert position (key)
	var val unsafe.Pointer     // insert position (value)
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if !alg.equal(key, k) {
				continue
			}
			// already have a mapping for key. Update it.
			if t.needkeyupdate {
				typedmemmove(t.key, k, key)
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// store new key/value at insert position
	if t.indirectkey {
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(val) = vmem
	}
	typedmemmove(t.key, insertk, key)
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	if t.indirectvalue {
		val = *((*unsafe.Pointer)(val))
	}
	return val
}

func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if msanenabled && h != nil {
		msanread(key, t.key.size)
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	alg := t.key.alg
	hash := alg.hash(key, uintptr(h.hash0))

	// Set hashWriting after calling alg.hash, since alg.hash may panic,
	// in which case we have not actually done a write (delete).
	h.flags |= hashWriting

	bucket := hash & bucketMask(h.B)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
	top := tophash(hash)
search:
	for ; b != nil; b = b.overflow(t) {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2) {
				continue
			}
			// Only clear key if there are pointers in it.
			if t.indirectkey {
				*(*unsafe.Pointer)(k) = nil
			} else if t.key.kind&kindNoPointers == 0 {
				memclrHasPointers(k, t.key.size)
			}
			// Only clear value if there are pointers in it.
			if t.indirectvalue || t.elem.kind&kindNoPointers == 0 {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					*(*unsafe.Pointer)(v) = nil
				} else {
					memclrHasPointers(v, t.elem.size)
				}
			}
			b.tophash[i] = empty
			h.count--
			break search
		}
	}

	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
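
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): which memory mapdelete above actually clears. The cell is freed by
// setting its tophash to empty; the key/value bytes themselves are only zeroed
// when they may contain pointers, since that is what matters for the garbage
// collector. Pointer-free data is simply left behind and overwritten later.
func deleteClearDemo(t *maptype) (clearKey, clearValue bool) {
	clearKey = t.indirectkey || t.key.kind&kindNoPointers == 0
	clearValue = t.indirectvalue || t.elem.kind&kindNoPointers == 0
	return clearKey, clearValue
}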

// mapiterinit initializes the hiter struct used for ranging over maps.
// The hiter struct pointed to by 'it' is allocated on the stack
// by the compiler's order pass or on the heap by reflect_mapiterinit.
// Both need to have zeroed hiter since the struct contains pointers.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	if raceenabled && h != nil {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		return
	}

	if unsafe.Sizeof(hiter{})/sys.PtrSize != 12 {
		throw("hash_iter size incorrect") // see ../../cmd/internal/gc/reflect.go
	}
	it.t = t
	it.h = h

	// grab snapshot of bucket state
	it.B = h.B
	it.buckets = h.buckets
	if t.bucket.kind&kindNoPointers != 0 {
		// Allocate the current slice and remember pointers to both current and old.
		// This preserves all relevant overflow buckets alive even if
		// the table grows and/or overflow buckets are added to the table
		// while we are iterating.
		h.createOverflow()
		it.overflow = h.extra.overflow
		it.oldoverflow = h.extra.oldoverflow
	}

	// decide where to start
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	it.startBucket = r & bucketMask(h.B)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// iterator state
	it.bucket = it.startBucket

	// Remember we have an iterator.
	// Can run concurrently with another mapiterinit().
	if old := h.flags; old&(iterator|oldIterator) != iterator|oldIterator {
		atomic.Or8(&h.flags, iterator|oldIterator)
	}

	mapiternext(it)
}
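
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the start-position randomization done by mapiterinit above. Both the
// starting bucket and the starting cell offset within each bucket come from
// fastrand, which is why Go map iteration order is unspecified and differs from
// run to run.
func iterStartDemo(h *hmap) (startBucket uintptr, offset uint8) {
	r := uintptr(fastrand())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand()) << 31
	}
	startBucket = r & bucketMask(h.B)          // which bucket to begin with
	offset = uint8(r >> h.B & (bucketCnt - 1)) // which cell inside each bucket to begin with
	return startBucket, offset
}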

func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map iteration and map write")
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := t.key.alg

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// end of iteration
			it.key = nil
			it.value = nil
			return
		}
		if h.growing() && it.B == h.B {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket := bucket & it.h.oldbucketmask()
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				checkBucket = bucket
			} else {
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == bucketShift(it.B) {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		offi := (i + it.offset) & (bucketCnt - 1)
		if b.tophash[offi] == empty || b.tophash[offi] == evacuatedEmpty {
			continue
		}
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		if t.indirectkey {
			k = *((*unsafe.Pointer)(k))
		}
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if checkBucket != noCheck && !h.sameSizeGrow() {
			// Special case: iterator was started during a grow to a larger size
			// and the grow is not done yet. We're working on a bucket whose
			// oldbucket has not been evacuated yet. Or at least, it wasn't
			// evacuated when we started the bucket. So we're iterating
			// through the oldbucket, skipping any keys that will go
			// to the other new bucket (each oldbucket expands to two
			// buckets during a grow).
			if t.reflexivekey || alg.equal(k, k) {
				// If the item in the oldbucket is not destined for
				// the current new bucket in the iteration, skip it.
				hash := alg.hash(k, uintptr(h.hash0))
				if hash&bucketMask(it.B) != checkBucket {
					continue
				}
			} else {
				// Hash isn't repeatable if k != k (NaNs). We need a
				// repeatable and randomish choice of which direction
				// to send NaNs during evacuation. We'll use the low
				// bit of tophash to decide which way NaNs go.
				// NOTE: this case is why we need two evacuate tophash
				// values, evacuatedX and evacuatedY, that differ in
				// their low bit.
				if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
					continue
				}
			}
		}
		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
			!(t.reflexivekey || alg.equal(k, k)) {
			// This is the golden data, we can return it.
			// OR
			// key!=key, so the entry can't be deleted or updated, so we can just return it.
			// That's lucky for us because when key!=key we can't look it up successfully.
			it.key = k
			if t.indirectvalue {
				v = *((*unsafe.Pointer)(v))
			}
			it.value = v
		} else {
			// The hash table has grown since the iterator was started.
			// The golden data for this key is now somewhere else.
			// Check the current hash table for the data.
			// This code handles the case where the key
			// has been deleted, updated, or deleted and reinserted.
			// NOTE: we need to regrab the key as it has potentially been
			// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
			rk, rv := mapaccessK(t, h, k)
			if rk == nil {
				continue // key has been deleted
			}
			it.key = rk
			it.value = rv
		}
		it.bucket = bucket
		if it.bptr != b { // avoid unnecessary write barrier; see issue 14921
			it.bptr = b
		}
		it.i = i + 1
		it.checkBucket = checkBucket
		return
	}
	b = b.overflow(t)
	i = 0
	goto next
}

func makeBucketArray(t *maptype, b uint8) (buckets unsafe.Pointer, nextOverflow *bmap) {
	base := bucketShift(b)
	nbuckets := base // number of buckets actually allocated
	// For small b, overflow buckets are unlikely.
	// Avoid the overhead of the calculation.
	if b >= 4 {
		// Add on the estimated number of overflow buckets
		// required to insert the median number of elements
		// used with this value of b.
		nbuckets += bucketShift(b - 4)
		sz := t.bucket.size * nbuckets
		up := roundupsize(sz)
		if up != sz {
			nbuckets = up / t.bucket.size
		}
	}
	buckets = newarray(t.bucket, int(nbuckets))
	// The extra buckets allocated beyond base become the preallocated overflow buckets (nextOverflow).
	if base != nbuckets {
		// We preallocated some overflow buckets.
		// To keep the overhead of tracking these overflow buckets to a minimum,
		// we use the convention that if a preallocated overflow bucket's overflow
		// pointer is nil, then there are more available by bumping the pointer.
		// We need a safe non-nil pointer for the last overflow bucket; just use buckets.
		nextOverflow = (*bmap)(add(buckets, base*uintptr(t.bucketsize)))
		last := (*bmap)(add(buckets, (nbuckets-1)*uintptr(t.bucketsize)))
		last.setoverflow(t, (*bmap)(buckets))
	}
	return buckets, nextOverflow
}

func hashGrow(t *maptype, h *hmap) {
	// If we've hit the load factor, get bigger.
	// Otherwise, there are too many overflow buckets,
	// so keep the same number of buckets and "grow" laterally.
	bigger := uint8(1)
	if !overLoadFactor(h.count+1, h.B) {
		bigger = 0
		h.flags |= sameSizeGrow
	}
	oldbuckets := h.buckets
	newbuckets, nextOverflow := makeBucketArray(t, h.B+bigger)

	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}
	// commit the grow (atomic wrt gc)
	h.B += bigger
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
	h.noverflow = 0

	if h.extra != nil && h.extra.overflow != nil {
		// Promote current overflow buckets to the old generation.
		if h.extra.oldoverflow != nil {
			throw("oldoverflow is not nil")
		}
		h.extra.oldoverflow = h.extra.overflow
		h.extra.overflow = nil
	}
	if nextOverflow != nil {
		if h.extra == nil {
			h.extra = new(mapextra)
		}
		h.extra.nextOverflow = nextOverflow
	}

	// the actual copying of the hash table data is done incrementally
	// by growWork() and evacuate().
}

// overLoadFactor reports whether count items placed in 1<<B buckets is over loadFactor.
func overLoadFactor(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
}
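
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the growth threshold implied by overLoadFactor above, i.e. the largest
// count that 1<<B buckets may hold before another insert triggers a size-doubling
// grow. With loadFactorNum/loadFactorDen = 13/2 this is 6.5 elements per bucket on
// average; e.g. B=5 gives 13*(32/2) = 208. (For very small maps the additional
// count > bucketCnt condition in overLoadFactor applies as well.)
func loadFactorThresholdDemo(B uint8) uintptr {
	return loadFactorNum * (bucketShift(B) / loadFactorDen)
}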

// tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1<<B buckets.
// Note that most of these overflow buckets must be in sparse use;
// if use was dense, then we'd have already triggered regular map growth.
func tooManyOverflowBuckets(noverflow uint16, B uint8) bool {
	// If the threshold is too low, we do extraneous work.
	// If the threshold is too high, maps that grow and shrink can hold on to lots of unused memory.
	// "too many" means (approximately) as many overflow buckets as regular buckets.
	// See incrnoverflow for more details.
	if B > 15 {
		B = 15
	}
	// The compiler doesn't see here that B < 16; mask B to generate shorter shift code.
	return noverflow >= uint16(1)<<(B&15)
}

// growing reports whether h is growing. The growth may be to the same size or bigger.
func (h *hmap) growing() bool {
	return h.oldbuckets != nil
}

// sameSizeGrow reports whether the current growth is to a map of the same size.
func (h *hmap) sameSizeGrow() bool {
	return h.flags&sameSizeGrow != 0
}

// noldbuckets calculates the number of buckets prior to the current map growth.
func (h *hmap) noldbuckets() uintptr {
	oldB := h.B
	if !h.sameSizeGrow() {
		oldB--
	}
	return bucketShift(oldB)
}

// oldbucketmask provides a mask that can be applied to calculate n % noldbuckets().
func (h *hmap) oldbucketmask() uintptr {
	return h.noldbuckets() - 1
}

func growWork(t *maptype, h *hmap, bucket uintptr) {
	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket&h.oldbucketmask())

	// evacuate one more oldbucket to make progress on growing
	if h.growing() {
		evacuate(t, h, h.nevacuate)
	}
}

func bucketEvacuated(t *maptype, h *hmap, bucket uintptr) bool {
	b := (*bmap)(add(h.oldbuckets, bucket*uintptr(t.bucketsize)))
	return evacuated(b)
}

// evacDst is an evacuation destination.
type evacDst struct {
	b *bmap          // current destination bucket
	i int            // key/val index into b
	k unsafe.Pointer // pointer to current key storage
	v unsafe.Pointer // pointer to current value storage
}
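
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): the x/y split used by evacuate below. During a size-doubling grow, an
// entry from old bucket `oldbucket` moves either to the same index in the new
// array (the "x" destination) or to oldbucket+newbit (the "y" destination),
// depending on the extra hash bit that the larger bucket mask now exposes. newbit
// is the old bucket count, as returned by noldbuckets.
func evacDestDemo(oldbucket, hash, newbit uintptr) uintptr {
	if hash&newbit != 0 {
		return oldbucket + newbit // y: second half of the grown bucket array
	}
	return oldbucket // x: same index as before
}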

func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := h.noldbuckets()
	if !evacuated(b) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets. (If !oldIterator.)

		// xy contains the x and y (low and high) evacuation destinations.
		var xy [2]evacDst
		x := &xy[0]
		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		x.k = add(unsafe.Pointer(x.b), dataOffset)
		x.v = add(x.k, bucketCnt*uintptr(t.keysize))

		if !h.sameSizeGrow() {
			// Only calculate y pointers if we're growing bigger.
			// Otherwise GC can see bad pointers.
			y := &xy[1]
			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
			y.k = add(unsafe.Pointer(y.b), dataOffset)
			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
		}

		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					throw("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				var useY uint8
				if !h.sameSizeGrow() {
					// Compute hash to make our evacuation decision (whether we need
					// to send this key/value to bucket x or bucket y).
					hash := t.key.alg.hash(k2, uintptr(h.hash0))
					if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash. Moreover,
						// it isn't reproducible. Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way. Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						useY = top & 1
						top = tophash(hash)
					} else {
						if hash&newbit != 0 {
							useY = 1
						}
					}
				}

				if evacuatedX+1 != evacuatedY {
					throw("bad evacuatedN")
				}

				b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY
				dst := &xy[useY]                 // evacuation destination

				if dst.i == bucketCnt {
					dst.b = h.newoverflow(t, dst.b)
					dst.i = 0
					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
				}
				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
				if t.indirectkey {
					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
				} else {
					typedmemmove(t.key, dst.k, k) // copy value
				}
				if t.indirectvalue {
					*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
				} else {
					typedmemmove(t.elem, dst.v, v)
				}
				dst.i++
				// These updates might push these pointers past the end of the
				// key or value arrays. That's ok, as we have the overflow pointer
				// at the end of the bucket to protect against pointing past the
				// end of the bucket.
				dst.k = add(dst.k, uintptr(t.keysize))
				dst.v = add(dst.v, uintptr(t.valuesize))
			}
		}
		// Unlink the overflow buckets & clear key/value to help GC.
		if h.flags&oldIterator == 0 && t.bucket.kind&kindNoPointers == 0 {
			b := add(h.oldbuckets, oldbucket*uintptr(t.bucketsize))
			// Preserve b.tophash because the evacuation
			// state is maintained there.
			ptr := add(b, dataOffset)
			n := uintptr(t.bucketsize) - dataOffset
			memclrHasPointers(ptr, n)
		}
	}

	if oldbucket == h.nevacuate {
		advanceEvacuationMark(h, t, newbit)
	}
}

func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
	h.nevacuate++
	// Experiments suggest that 1024 is overkill by at least an order of magnitude.
	// Put it in there as a safeguard anyway, to ensure O(1) behavior.
	stop := h.nevacuate + 1024
	if stop > newbit {
		stop = newbit
	}
	for h.nevacuate != stop && bucketEvacuated(t, h, h.nevacuate) {
		h.nevacuate++
	}
	if h.nevacuate == newbit { // newbit == # of oldbuckets
		// Growing is all done. Free old main bucket array.
		h.oldbuckets = nil
		// Can discard old overflow buckets as well.
		// If they are still referenced by an iterator,
		// then the iterator holds a pointer to the slice.
		if h.extra != nil {
			h.extra.oldoverflow = nil
		}
		h.flags &^= sameSizeGrow
	}
}

func ismapkey(t *_type) bool {
	return t.alg.hash != nil
}

// Reflect stubs. Called from ../reflect/asm_*.s

//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
	// Check invariants and reflects math.
	if sz := unsafe.Sizeof(hmap{}); sz != t.hmap.size {
		println("runtime: sizeof(hmap) =", sz, ", t.hmap.size =", t.hmap.size)
		throw("bad hmap size")
	}
	if !ismapkey(t.key) {
		throw("runtime.reflect_makemap: unsupported map key type")
	}
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		throw("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		throw("value size wrong")
	}
	if t.key.align > bucketCnt {
		throw("key align too big")
	}
	if t.elem.align > bucketCnt {
		throw("value align too big")
	}
	if t.key.size%uintptr(t.key.align) != 0 {
		throw("key size not a multiple of key align")
	}
	if t.elem.size%uintptr(t.elem.align) != 0 {
		throw("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		throw("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		throw("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		throw("need padding in bucket (value)")
	}

	return makemap(t, cap, nil)
}

//go:linkname reflect_mapaccess reflect.mapaccess
func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	val, ok := mapaccess2(t, h, key)
	if !ok {
		// reflect wants nil for a missing element
		val = nil
	}
	return val
}

//go:linkname reflect_mapassign reflect.mapassign
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	p := mapassign(t, h, key)
	typedmemmove(t.elem, p, val)
}

//go:linkname reflect_mapdelete reflect.mapdelete
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}

//go:linkname reflect_mapiterinit reflect.mapiterinit
func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
	it := new(hiter)
	mapiterinit(t, h, it)
	return it
}

//go:linkname reflect_mapiternext reflect.mapiternext
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}

//go:linkname reflect_mapiterkey reflect.mapiterkey
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}

//go:linkname reflect_maplen reflect.maplen
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc()
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}

//go:linkname reflect_ismapkey reflect.ismapkey
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}

const maxZero = 1024 // must match value in ../cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
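
// Illustrative sketch (annotator's addition, not part of the original runtime
// source): when a lookup misses, mapaccess1/mapaccess2 hand back a pointer into
// the shared zeroVal buffer above, which only works while the value type fits in
// maxZero bytes. For larger value types the compiler emits calls to the *_fat
// variants instead, passing a pointer to a type-specific zero value.
func zeroBufferFitsDemo(t *maptype) bool {
	return t.elem.size <= maxZero // true: &zeroVal[0] can stand in for a missing value
}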