github.com/m10x/go/src@v0.0.0-20220112094212-ba61592315da/runtime/mbitmap.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: type and heap bitmaps.
//
// Stack, data, and bss bitmaps
//
// Stack frames and global variables in the data and bss sections are
// described by bitmaps with 1 bit per pointer-sized word. A "1" bit
// means the word is a live pointer to be visited by the GC (referred to
// as "pointer"). A "0" bit means the word should be ignored by GC
// (referred to as "scalar", though it could be a dead pointer value).
//
// Heap bitmap
//
// The heap bitmap comprises 2 bits for each pointer-sized word in the heap,
// stored in the heapArena metadata backing each heap arena.
// That is, if ha is the heapArena for the arena starting at start,
// then ha.bitmap[0] holds the 2-bit entries for the four words start
// through start+3*ptrSize, ha.bitmap[1] holds the entries for
// start+4*ptrSize through start+7*ptrSize, and so on.
//
// In each 2-bit entry, the lower bit is a pointer/scalar bit, just
// like in the stack/data bitmaps described above. The upper bit
// indicates scan/dead: a "1" value ("scan") indicates that there may
// be pointers in later words of the allocation, and a "0" value
// ("dead") indicates there are no more pointers in the allocation. If
// the upper bit is 0, the lower bit must also be 0, and this
// indicates scanning can ignore the rest of the allocation.
//
// The 2-bit entries are split when written into the byte, so that the top half
// of the byte contains 4 high (scan) bits and the bottom half contains 4 low
// (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to
// keep the pointer bits contiguous, instead of having to space them out.
//
// The code makes use of the fact that the zero value for a heap
// bitmap means scalar/dead. This property must be preserved when
// modifying the encoding.
//
// The bitmap for noscan spans is not maintained. Code must ensure
// that an object is scannable before consulting its bitmap by
// checking either the noscan bit in the span or by consulting its
// type's information.

package runtime

import (
	"internal/goarch"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	bitPointer = 1 << 0
	bitScan    = 1 << 4

	heapBitsShift      = 1     // shift offset between successive bitPointer or bitScan entries
	wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte

	// all scan/pointer bits in a byte
	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
)

// addb returns the byte pointer p+n.
//go:nowritebarrier
//go:nosplit
func addb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
}
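// For illustration only, a sketch of how the split encoding above is
// consulted for one heap word (bm, addr, and ptrSize are hypothetical
// stand-ins for an arena's bitmap array, a heap address within it, and
// goarch.PtrSize; compare heapBitsForAddr and heapBits.bits below):
//
//	entry := bm[(addr/(ptrSize*4))%len(bm)] // byte covering 4 words
//	shift := (addr / ptrSize) & 3           // which of the 4 words
//	isPtr := entry>>shift&bitPointer != 0   // low nibble: pointer bit
//	more := entry>>shift&bitScan != 0       // high nibble: scan bit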
// subtractb returns the byte pointer p-n.
//go:nowritebarrier
//go:nosplit
func subtractb(p *byte, n uintptr) *byte {
	// Note: wrote out full expression instead of calling add(p, -n)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
}

// add1 returns the byte pointer p+1.
//go:nowritebarrier
//go:nosplit
func add1(p *byte) *byte {
	// Note: wrote out full expression instead of calling addb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
}

// subtract1 returns the byte pointer p-1.
//go:nowritebarrier
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func subtract1(p *byte) *byte {
	// Note: wrote out full expression instead of calling subtractb(p, 1)
	// to reduce the number of temporaries generated by the
	// compiler for this trivial expression during inlining.
	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
}

// heapBits provides access to the bitmap bits for a single heap word.
// The methods on heapBits take value receivers so that the compiler
// can more easily inline calls to those methods and registerize the
// struct fields independently.
type heapBits struct {
	bitp  *uint8
	shift uint32
	arena uint32 // Index of heap arena containing bitp
	last  *uint8 // Last byte of the arena's bitmap
}

// Make the compiler check that heapBits.arena is large enough to hold
// the maximum arena frame number.
var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1}

// markBits provides access to the mark bit for an object in the heap.
// bytep points to the byte holding the mark bit.
// mask is a byte with a single bit set that can be &ed with *bytep
// to see if the bit has been set.
// *m.bytep&m.mask != 0 indicates the mark bit is set.
// index can be used along with span information to generate
// the address of the object in the heap.
// We maintain one set of mark bits for allocation and one for
// marking purposes.
type markBits struct {
	bytep *uint8
	mask  uint8
	index uintptr
}

//go:nosplit
func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
	bytep, mask := s.allocBits.bitp(allocBitIndex)
	return markBits{bytep, mask, allocBitIndex}
}

// refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
// and negates them so that ctz (count trailing zeros) instructions
// can be used. It then places these 8 bytes into the cached 64 bit
// s.allocCache.
func (s *mspan) refillAllocCache(whichByte uintptr) {
	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
	aCache := uint64(0)
	aCache |= uint64(bytes[0])
	aCache |= uint64(bytes[1]) << (1 * 8)
	aCache |= uint64(bytes[2]) << (2 * 8)
	aCache |= uint64(bytes[3]) << (3 * 8)
	aCache |= uint64(bytes[4]) << (4 * 8)
	aCache |= uint64(bytes[5]) << (5 * 8)
	aCache |= uint64(bytes[6]) << (6 * 8)
	aCache |= uint64(bytes[7]) << (7 * 8)
	s.allocCache = ^aCache
}
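// For illustration only: because refillAllocCache stores the negated
// alloc bits, a set bit in s.allocCache marks a free slot, so a single
// count-trailing-zeros finds the next free index (compare nextFreeIndex
// below). A sketch with hypothetical values:
//
//	allocBits := uint64(0b0000_0111) // objects 0, 1, 2 allocated
//	cache := ^allocBits              // free slots become 1 bits
//	next := sys.Ctz64(cache)         // == 3, the first free object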
// nextFreeIndex returns the index of the next free object in s at
// or after s.freeindex.
// There are hardware instructions that can be used to make this
// faster if profiling warrants it.
func (s *mspan) nextFreeIndex() uintptr {
	sfreeindex := s.freeindex
	snelems := s.nelems
	if sfreeindex == snelems {
		return sfreeindex
	}
	if sfreeindex > snelems {
		throw("s.freeindex > s.nelems")
	}

	aCache := s.allocCache

	bitIndex := sys.Ctz64(aCache)
	for bitIndex == 64 {
		// Move index to start of next cached bits.
		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
		if sfreeindex >= snelems {
			s.freeindex = snelems
			return snelems
		}
		whichByte := sfreeindex / 8
		// Refill s.allocCache with the next 64 alloc bits.
		s.refillAllocCache(whichByte)
		aCache = s.allocCache
		bitIndex = sys.Ctz64(aCache)
		// Nothing available in cached bits;
		// grab the next 8 bytes and try again.
	}
	result := sfreeindex + uintptr(bitIndex)
	if result >= snelems {
		s.freeindex = snelems
		return snelems
	}

	s.allocCache >>= uint(bitIndex + 1)
	sfreeindex = result + 1

	if sfreeindex%64 == 0 && sfreeindex != snelems {
		// We just incremented s.freeindex so it isn't 0.
		// As each 1 in s.allocCache was encountered and used for allocation
		// it was shifted away. At this point s.allocCache contains all 0s.
		// Refill s.allocCache so that it corresponds
		// to the bits at s.allocBits starting at s.freeindex.
		whichByte := sfreeindex / 8
		s.refillAllocCache(whichByte)
	}
	s.freeindex = sfreeindex
	return result
}

// isFree reports whether the index'th object in s is unallocated.
//
// The caller must ensure s.state is mSpanInUse, and there must have
// been no preemption points since ensuring this (which could allow a
// GC transition, which would allow the state to change).
func (s *mspan) isFree(index uintptr) bool {
	if index < s.freeindex {
		return false
	}
	bytep, mask := s.allocBits.bitp(index)
	return *bytep&mask == 0
}

// divideByElemSize returns n/s.elemsize.
// n must be within [0, s.npages*_PageSize),
// or may be exactly s.npages*_PageSize
// if s.elemsize is from sizeclasses.go.
func (s *mspan) divideByElemSize(n uintptr) uintptr {
	const doubleCheck = false

	// See explanation in mksizeclasses.go's computeDivMagic.
	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)

	if doubleCheck && q != n/s.elemsize {
		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
		throw("bad magic division")
	}
	return q
}

func (s *mspan) objIndex(p uintptr) uintptr {
	return s.divideByElemSize(p - s.base())
}

func markBitsForAddr(p uintptr) markBits {
	s := spanOf(p)
	objIndex := s.objIndex(p)
	return s.markBitsForIndex(objIndex)
}

func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
	bytep, mask := s.gcmarkBits.bitp(objIndex)
	return markBits{bytep, mask, objIndex}
}

func (s *mspan) markBitsForBase() markBits {
	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
}

// isMarked reports whether mark bit m is set.
func (m markBits) isMarked() bool {
	return *m.bytep&m.mask != 0
}
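// For illustration only: divideByElemSize above replaces a hardware
// divide with a 32.32 fixed-point multiply, in the spirit of
// mksizeclasses.go's computeDivMagic. A sketch with hypothetical values:
//
//	elemsize := uintptr(48)
//	divMul := uint32(^uint32(0)/48 + 1)            // roughly ceil(2^32 / elemsize)
//	n := uintptr(96)
//	q := uintptr(uint64(n) * uint64(divMul) >> 32) // == n/48 == 2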
// setMarked sets the marked bit in the markbits, atomically.
func (m markBits) setMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.Or8(m.bytep, m.mask)
}

// setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
func (m markBits) setMarkedNonAtomic() {
	*m.bytep |= m.mask
}

// clearMarked clears the marked bit in the markbits, atomically.
func (m markBits) clearMarked() {
	// Might be racing with other updates, so use atomic update always.
	// We used to be clever here and use a non-atomic update in certain
	// cases, but it's not worth the risk.
	atomic.And8(m.bytep, ^m.mask)
}

// markBitsForSpan returns the markBits for the span base address base.
func markBitsForSpan(base uintptr) (mbits markBits) {
	mbits = markBitsForAddr(base)
	if mbits.mask != 1 {
		throw("markBitsForSpan: unaligned start")
	}
	return mbits
}

// advance advances the markBits to the next object in the span.
func (m *markBits) advance() {
	if m.mask == 1<<7 {
		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
		m.mask = 1
	} else {
		m.mask = m.mask << 1
	}
	m.index++
}

// heapBitsForAddr returns the heapBits for the address addr.
// The caller must ensure addr is in an allocated span.
// In particular, be careful not to point past the end of an object.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func heapBitsForAddr(addr uintptr) (h heapBits) {
	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
	arena := arenaIndex(addr)
	ha := mheap_.arenas[arena.l1()][arena.l2()]
	// The compiler uses a load for nil checking ha, but in this
	// case we'll almost never hit that cache line again, so it
	// makes more sense to do a value check.
	if ha == nil {
		// addr is not in the heap. Return nil heapBits, which
		// we expect to crash in the caller.
		return
	}
	h.bitp = &ha.bitmap[(addr/(goarch.PtrSize*4))%heapArenaBitmapBytes]
	h.shift = uint32((addr / goarch.PtrSize) & 3)
	h.arena = uint32(arena)
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return
}

// clobberdeadPtr is a special value that is used by the compiler to
// clobber dead stack slots, when the -clobberdead flag is set.
const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
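// For illustration only: advance above walks one mark bit per object,
// carrying into the next bitmap byte every 8 objects. A sketch assuming
// a hypothetical span s with at least 9 objects:
//
//	m := s.markBitsForBase() // m.mask == 1, m.index == 0
//	for i := 0; i < 9; i++ {
//		m.advance()
//	}
//	// Now m.index == 9, m.mask == 1<<(9%8), and m.bytep has moved
//	// forward one byte.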
// badPointer throws a "found bad pointer in Go heap" panic.
func badPointer(s *mspan, p, refBase, refOff uintptr) {
	// Typically this indicates an incorrect use
	// of unsafe or cgo to store a bad pointer in
	// the Go heap. It may also indicate a runtime
	// bug.
	//
	// TODO(austin): We could be more aggressive
	// and detect pointers to unallocated objects
	// in allocated spans.
	printlock()
	print("runtime: pointer ", hex(p))
	if s != nil {
		state := s.state.get()
		if state != mSpanInUse {
			print(" to unallocated span")
		} else {
			print(" to unused region of span")
		}
		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
	}
	print("\n")
	if refBase != 0 {
		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
		gcDumpObject("object", refBase, refOff)
	}
	getg().m.traceback = 2
	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
}

// findObject returns the base address for the heap object containing
// the address p, the object's span, and the index of the object in s.
// If p does not point into a heap object, it returns base == 0.
//
// If p is an invalid heap pointer and debug.invalidptr != 0,
// findObject panics.
//
// refBase and refOff optionally give the base address of the object
// in which the pointer p was found and the byte offset at which it
// was found. These are used for error reporting.
//
// It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
// Since p is a uintptr, it would not be adjusted if the stack were to move.
//go:nosplit
func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
	s = spanOf(p)
	// If s is nil, the virtual address has never been part of the heap.
	// This pointer may be to some mmap'd region, so we allow it.
	if s == nil {
		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
			// as they are the only platforms where the compiler's clobberdead
			// mode is implemented. On these platforms clobberdeadPtr cannot
			// be a valid address.
			badPointer(s, p, refBase, refOff)
		}
		return
	}
	// If p is a bad pointer, it may not be in s's bounds.
	//
	// Check s.state to synchronize with span initialization
	// before checking other fields. See also spanOfHeap.
	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
		// Pointers into stacks are also ok, the runtime manages these explicitly.
		if state == mSpanManual {
			return
		}
		// The following ensures that we are rigorous about what data
		// structures hold valid pointers.
		if debug.invalidptr != 0 {
			badPointer(s, p, refBase, refOff)
		}
		return
	}

	objIndex = s.objIndex(p)
	base = s.base() + objIndex*s.elemsize
	return
}

// verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
//go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
func reflect_verifyNotInHeapPtr(p uintptr) bool {
	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
	// Since we're already promised that p doesn't point into the heap, just disallow heap
	// pointers and the special clobbered pointer.
	return spanOf(p) == nil && p != clobberdeadPtr
}
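// For illustration only: findObject rounds an interior pointer down to
// its object's base using the span's element size. A sketch with
// hypothetical values for a span based at 0x1000 with 48-byte elements:
//
//	p := uintptr(0x1040)                  // interior pointer
//	objIndex := (p - 0x1000) / 48         // == 1
//	base := uintptr(0x1000) + objIndex*48 // == 0x1030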
// next returns the heapBits describing the next pointer-sized word in memory.
// That is, if h describes address p, h.next() describes p+ptrSize.
// Note that next does not modify h. The caller must record the result.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) next() heapBits {
	if h.shift < 3*heapBitsShift {
		h.shift += heapBitsShift
	} else if h.bitp != h.last {
		h.bitp, h.shift = add1(h.bitp), 0
	} else {
		// Move to the next arena.
		return h.nextArena()
	}
	return h
}

// nextArena advances h to the beginning of the next heap arena.
//
// This is a slow-path helper to next. gc's inliner knows that
// heapBits.next can be inlined even though it calls this. This is
// marked noinline so it doesn't get inlined into next and cause next
// to be too big to inline.
//
//go:nosplit
//go:noinline
func (h heapBits) nextArena() heapBits {
	h.arena++
	ai := arenaIdx(h.arena)
	l2 := mheap_.arenas[ai.l1()]
	if l2 == nil {
		// We just passed the end of the object, which
		// was also the end of the heap. Poison h. It
		// should never be dereferenced at this point.
		return heapBits{}
	}
	ha := l2[ai.l2()]
	if ha == nil {
		return heapBits{}
	}
	h.bitp, h.shift = &ha.bitmap[0], 0
	h.last = &ha.bitmap[len(ha.bitmap)-1]
	return h
}

// forward returns the heapBits describing n pointer-sized words ahead of h in memory.
// That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
// h.forward(1) is equivalent to h.next(), just slower.
// Note that forward does not modify h. The caller must record the result.
//go:nosplit
func (h heapBits) forward(n uintptr) heapBits {
	n += uintptr(h.shift) / heapBitsShift
	nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4
	h.shift = uint32(n%4) * heapBitsShift
	if nbitp <= uintptr(unsafe.Pointer(h.last)) {
		h.bitp = (*uint8)(unsafe.Pointer(nbitp))
		return h
	}

	// We're in a new heap arena.
	past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1)
	h.arena += 1 + uint32(past/heapArenaBitmapBytes)
	ai := arenaIdx(h.arena)
	if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil {
		a := l2[ai.l2()]
		h.bitp = &a.bitmap[past%heapArenaBitmapBytes]
		h.last = &a.bitmap[len(a.bitmap)-1]
	} else {
		h.bitp, h.last = nil, nil
	}
	return h
}

// forwardOrBoundary is like forward, but stops at boundaries between
// contiguous sections of the bitmap. It returns the number of words
// advanced over, which will be <= n.
func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) {
	maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp)))
	if n > maxn {
		n = maxn
	}
	return h.forward(n), n
}

// bits returns the heap bits for the current word.
// The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
// The result includes in its higher bits the bits for subsequent words
// described by the same bitmap byte.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) bits() uint32 {
	// The (shift & 31) eliminates a test and conditional branch
	// from the generated code.
	return uint32(*h.bitp) >> (h.shift & 31)
}

// morePointers reports whether this word or any later word in this
// object may contain a pointer, that is, whether the scan bit is set;
// when it reports false, this word and all remaining words are scalars.
// h must not describe the second word of the object.
func (h heapBits) morePointers() bool {
	return h.bits()&bitScan != 0
}
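// For illustration only: forward(n) computes in one step what n calls
// to next compute one word at a time. A sketch, assuming h describes
// some heap word p:
//
//	h1 := h.next().next().next()
//	h2 := h.forward(3)
//	// h1 and h2 both describe p+3*ptrSize.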
// isPointer reports whether the heap bits describe a pointer word.
//
// nosplit because it is used during write barriers and must not be preempted.
//go:nosplit
func (h heapBits) isPointer() bool {
	return h.bits()&bitPointer != 0
}

// bulkBarrierPreWrite executes a write barrier
// for every pointer slot in the memory range [src, src+size),
// using pointer/scalar information from [dst, dst+size).
// This executes the write barriers necessary before a memmove.
// src, dst, and size must be pointer-aligned.
// The range [dst, dst+size) must lie within a single object.
// It does not perform the actual writes.
//
// As a special case, src == 0 indicates that this is being used for a
// memclr. bulkBarrierPreWrite will pass 0 for the src of each write
// barrier.
//
// Callers should call bulkBarrierPreWrite immediately before
// calling memmove(dst, src, size). This function is marked nosplit
// to avoid being preempted; the GC must not stop the goroutine
// between the memmove and the execution of the barriers.
// The caller is also responsible for cgo pointer checks if this
// may be writing Go pointers into non-Go memory.
//
// The pointer bitmap is not maintained for allocations containing
// no pointers at all; any caller of bulkBarrierPreWrite must first
// make sure the underlying allocation contains pointers, usually
// by checking typ.ptrdata.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func bulkBarrierPreWrite(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWrite: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	if s := spanOf(dst); s == nil {
		// If dst is a global, use the data or BSS bitmaps to
		// execute write barriers.
		for _, datap := range activeModules() {
			if datap.data <= dst && dst < datap.edata {
				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
				return
			}
		}
		for _, datap := range activeModules() {
			if datap.bss <= dst && dst < datap.ebss {
				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
				return
			}
		}
		return
	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
		// dst was heap memory at some point, but isn't now.
		// It can't be a global. It must be either our stack,
		// or in the case of direct channel sends, it could be
		// another stack. Either way, no need for barriers.
		// This will also catch if dst is in a freed span,
		// though that should never happen.
		return
	}

	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	if src == 0 {
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	} else {
		for i := uintptr(0); i < size; i += goarch.PtrSize {
			if h.isPointer() {
				dstx := (*uintptr)(unsafe.Pointer(dst + i))
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
			h = h.next()
		}
	}
}
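// For illustration only, the calling pattern the comment above
// prescribes (dst, src, and size are hypothetical pointer-aligned
// values with [dst, dst+size) inside a single object):
//
//	bulkBarrierPreWrite(dst, src, size) // barriers first, no preemption...
//	memmove(unsafe.Pointer(dst), unsafe.Pointer(src), size) // ...then the move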
// bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
// does not execute write barriers for [dst, dst+size).
//
// In addition to the requirements of bulkBarrierPreWrite,
// callers need to ensure [dst, dst+size) is zeroed.
//
// This is used for special cases where e.g. dst was just
// created and zeroed with malloc.
//go:nosplit
func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
		throw("bulkBarrierPreWriteSrcOnly: unaligned arguments")
	}
	if !writeBarrier.needed {
		return
	}
	buf := &getg().m.p.ptr().wbBuf
	h := heapBitsForAddr(dst)
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if h.isPointer() {
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(0, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
		h = h.next()
	}
}

// bulkBarrierBitmap executes write barriers for copying from [src,
// src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
// assumed to start maskOffset bytes into the data covered by the
// bitmap in bits (which may not be a multiple of 8).
//
// This is used by bulkBarrierPreWrite for writes to data and BSS.
//
//go:nosplit
func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
	word := maskOffset / goarch.PtrSize
	bits = addb(bits, word/8)
	mask := uint8(1) << (word % 8)

	buf := &getg().m.p.ptr().wbBuf
	for i := uintptr(0); i < size; i += goarch.PtrSize {
		if mask == 0 {
			bits = addb(bits, 1)
			if *bits == 0 {
				// Skip 8 words.
				i += 7 * goarch.PtrSize
				continue
			}
			mask = 1
		}
		if *bits&mask != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			if src == 0 {
				if !buf.putFast(*dstx, 0) {
					wbBufFlush(nil, 0)
				}
			} else {
				srcx := (*uintptr)(unsafe.Pointer(src + i))
				if !buf.putFast(*dstx, *srcx) {
					wbBufFlush(nil, 0)
				}
			}
		}
		mask <<= 1
	}
}
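// For illustration only: the 1-bit bitmap addressing used by
// bulkBarrierBitmap above, for a hypothetical byte offset off into the
// region the bitmap covers:
//
//	word := off / goarch.PtrSize   // word index into the region
//	bytep := addb(bits, word/8)    // bitmap byte holding that word's bit
//	mask := uint8(1) << (word % 8) // bit within that byte
//	isPtr := *bytep&mask != 0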
// typeBitsBulkBarrier executes a write barrier for every
// pointer that would be copied from [src, src+size) to [dst,
// dst+size) by a memmove using the type bitmap to locate those
// pointer slots.
//
// The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
// dst, src, and size must be pointer-aligned.
// The type typ must have a plain bitmap, not a GC program.
// The only use of this function is in channel sends, and the
// 64 kB channel element limit takes care of this for us.
//
// Must not be preempted because it typically runs right before memmove,
// and the GC must observe the barriers and the memmove as an atomic action.
//
// Callers must perform cgo checks if writeBarrier.cgo.
//
//go:nosplit
func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
	if typ == nil {
		throw("runtime: typeBitsBulkBarrier without type")
	}
	if typ.size != size {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if typ.kind&kindGCProg != 0 {
		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
		throw("runtime: invalid typeBitsBulkBarrier")
	}
	if !writeBarrier.needed {
		return
	}
	ptrmask := typ.gcdata
	buf := &getg().m.p.ptr().wbBuf
	var bits uint32
	for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
		if i&(goarch.PtrSize*8-1) == 0 {
			bits = uint32(*ptrmask)
			ptrmask = addb(ptrmask, 1)
		} else {
			bits = bits >> 1
		}
		if bits&1 != 0 {
			dstx := (*uintptr)(unsafe.Pointer(dst + i))
			srcx := (*uintptr)(unsafe.Pointer(src + i))
			if !buf.putFast(*dstx, *srcx) {
				wbBufFlush(nil, 0)
			}
		}
	}
}

// The methods operating on spans all require that h has been returned
// by heapBitsForSpan and that size, n, total are the span layout description
// returned by the mspan's layout method.
// If total > size*n, it means that there is extra leftover memory in the span,
// usually due to rounding.
//
// TODO(rsc): Perhaps introduce a different heapBitsSpan type.

// initSpan initializes the heap bitmap for a span.
// If this is a span of pointer-sized objects, it initializes all
// words to pointer/scan.
// Otherwise, it initializes all words to scalar/dead.
func (h heapBits) initSpan(s *mspan) {
	// Clear bits corresponding to objects.
	nw := (s.npages << _PageShift) / goarch.PtrSize
	if nw%wordsPerBitmapByte != 0 {
		throw("initSpan: unaligned length")
	}
	if h.shift != 0 {
		throw("initSpan: unaligned base")
	}
	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
	for nw > 0 {
		hNext, anw := h.forwardOrBoundary(nw)
		nbyte := anw / wordsPerBitmapByte
		if isPtrs {
			bitp := h.bitp
			for i := uintptr(0); i < nbyte; i++ {
				*bitp = bitPointerAll | bitScanAll
				bitp = add1(bitp)
			}
		} else {
			memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte)
		}
		h = hNext
		nw -= anw
	}
}

// countAlloc returns the number of objects allocated in span s by
// scanning the mark bitmap.
func (s *mspan) countAlloc() int {
	count := 0
	bytes := divRoundUp(s.nelems, 8)
	// Iterate over each 8-byte chunk and count allocations
	// with an intrinsic. Note that newMarkBits guarantees that
	// gcmarkBits will be 8-byte aligned, so we don't have to
	// worry about edge cases, irrelevant bits will simply be zero.
	for i := uintptr(0); i < bytes; i += 8 {
		// Extract 64 bits from the byte pointer and take its OnesCount.
		// Note that the unsafe cast here doesn't preserve endianness,
		// but that's OK. We only care about how many bits are 1, not
		// about the order we discover them in.
		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
		count += sys.OnesCount64(mrkBits)
	}
	return count
}
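// For illustration only: the popcount step in countAlloc above, with a
// hypothetical 64-bit chunk of mark bits:
//
//	chunk := uint64(0b1011)         // three objects marked
//	count := sys.OnesCount64(chunk) // == 3, regardless of bit order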
// heapBitsSetType records that the new allocation [x, x+size)
// holds in [x, x+dataSize) one or more values of type typ.
// (The number of values is given by dataSize / typ.size.)
// If dataSize < size, the fragment [x+dataSize, x+size) is
// recorded as non-pointer data.
// It is known that the type has pointers somewhere;
// malloc does not call heapBitsSetType when there are no pointers,
// because all free objects are marked as noscan during
// heapBitsSweepSpan.
//
// There can only be one allocation from a given span active at a time,
// and the bitmap for a span always falls on byte boundaries,
// so there are no write-write races for access to the heap bitmap.
// Hence, heapBitsSetType can access the bitmap without atomics.
//
// There can be read-write races between heapBitsSetType and things
// that read the heap bitmap like scanobject. However, since
// heapBitsSetType is only used for objects that have not yet been
// made reachable, readers will ignore bits being modified by this
// function. This does mean this function cannot transiently modify
// bits that belong to neighboring objects. Also, on weakly-ordered
// machines, callers must execute a store/store (publication) barrier
// between calling this function and making the object reachable.
func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
	const doubleCheck = false // slow but helpful; enable to test modifications to this code

	const (
		mask1 = bitPointer | bitScan                        // 00010001
		mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011
		mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111
	)

	// dataSize is always size rounded up to the next malloc size class,
	// except in the case of allocating a defer block, in which case
	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
	// arbitrarily larger.
	//
	// The checks for size == goarch.PtrSize and size == 2*goarch.PtrSize can therefore
	// assume that dataSize == size without checking it explicitly.

	if goarch.PtrSize == 8 && size == goarch.PtrSize {
		// It's one word and it has pointers, it must be a pointer.
		// Since all allocated one-word objects are pointers
		// (non-pointers are aggregated into tinySize allocations),
		// initSpan sets the pointer bits for us. Nothing to do here.
		if doubleCheck {
			h := heapBitsForAddr(x)
			if !h.isPointer() {
				throw("heapBitsSetType: pointer bit missing")
			}
			if !h.morePointers() {
				throw("heapBitsSetType: scan bit missing")
			}
		}
		return
	}

	h := heapBitsForAddr(x)
	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)

	// 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits.
	// Therefore, these objects share a heap bitmap byte with the objects next to them.
	// These are called out as a special case primarily so the code below can assume all
	// objects are at least 4 words long and that their bitmaps start either at the beginning
	// of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively).

	if size == 2*goarch.PtrSize {
		if typ.size == goarch.PtrSize {
			// We're allocating a block big enough to hold two pointers.
			// On 64-bit, that means the actual object must be two pointers,
			// or else we'd have used the one-pointer-sized block.
			// On 32-bit, however, this is the 8-byte block, the smallest one.
			// So it could be that we're allocating one pointer and this was
			// just the smallest block available. Distinguish by checking dataSize.
			// (In general the number of instances of typ being allocated is
			// dataSize/typ.size.)
			if goarch.PtrSize == 4 && dataSize == goarch.PtrSize {
				// 1 pointer object. On 32-bit machines clear the bit for the
				// unused second word.
				*h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
				*h.bitp |= (bitPointer | bitScan) << h.shift
			} else {
				// 2-element array of pointers.
				*h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift
			}
			return
		}
		// Otherwise typ.size must be 2*goarch.PtrSize,
		// and typ.kind&kindGCProg == 0.
		if doubleCheck {
			if typ.size != 2*goarch.PtrSize || typ.kind&kindGCProg != 0 {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
				throw("heapBitsSetType")
			}
		}
		b := uint32(*ptrmask)
		hb := b & 3
		hb |= bitScanAll & ((bitScan << (typ.ptrdata / goarch.PtrSize)) - 1)
		// Clear the bits for this object so we can set the
		// appropriate ones.
		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
		*h.bitp |= uint8(hb << h.shift)
		return
	} else if size == 3*goarch.PtrSize {
		b := uint8(*ptrmask)
		if doubleCheck {
			if b == 0 {
				println("runtime: invalid type ", typ.string())
				throw("heapBitsSetType: called with non-pointer type")
			}
			if goarch.PtrSize != 8 {
				throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit")
			}
			if typ.kind&kindGCProg != 0 {
				throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class")
			}
			if typ.size == 2*goarch.PtrSize {
				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n")
				throw("heapBitsSetType: inconsistent object sizes")
			}
		}
		if typ.size == goarch.PtrSize {
			// The type contains a pointer otherwise heapBitsSetType wouldn't have been called.
			// Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1.
			if doubleCheck && *typ.gcdata != 1 {
				print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, " but *typ.gcdata=", *typ.gcdata, "\n")
				throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class")
			}
			// 3 element array of pointers. Unrolling ptrmask 3 times into p yields 00000111.
			b = 7
		}

		hb := b & 7
		// Set bitScan bits for all pointers.
		hb |= hb << wordsPerBitmapByte
		// First bitScan bit is always set since the type contains pointers.
		hb |= bitScan
		// Second bitScan bit needs to also be set if the third bitScan bit is set.
		hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1

		// For h.shift > 1 heap bits cross a byte boundary and need to be written part
		// to h.bitp and part to the next h.bitp.
		switch h.shift {
		case 0:
			*h.bitp &^= mask3 << 0
			*h.bitp |= hb << 0
		case 1:
			*h.bitp &^= mask3 << 1
			*h.bitp |= hb << 1
		case 2:
			*h.bitp &^= mask2 << 2
			*h.bitp |= (hb & mask2) << 2
			// Two words written to the first byte.
			// Advance two words to get to the next byte.
			h = h.next().next()
			*h.bitp &^= mask1
			*h.bitp |= (hb >> 2) & mask1
		case 3:
			*h.bitp &^= mask1 << 3
			*h.bitp |= (hb & mask1) << 3
			// One word written to the first byte.
			// Advance one word to get to the next byte.
			h = h.next()
			*h.bitp &^= mask2
			*h.bitp |= (hb >> 1) & mask2
		}
		return
	}

	// Copy from 1-bit ptrmask into 2-bit bitmap.
	// The basic approach is to use a single uintptr as a bit buffer,
	// alternating between reloading the buffer and writing bitmap bytes.
	// In general, one load can supply two bitmap byte writes.
	// This is a lot of lines of code, but it compiles into relatively few
	// machine instructions.

	outOfPlace := false
	if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrandn(2) == 0) {
		// This object spans heap arenas, so the bitmap may be
		// discontiguous. Unroll it into the object instead
		// and then copy it out.
		//
		// In doubleCheck mode, we randomly do this anyway to
		// stress test the bitmap copying path.
		outOfPlace = true
		h.bitp = (*uint8)(unsafe.Pointer(x))
		h.last = nil
	}

	var (
		// Ptrmask input.
		p     *byte   // last ptrmask byte read
		b     uintptr // ptrmask bits already loaded
		nb    uintptr // number of bits in b at next read
		endp  *byte   // final ptrmask byte to read (then repeat)
		endnb uintptr // number of valid bits in *endp
		pbits uintptr // alternate source of bits

		// Heap bitmap output.
		w     uintptr // words processed
		nw    uintptr // number of words to process
		hbitp *byte   // next heap bitmap byte to write
		hb    uintptr // bits being prepared for *hbitp
	)

	hbitp = h.bitp

	// Handle GC program. Delayed until this part of the code
	// so that we can use the same double-checking mechanism
	// as the 1-bit case. Nothing above could have encountered
	// GC programs: the cases were all too small.
	if typ.kind&kindGCProg != 0 {
		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
		if doubleCheck {
			// Double-check the heap bits written by GC program
			// by running the GC program to create a 1-bit pointer mask
			// and then jumping to the double-check code below.
			// This doesn't catch bugs shared between the 1-bit and 4-bit
			// GC program execution, but it does catch mistakes specific
			// to just one of those and bugs in heapBitsSetTypeGCProg's
			// implementation of arrays.
			lock(&debugPtrmask.lock)
			if debugPtrmask.data == nil {
				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
			}
			ptrmask = debugPtrmask.data
			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
		}
		goto Phase4
	}

	// Note about sizes:
	//
	// typ.size is the total size of the object (in bytes),
	// and typ.ptrdata is the size of the prefix of the object
	// that contains pointers. That is, the final
	// typ.size - typ.ptrdata bytes contain no pointers.
	// This allows optimization of a common pattern where
	// an object has a small header followed by a large scalar
	// buffer. If we know the pointers are over, we don't have
	// to scan the buffer's heap bitmap at all.
	// The 1-bit ptrmasks are sized to contain only bits for
	// the typ.ptrdata prefix, zero padded out to a full byte
	// of bitmap. This code sets nw (below) so that heap bitmap
	// bits are only written for the typ.ptrdata prefix; if there is
	// more room in the allocated object, the next heap bitmap
	// entry is a 00, indicating that there are no more pointers
	// to scan. So only the ptrmask for the ptrdata bytes is needed.
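	//
	// For illustration only, consider a hypothetical type
	//
	//	type T struct {
	//		p   *byte    // one pointer word
	//		buf [64]byte // pointer-free tail
	//	}
	//
	// Its ptrmask covers only the word holding T.p; the tail's heap
	// bitmap entries are left 00 (dead), so scanning stops early.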
	//
	// Replicated copies are not as nice: if there is an array of
	// objects with scalar tails, all but the last tail have to
	// be initialized, because there is no way to say "skip forward".
	// However, because of the possibility of a repeated type with
	// size not a multiple of 4 pointers (one heap bitmap byte),
	// the code already must handle the last ptrmask byte specially
	// by treating it as containing only the bits for endnb pointers,
	// where endnb <= 4. We represent large scalar tails that must
	// be expanded in the replication by setting endnb larger than 4.
	// This will have the effect of reading many bits out of b,
	// but once the real bits are shifted out, b will supply as many
	// zero bits as we try to read, which is exactly what we need.

	p = ptrmask
	if typ.size < dataSize {
		// Filling in bits for an array of typ.
		// Set up for repetition of ptrmask during main loop.
		// Note that ptrmask describes only a prefix of the object:
		// the bits for the typ.ptrdata prefix, as described above.
		const maxBits = goarch.PtrSize*8 - 7
		if typ.ptrdata/goarch.PtrSize <= maxBits {
			// Entire ptrmask fits in uintptr with room for a byte fragment.
			// Load into pbits and never read from ptrmask again.
			// This is especially important when the ptrmask has
			// fewer than 8 bits in it; otherwise the reload in the middle
			// of the Phase 2 loop would itself need to loop to gather
			// at least 8 bits.

			// Accumulate ptrmask into b.
			// ptrmask is sized to describe only typ.ptrdata, but we record
			// it as describing typ.size bytes, since all the high bits are zero.
			nb = typ.ptrdata / goarch.PtrSize
			for i := uintptr(0); i < nb; i += 8 {
				b |= uintptr(*p) << i
				p = add1(p)
			}
			nb = typ.size / goarch.PtrSize

			// Replicate ptrmask to fill entire pbits uintptr.
			// Doubling and truncating is fewer steps than
			// iterating by nb each time. (nb could be 1.)
			// Since we loaded typ.ptrdata/goarch.PtrSize bits
			// but are pretending to have typ.size/goarch.PtrSize,
			// there might be no replication necessary/possible.
			pbits = b
			endnb = nb
			if nb+nb <= maxBits {
				for endnb <= goarch.PtrSize*8 {
					pbits |= pbits << endnb
					endnb += endnb
				}
				// Truncate to a multiple of original ptrmask.
				// Because nb+nb <= maxBits, nb fits in a byte.
				// Byte division is cheaper than uintptr division.
				endnb = uintptr(maxBits/byte(nb)) * nb
				pbits &= 1<<endnb - 1
				b = pbits
				nb = endnb
			}

			// Clear p and endp as sentinel for using pbits.
			// Checked during Phase 2 loop.
			p = nil
			endp = nil
		} else {
			// Ptrmask is larger. Read it multiple times.
			n := (typ.ptrdata/goarch.PtrSize+7)/8 - 1
			endp = addb(ptrmask, n)
			endnb = typ.size/goarch.PtrSize - n*8
		}
	}
	if p != nil {
		b = uintptr(*p)
		p = add1(p)
		nb = 8
	}

	if typ.size == dataSize {
		// Single entry: can stop once we reach the non-pointer data.
		nw = typ.ptrdata / goarch.PtrSize
	} else {
		// Repeated instances of typ in an array.
		// Have to process first N-1 entries in full, but can stop
		// once we reach the non-pointer data in the final entry.
		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / goarch.PtrSize
	}
	if nw == 0 {
		// No pointers! Caller was supposed to check.
		println("runtime: invalid type ", typ.string())
		throw("heapBitsSetType: called with non-pointer type")
		return
	}

	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2).
	// The leading byte is special because it contains the bits for word 1,
	// which does not have the scan bit set.
	// The leading half-byte is special because it's half a byte,
	// so we have to be careful with the bits already there.
	switch {
	default:
		throw("heapBitsSetType: unexpected shift")

	case h.shift == 0:
		// Ptrmask and heap bitmap are aligned.
		//
		// This is a fast path for small objects.
		//
		// The first byte we write out covers the first four
		// words of the object. The scan/dead bit on the first
		// word must be set to scan since there are pointers
		// somewhere in the object.
		// In all following words, we set the scan/dead
		// appropriately to indicate that the object continues
		// to the next 2-bit entry in the bitmap.
		//
		// We set four bits at a time here, but if the object
		// is fewer than four words, phase 3 will clear
		// unnecessary bits.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			goto Phase3
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
		nb -= 4

	case h.shift == 2:
		// Ptrmask and heap bitmap are misaligned.
		//
		// On 32-bit architectures only the 6-word object that corresponds
		// to a 24-byte size class can start with an h.shift of 2 here, since
		// all other non-16-byte-aligned size classes have been handled by
		// special code paths at the beginning of heapBitsSetType on 32 bit.
		//
		// Many size classes are only 16-byte aligned. On 64-bit architectures
		// this results in a heap bitmap position starting with an h.shift of 2.
		//
		// The bits for the first two words are in a byte shared
		// with another object, so we must be careful with the bits
		// already there.
		//
		// We took care of 1-word, 2-word, and 3-word objects above,
		// so this is at least a 6-word object.
		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
		hb |= bitScan << (2 * heapBitsShift)
		if nw > 1 {
			hb |= bitScan << (3 * heapBitsShift)
		}
		b >>= 2
		nb -= 2
		*hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift))
		*hbitp |= uint8(hb)
		hbitp = add1(hbitp)
		if w += 2; w >= nw {
			// We know that there is more data, because we handled 2-word and 3-word objects above.
			// This must be at least a 6-word object. If we're out of pointer words,
			// mark no scan in next bitmap byte and finish.
			hb = 0
			w += 4
			goto Phase3
		}
	}

	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
	// The loop computes the bits for that last write but does not execute the write;
	// it leaves the bits in hb for processing by phase 3.
	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
	// use in the first half of the loop right now, and then we only adjust nb explicitly
	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
	nb -= 4
	for {
		// Emit bitmap byte.
		// b has at least nb+4 bits, with one exception:
		// if w+4 >= nw, then b has only nw-w bits,
		// but we'll stop at the break and then truncate
		// appropriately in Phase 3.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4

		// Load more bits. b has nb right now.
		if p != endp {
			// Fast path: keep reading from ptrmask.
			// nb unmodified: we just loaded 8 bits,
			// and the next iteration will consume 8 bits,
			// leaving us with the same nb the next time we're here.
			if nb < 8 {
				b |= uintptr(*p) << nb
				p = add1(p)
			} else {
				// Reduce the number of bits in b.
				// This is important if we skipped
				// over a scalar tail, since nb could
				// be larger than the bit width of b.
				nb -= 8
			}
		} else if p == nil {
			// Almost as fast path: track bit count and refill from pbits.
			// For short repetitions.
			if nb < 8 {
				b |= pbits << nb
				nb += endnb
			}
			nb -= 8 // for next iteration
		} else {
			// Slow path: reached end of ptrmask.
			// Process final partial byte and rewind to start.
			b |= uintptr(*p) << nb
			nb += endnb
			if nb < 8 {
				b |= uintptr(*ptrmask) << nb
				p = add1(ptrmask)
			} else {
				nb -= 8
				p = ptrmask
			}
		}

		// Emit bitmap byte.
		hb = b & bitPointerAll
		hb |= bitScanAll
		if w += 4; w >= nw {
			break
		}
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		b >>= 4
	}

Phase3:
	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
	if w > nw {
		// Counting the 4 entries in hb not yet written to memory,
		// there are more entries than possible pointer slots.
		// Discard the excess entries (can't be more than 3).
		mask := uintptr(1)<<(4-(w-nw)) - 1
		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
	}

	// Change nw from counting possibly-pointer words to total words in allocation.
	nw = size / goarch.PtrSize

	// Write whole bitmap bytes.
	// The first is hb, the rest are zero.
	if w <= nw {
		*hbitp = uint8(hb)
		hbitp = add1(hbitp)
		hb = 0 // for possible final half-byte below
		for w += 4; w <= nw; w += 4 {
			*hbitp = 0
			hbitp = add1(hbitp)
		}
	}

	// Write final partial bitmap byte if any.
	// We know w > nw, or else we'd still be in the loop above.
	// It can be bigger only due to the 4 entries in hb that it counts.
	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
	// and can discard the 4 sitting in hb.
	// But if w == nw+2, we need to write the first two entries in hb.
	// The byte is shared with the next object, so be careful with
	// existing bits.
	if w == nw+2 {
		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
	}

Phase4:
	// Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary.
	if outOfPlace {
		// TODO: We could probably make this faster by
		// handling [x+dataSize, x+size) specially.
		h := heapBitsForAddr(x)
		// cnw is the number of heap words, or bit pairs
		// remaining (like nw above).
		cnw := size / goarch.PtrSize
		src := (*uint8)(unsafe.Pointer(x))
		// We know the first and last byte of the bitmap are
		// not the same, but it's still possible for a small
		// object to span arenas, so it may share bitmap bytes
		// with neighboring objects.
		//
		// Handle the first byte specially if it's shared. See
		// Phase 1 for why this is the only special case we need.
		if doubleCheck {
			if !(h.shift == 0 || h.shift == 2) {
				print("x=", x, " size=", size, " h.shift=", h.shift, "\n")
				throw("bad start shift")
			}
		}
		if h.shift == 2 {
			*h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src
			h = h.next().next()
			cnw -= 2
			src = addb(src, 1)
		}
		// We're now byte aligned. Copy out to per-arena
		// bitmaps until the last byte (which may again be
		// partial).
		for cnw >= 4 {
			// This loop processes four words at a time,
			// so round cnw down accordingly.
			hNext, words := h.forwardOrBoundary(cnw / 4 * 4)

			// n is the number of bitmap bytes to copy.
			n := words / 4
			memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n)
			cnw -= words
			h = hNext
			src = addb(src, n)
		}
		if doubleCheck && h.shift != 0 {
			print("cnw=", cnw, " h.shift=", h.shift, "\n")
			throw("bad shift after block copy")
		}
		// Handle the last byte if it's shared.
		if cnw == 2 {
			*h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src
			src = addb(src, 1)
			h = h.next().next()
		}
		if doubleCheck {
			if uintptr(unsafe.Pointer(src)) > x+size {
				throw("copy exceeded object size")
			}
			if !(cnw == 0 || cnw == 2) {
				print("x=", x, " size=", size, " cnw=", cnw, "\n")
				throw("bad number of remaining words")
			}
			// Set up hbitp so doubleCheck code below can check it.
			hbitp = h.bitp
		}
		// Zero the object where we wrote the bitmap.
		memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x)
	}

	// Double check the whole bitmap.
	if doubleCheck {
		// x+size may not point to the heap, so back up one
		// word and then advance it the way we do above.
		end := heapBitsForAddr(x + size - goarch.PtrSize)
		if outOfPlace {
			// In out-of-place copying, we just advance
			// using next.
			end = end.next()
		} else {
			// Don't use next because that may advance to
			// the next arena and the in-place logic
			// doesn't do that.
			end.shift += heapBitsShift
			if end.shift == 4*heapBitsShift {
				end.bitp, end.shift = add1(end.bitp), 0
			}
		}
		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
			h0 := heapBitsForAddr(x)
			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
			throw("bad heapBitsSetType")
		}

		// Double-check that bits to be written were written correctly.
		// Does not check that other bits were not written, unfortunately.
		h := heapBitsForAddr(x)
		nptr := typ.ptrdata / goarch.PtrSize
		ndata := typ.size / goarch.PtrSize
		count := dataSize / typ.size
		totalptr := ((count-1)*typ.size + typ.ptrdata) / goarch.PtrSize
		for i := uintptr(0); i < size/goarch.PtrSize; i++ {
			j := i % ndata
			var have, want uint8
			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
			if i >= totalptr {
				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
					// heapBitsSetTypeGCProg always fills
					// in full nibbles of bitScan.
					want = bitScan
				}
			} else {
				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
					want |= bitPointer
				}
				want |= bitScan
			}
			if have != want {
				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
				print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n")
				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
				h0 := heapBitsForAddr(x)
				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
				println("at word", i, "offset", i*goarch.PtrSize, "have", hex(have), "want", hex(want))
				if typ.kind&kindGCProg != 0 {
					println("GC program:")
					dumpGCProg(addb(typ.gcdata, 4))
				}
				throw("bad heapBitsSetType")
			}
			h = h.next()
		}
		if ptrmask == debugPtrmask.data {
			unlock(&debugPtrmask.lock)
		}
	}
}

var debugPtrmask struct {
	lock mutex
	data *byte
}

// heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
// progSize is the size of the memory described by the program.
// elemSize is the size of the element whose prefix the GC program describes.
// dataSize is the total size of the intended data, a multiple of elemSize.
// allocSize is the total size of the allocated memory.
//
// GC programs are only used for large allocations.
// heapBitsSetType requires that allocSize is a multiple of 4 words,
// so that the relevant bitmap bytes are not shared with surrounding
// objects.
func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
	if goarch.PtrSize == 8 && allocSize%(4*goarch.PtrSize) != 0 {
		// Alignment will be wrong.
		throw("heapBitsSetTypeGCProg: small allocation")
	}
	var totalBits uintptr
	if elemSize == dataSize {
		totalBits = runGCProg(prog, nil, h.bitp, 2)
		if totalBits*goarch.PtrSize != progSize {
			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
			throw("heapBitsSetTypeGCProg: unexpected bit count")
		}
	} else {
		count := dataSize / elemSize

		// Piece together program trailer to run after prog that does:
		//	literal(0)
		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
		//	repeat(elemSize, count-1)      // repeat that element for count
		// (sizes in words). This zero-pads the data remaining in the
		// first element and then repeats that first element to fill
		// the array.
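		// For illustration only: with the bytecode described later in
		// this file ("Packed GC pointer bitmaps"), a trailer for
		// hypothetical sizes elemSize = 4 words, progSize = 2 words,
		// count = 3 would be assembled below as:
		//
		//	0x01 0x00      // literal(0): one zero bit
		//	0x81 0x01      // repeat(1, 1): one more zero word
		//	0x80 0x04 0x02 // repeat(4, 2): replay the element twice
		//	0x00           // stop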
		var trailer [40]byte // 3 varints (max 10 each) + some bytes
		i := 0
		if n := elemSize/goarch.PtrSize - progSize/goarch.PtrSize; n > 0 {
			// literal(0)
			trailer[i] = 0x01
			i++
			trailer[i] = 0
			i++
			if n > 1 {
				// repeat(1, n-1)
				trailer[i] = 0x81
				i++
				n--
				for ; n >= 0x80; n >>= 7 {
					trailer[i] = byte(n | 0x80)
					i++
				}
				trailer[i] = byte(n)
				i++
			}
		}
		// repeat(elemSize/ptrSize, count-1)
		trailer[i] = 0x80
		i++
		n := elemSize / goarch.PtrSize
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		n = count - 1
		for ; n >= 0x80; n >>= 7 {
			trailer[i] = byte(n | 0x80)
			i++
		}
		trailer[i] = byte(n)
		i++
		trailer[i] = 0
		i++

		runGCProg(prog, &trailer[0], h.bitp, 2)

		// Even though we filled in the full array just now,
		// record that we only filled in up to the ptrdata of the
		// last element. This will cause the code below to
		// memclr the dead section of the final array element,
		// so that scanobject can stop early in the final element.
		totalBits = (elemSize*(count-1) + progSize) / goarch.PtrSize
	}
	endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4))
	endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/goarch.PtrSize/wordsPerBitmapByte))
	memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg))
}

// progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
// size is the size of the region described by prog, in bytes.
// The resulting bitvector will have no more than size/goarch.PtrSize bits.
func progToPointerMask(prog *byte, size uintptr) bitvector {
	n := (size/goarch.PtrSize + 7) / 8
	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
	x[len(x)-1] = 0xa1 // overflow check sentinel
	n = runGCProg(prog, nil, &x[0], 1)
	if x[len(x)-1] != 0xa1 {
		throw("progToPointerMask: overflow")
	}
	return bitvector{int32(n), &x[0]}
}

// Packed GC pointer bitmaps, aka GC programs.
//
// For large types containing arrays, the type information has a
// natural repetition that can be encoded to save space in the
// binary and in the memory representation of the type information.
//
// The encoding is a simple Lempel-Ziv style bytecode machine
// with the following instructions:
//
//	00000000: stop
//	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
//	10000000 n c: repeat the previous n bits c times; n, c are varints
//	1nnnnnnn c: repeat the previous n bits c times; c is a varint

// runGCProg executes the GC program prog, and then trailer if non-nil,
// writing to dst with entries of the given size.
// If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
// If size == 2, dst is the 2-bit heap bitmap, and writes move backward
// starting at dst (because the heap bitmap does). In this case, the caller guarantees
// that only whole bytes in dst need to be written.
//
// runGCProg returns the number of 1- or 2-bit entries written to memory.
func runGCProg(prog, trailer, dst *byte, size int) uintptr {
	dstStart := dst
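
	// Illustrative example (layout invented here, not in the original
	// source): for an element whose first and third words are pointers,
	// repeated ten times, a program in the encoding above would be
	//	0x03 0x05  emit 3 bits: 101 (pointer, scalar, pointer)
	//	0x83 0x09  repeat the previous 3 bits 9 more times
	//	0x00       stop
	// for a total of 30 bits describing ten 3-word elements.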

	// Bits waiting to be written to memory.
	var bits uintptr
	var nbits uintptr

	p := prog
Run:
	for {
		// Flush accumulated full bytes.
		// The rest of the loop assumes that nbits <= 7.
		for ; nbits >= 8; nbits -= 8 {
			if size == 1 {
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			} else {
				v := bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
				v = bits&bitPointerAll | bitScanAll
				*dst = uint8(v)
				dst = add1(dst)
				bits >>= 4
			}
		}

		// Process one instruction.
		inst := uintptr(*p)
		p = add1(p)
		n := inst & 0x7F
		if inst&0x80 == 0 {
			// Literal bits; n == 0 means end of program.
			if n == 0 {
				// Program is over; continue in trailer if present.
				if trailer != nil {
					p = trailer
					trailer = nil
					continue
				}
				break Run
			}
			nbyte := n / 8
			for i := uintptr(0); i < nbyte; i++ {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				if size == 1 {
					*dst = uint8(bits)
					dst = add1(dst)
					bits >>= 8
				} else {
					v := bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
					v = bits&0xf | bitScanAll
					*dst = uint8(v)
					dst = add1(dst)
					bits >>= 4
				}
			}
			if n %= 8; n > 0 {
				bits |= uintptr(*p) << nbits
				p = add1(p)
				nbits += n
			}
			continue Run
		}

		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
		if n == 0 {
			for off := uint(0); ; off += 7 {
				x := uintptr(*p)
				p = add1(p)
				n |= (x & 0x7F) << off
				if x&0x80 == 0 {
					break
				}
			}
		}

		// Count is encoded in a varint in the next bytes.
		c := uintptr(0)
		for off := uint(0); ; off += 7 {
			x := uintptr(*p)
			p = add1(p)
			c |= (x & 0x7F) << off
			if x&0x80 == 0 {
				break
			}
		}
		c *= n // now total number of bits to copy
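
		// Illustrative note (example bytes invented here, not in the
		// original source): the varints above carry 7 bits per byte,
		// low bits first, with the high bit marking continuation. For
		// instance, the bytes 0xAC 0x02 decode to (0xAC & 0x7F) |
		// (0x02 << 7) = 44 + 256, i.e. a count of 300.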

		// If the number of bits being repeated is small, load them
		// into a register and use that register for the entire loop
		// instead of repeatedly reading from memory.
		// Handling fewer than 8 bits here makes the general loop simpler.
		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
		// it will not overflow.
		src := dst
		const maxBits = goarch.PtrSize*8 - 7
		if n <= maxBits {
			// Start with bits in output buffer.
			pattern := bits
			npattern := nbits

			// If we need more bits, fetch them from memory.
			if size == 1 {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 8
					pattern |= uintptr(*src)
					src = subtract1(src)
					npattern += 8
				}
			} else {
				src = subtract1(src)
				for npattern < n {
					pattern <<= 4
					pattern |= uintptr(*src) & 0xf
					src = subtract1(src)
					npattern += 4
				}
			}

			// We started with the whole bit output buffer,
			// and then we loaded bits from whole bytes.
			// Either way, we might now have too many instead of too few.
			// Discard the extra.
			if npattern > n {
				pattern >>= npattern - n
				npattern = n
			}

			// Replicate pattern to at most maxBits.
			if npattern == 1 {
				// One bit being repeated.
				// If the bit is 1, make the pattern all 1s.
				// If the bit is 0, the pattern is already all 0s,
				// but we can claim that the number of bits
				// in the word is equal to the number we need (c),
				// because right shift of bits will zero fill.
				if pattern == 1 {
					pattern = 1<<maxBits - 1
					npattern = maxBits
				} else {
					npattern = c
				}
			} else {
				b := pattern
				nb := npattern
				if nb+nb <= maxBits {
					// Double pattern until the whole uintptr is filled.
					for nb <= goarch.PtrSize*8 {
						b |= b << nb
						nb += nb
					}
					// Trim away incomplete copy of original pattern in high bits.
					// TODO(rsc): Replace with table lookup or loop on systems without divide?
					nb = maxBits / npattern * npattern
					b &= 1<<nb - 1
					pattern = b
					npattern = nb
				}
			}

			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
			// Since pattern contains >8 bits, there will be full bytes to flush
			// on each iteration.
			for ; c >= npattern; c -= npattern {
				bits |= pattern << nbits
				nbits += npattern
				if size == 1 {
					for nbits >= 8 {
						*dst = uint8(bits)
						dst = add1(dst)
						bits >>= 8
						nbits -= 8
					}
				} else {
					for nbits >= 4 {
						*dst = uint8(bits&0xf | bitScanAll)
						dst = add1(dst)
						bits >>= 4
						nbits -= 4
					}
				}
			}

			// Add final fragment to bit buffer.
			if c > 0 {
				pattern &= 1<<c - 1
				bits |= pattern << nbits
				nbits += c
			}
			continue Run
		}

		// Repeat; n too large to fit in a register.
		// Since nbits <= 7, we know the first few bytes of repeated data
		// are already written to memory.
		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
		if size == 1 {
			// Leading src fragment.
			src = subtractb(src, (off+7)/8)
			if frag := off & 7; frag != 0 {
				bits |= uintptr(*src) >> (8 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 8; i > 0; i-- {
				bits |= uintptr(*src) << nbits
				src = add1(src)
				*dst = uint8(bits)
				dst = add1(dst)
				bits >>= 8
			}
			// Final src fragment.
			if c %= 8; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		} else {
			// Leading src fragment.
			src = subtractb(src, (off+3)/4)
			if frag := off & 3; frag != 0 {
				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
				src = add1(src)
				nbits += frag
				c -= frag
			}
			// Main loop: load one byte, write another.
			// The bits are rotating through the bit buffer.
			for i := c / 4; i > 0; i-- {
				bits |= (uintptr(*src) & 0xf) << nbits
				src = add1(src)
				*dst = uint8(bits&0xf | bitScanAll)
				dst = add1(dst)
				bits >>= 4
			}
			// Final src fragment.
			if c %= 4; c > 0 {
				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
				nbits += c
			}
		}
	}
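
	// Illustrative note (example value invented here, not in the original
	// source): in 2-bit mode each output byte packs four entries, with
	// the four scan bits forced on in the high nibble. For pointer bits
	// 0101 (words 0 and 2 are pointers), the byte written above and
	// below is bitScanAll|0x05 = 0xF0|0x05 = 0xF5.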

	// Write any final bits out, using full-byte writes, even for the final byte.
	var totalBits uintptr
	if size == 1 {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
		nbits += -nbits & 7
		for ; nbits > 0; nbits -= 8 {
			*dst = uint8(bits)
			dst = add1(dst)
			bits >>= 8
		}
	} else {
		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits
		nbits += -nbits & 3
		for ; nbits > 0; nbits -= 4 {
			v := bits&0xf | bitScanAll
			*dst = uint8(v)
			dst = add1(dst)
			bits >>= 4
		}
	}
	return totalBits
}

// materializeGCProg allocates space for the (1-bit) pointer bitmask
// for an object of size ptrdata. Then it fills that space with the
// pointer bitmask specified by the program prog.
// The bitmask starts at s.startAddr.
// The result must be deallocated with dematerializeGCProg.
func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
	// Each word of ptrdata needs one bit in the bitmap.
	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
	// Compute the number of pages needed for bitmapBytes.
	pages := divRoundUp(bitmapBytes, pageSize)
	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
	runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
	return s
}

// dematerializeGCProg frees the span holding a bitmask returned by materializeGCProg.
func dematerializeGCProg(s *mspan) {
	mheap_.freeManual(s, spanAllocPtrScalarBits)
}

func dumpGCProg(p *byte) {
	nptr := 0
	for {
		x := *p
		p = add1(p)
		if x == 0 {
			print("\t", nptr, " end\n")
			break
		}
		if x&0x80 == 0 {
			print("\t", nptr, " lit ", x, ":")
			n := int(x+7) / 8
			for i := 0; i < n; i++ {
				print(" ", hex(*p))
				p = add1(p)
			}
			print("\n")
			nptr += int(x)
		} else {
			nbit := int(x &^ 0x80)
			if nbit == 0 {
				for nb := uint(0); ; nb += 7 {
					x := *p
					p = add1(p)
					nbit |= int(x&0x7f) << nb
					if x&0x80 == 0 {
						break
					}
				}
			}
			count := 0
			for nb := uint(0); ; nb += 7 {
				x := *p
				p = add1(p)
				count |= int(x&0x7f) << nb
				if x&0x80 == 0 {
					break
				}
			}
			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
			nptr += nbit * count
		}
	}
}

// Testing.

func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
	target := (*stkframe)(ctxt)
	if frame.sp <= target.sp && target.sp < frame.varp {
		*target = *frame
		return false
	}
	return true
}

// gcbits returns the GC type info for x, for testing.
// The result is the bitmap entries (0 or 1), one entry per byte.
//go:linkname reflect_gcbits reflect.gcbits
func reflect_gcbits(x any) []byte {
	ret := getgcmask(x)
	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
	nptr := typ.ptrdata / goarch.PtrSize
	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
		ret = ret[:len(ret)-1]
	}
	return ret
}
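
// Illustrative note (type invented here, not in the original source):
// for a value of a struct type such as
//
//	type pair struct {
//		p *int
//		n uintptr
//		q *byte
//	}
//
// reflect_gcbits would be expected to report []byte{1, 0, 1}: one entry
// per pointer-sized word, 1 where the word holds a pointer, with trailing
// zero entries beyond ptrdata trimmed by the loop above.
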
// getgcmask returns the GC type info for the value stored in ep, for testing.
// If ep points to the stack, only static live information will be returned
// (i.e. not for objects which are only dynamically live stack objects).
func getgcmask(ep any) (mask []byte) {
	e := *efaceOf(&ep)
	p := e.data
	t := e._type
	// data or bss
	for _, datap := range activeModules() {
		// data
		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
			bitmap := datap.gcdatamask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}

		// bss
		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
			bitmap := datap.gcbssmask.bytedata
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
			}
			return
		}
	}

	// heap
	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
		hbits := heapBitsForAddr(base)
		n := s.elemsize
		mask = make([]byte, n/goarch.PtrSize)
		for i := uintptr(0); i < n; i += goarch.PtrSize {
			if hbits.isPointer() {
				mask[i/goarch.PtrSize] = 1
			}
			if !hbits.morePointers() {
				mask = mask[:i/goarch.PtrSize]
				break
			}
			hbits = hbits.next()
		}
		return
	}

	// stack
	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
		var frame stkframe
		frame.sp = uintptr(p)
		_g_ := getg()
		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
		if frame.fn.valid() {
			locals, _, _ := getStackMap(&frame, nil, false)
			if locals.n == 0 {
				return
			}
			size := uintptr(locals.n) * goarch.PtrSize
			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
			mask = make([]byte, n/goarch.PtrSize)
			for i := uintptr(0); i < n; i += goarch.PtrSize {
				off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
				mask[i/goarch.PtrSize] = locals.ptrbit(off)
			}
		}
		return
	}

	// Otherwise, not something the GC knows about.
	// Possibly read-only data, like malloc(0).
	// Must not have pointers.
	return
}
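
// Illustrative note (example invented here, not in the original source):
// for a heap-allocated value of a struct type such as
//
//	type scalarFirst struct {
//		n uintptr
//		p *int
//	}
//
// the heap branch above would be expected to produce []byte{0, 1}: the
// 2-bit bitmap walk records isPointer for each word (0 for the scalar n,
// 1 for the pointer p) and truncates the mask at the first entry whose
// scan bit reports that no pointers remain in the object.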