github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/mbitmap.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 // Garbage collector: type and heap bitmaps. 6 // 7 // Stack, data, and bss bitmaps 8 // 9 // Stack frames and global variables in the data and bss sections are 10 // described by bitmaps with 1 bit per pointer-sized word. A "1" bit 11 // means the word is a live pointer to be visited by the GC (referred to 12 // as "pointer"). A "0" bit means the word should be ignored by GC 13 // (referred to as "scalar", though it could be a dead pointer value). 14 // 15 // Heap bitmap 16 // 17 // The heap bitmap comprises 2 bits for each pointer-sized word in the heap, 18 // stored in the heapArena metadata backing each heap arena. 19 // That is, if ha is the heapArena for the arena starting a start, 20 // then ha.bitmap[0] holds the 2-bit entries for the four words start 21 // through start+3*ptrSize, ha.bitmap[1] holds the entries for 22 // start+4*ptrSize through start+7*ptrSize, and so on. 23 // 24 // In each 2-bit entry, the lower bit is a pointer/scalar bit, just 25 // like in the stack/data bitmaps described above. The upper bit 26 // indicates scan/dead: a "1" value ("scan") indicates that there may 27 // be pointers in later words of the allocation, and a "0" value 28 // ("dead") indicates there are no more pointers in the allocation. If 29 // the upper bit is 0, the lower bit must also be 0, and this 30 // indicates scanning can ignore the rest of the allocation. 31 // 32 // The 2-bit entries are split when written into the byte, so that the top half 33 // of the byte contains 4 high (scan) bits and the bottom half contains 4 low 34 // (pointer) bits. This form allows a copy from the 1-bit to the 4-bit form to 35 // keep the pointer bits contiguous, instead of having to space them out. 36 // 37 // The code makes use of the fact that the zero value for a heap 38 // bitmap means scalar/dead. This property must be preserved when 39 // modifying the encoding. 40 // 41 // The bitmap for noscan spans is not maintained. Code must ensure 42 // that an object is scannable before consulting its bitmap by 43 // checking either the noscan bit in the span or by consulting its 44 // type's information. 45 46 package runtime 47 48 import ( 49 "runtime/internal/atomic" 50 "runtime/internal/sys" 51 "unsafe" 52 ) 53 54 const ( 55 bitPointer = 1 << 0 56 bitScan = 1 << 4 57 58 heapBitsShift = 1 // shift offset between successive bitPointer or bitScan entries 59 wordsPerBitmapByte = 8 / 2 // heap words described by one bitmap byte 60 61 // all scan/pointer bits in a byte 62 bitScanAll = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift) 63 bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift) 64 ) 65 66 // addb returns the byte pointer p+n. 67 //go:nowritebarrier 68 //go:nosplit 69 func addb(p *byte, n uintptr) *byte { 70 // Note: wrote out full expression instead of calling add(p, n) 71 // to reduce the number of temporaries generated by the 72 // compiler for this trivial expression during inlining. 73 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n)) 74 } 75 76 // subtractb returns the byte pointer p-n. 77 //go:nowritebarrier 78 //go:nosplit 79 func subtractb(p *byte, n uintptr) *byte { 80 // Note: wrote out full expression instead of calling add(p, -n) 81 // to reduce the number of temporaries generated by the 82 // compiler for this trivial expression during inlining. 83 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n)) 84 } 85 86 // add1 returns the byte pointer p+1. 87 //go:nowritebarrier 88 //go:nosplit 89 func add1(p *byte) *byte { 90 // Note: wrote out full expression instead of calling addb(p, 1) 91 // to reduce the number of temporaries generated by the 92 // compiler for this trivial expression during inlining. 93 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) 94 } 95 96 // subtract1 returns the byte pointer p-1. 97 //go:nowritebarrier 98 // 99 // nosplit because it is used during write barriers and must not be preempted. 100 //go:nosplit 101 func subtract1(p *byte) *byte { 102 // Note: wrote out full expression instead of calling subtractb(p, 1) 103 // to reduce the number of temporaries generated by the 104 // compiler for this trivial expression during inlining. 105 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1)) 106 } 107 108 // heapBits provides access to the bitmap bits for a single heap word. 109 // The methods on heapBits take value receivers so that the compiler 110 // can more easily inline calls to those methods and registerize the 111 // struct fields independently. 112 type heapBits struct { 113 bitp *uint8 114 shift uint32 115 arena uint32 // Index of heap arena containing bitp 116 last *uint8 // Last byte arena's bitmap 117 } 118 119 // Make the compiler check that heapBits.arena is large enough to hold 120 // the maximum arena frame number. 121 var _ = heapBits{arena: (1<<heapAddrBits)/heapArenaBytes - 1} 122 123 // markBits provides access to the mark bit for an object in the heap. 124 // bytep points to the byte holding the mark bit. 125 // mask is a byte with a single bit set that can be &ed with *bytep 126 // to see if the bit has been set. 127 // *m.byte&m.mask != 0 indicates the mark bit is set. 128 // index can be used along with span information to generate 129 // the address of the object in the heap. 130 // We maintain one set of mark bits for allocation and one for 131 // marking purposes. 132 type markBits struct { 133 bytep *uint8 134 mask uint8 135 index uintptr 136 } 137 138 //go:nosplit 139 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits { 140 bytep, mask := s.allocBits.bitp(allocBitIndex) 141 return markBits{bytep, mask, allocBitIndex} 142 } 143 144 // refillAllocCache takes 8 bytes s.allocBits starting at whichByte 145 // and negates them so that ctz (count trailing zeros) instructions 146 // can be used. It then places these 8 bytes into the cached 64 bit 147 // s.allocCache. 148 func (s *mspan) refillAllocCache(whichByte uintptr) { 149 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte))) 150 aCache := uint64(0) 151 aCache |= uint64(bytes[0]) 152 aCache |= uint64(bytes[1]) << (1 * 8) 153 aCache |= uint64(bytes[2]) << (2 * 8) 154 aCache |= uint64(bytes[3]) << (3 * 8) 155 aCache |= uint64(bytes[4]) << (4 * 8) 156 aCache |= uint64(bytes[5]) << (5 * 8) 157 aCache |= uint64(bytes[6]) << (6 * 8) 158 aCache |= uint64(bytes[7]) << (7 * 8) 159 s.allocCache = ^aCache 160 } 161 162 // nextFreeIndex returns the index of the next free object in s at 163 // or after s.freeindex. 164 // There are hardware instructions that can be used to make this 165 // faster if profiling warrants it. 166 func (s *mspan) nextFreeIndex() uintptr { 167 sfreeindex := s.freeindex 168 snelems := s.nelems 169 if sfreeindex == snelems { 170 return sfreeindex 171 } 172 if sfreeindex > snelems { 173 throw("s.freeindex > s.nelems") 174 } 175 176 aCache := s.allocCache 177 178 bitIndex := sys.Ctz64(aCache) 179 for bitIndex == 64 { 180 // Move index to start of next cached bits. 181 sfreeindex = (sfreeindex + 64) &^ (64 - 1) 182 if sfreeindex >= snelems { 183 s.freeindex = snelems 184 return snelems 185 } 186 whichByte := sfreeindex / 8 187 // Refill s.allocCache with the next 64 alloc bits. 188 s.refillAllocCache(whichByte) 189 aCache = s.allocCache 190 bitIndex = sys.Ctz64(aCache) 191 // nothing available in cached bits 192 // grab the next 8 bytes and try again. 193 } 194 result := sfreeindex + uintptr(bitIndex) 195 if result >= snelems { 196 s.freeindex = snelems 197 return snelems 198 } 199 200 s.allocCache >>= uint(bitIndex + 1) 201 sfreeindex = result + 1 202 203 if sfreeindex%64 == 0 && sfreeindex != snelems { 204 // We just incremented s.freeindex so it isn't 0. 205 // As each 1 in s.allocCache was encountered and used for allocation 206 // it was shifted away. At this point s.allocCache contains all 0s. 207 // Refill s.allocCache so that it corresponds 208 // to the bits at s.allocBits starting at s.freeindex. 209 whichByte := sfreeindex / 8 210 s.refillAllocCache(whichByte) 211 } 212 s.freeindex = sfreeindex 213 return result 214 } 215 216 // isFree reports whether the index'th object in s is unallocated. 217 // 218 // The caller must ensure s.state is mSpanInUse, and there must have 219 // been no preemption points since ensuring this (which could allow a 220 // GC transition, which would allow the state to change). 221 func (s *mspan) isFree(index uintptr) bool { 222 if index < s.freeindex { 223 return false 224 } 225 bytep, mask := s.allocBits.bitp(index) 226 return *bytep&mask == 0 227 } 228 229 func (s *mspan) objIndex(p uintptr) uintptr { 230 byteOffset := p - s.base() 231 if byteOffset == 0 { 232 return 0 233 } 234 if s.baseMask != 0 { 235 // s.baseMask is non-0, elemsize is a power of two, so shift by s.divShift 236 return byteOffset >> s.divShift 237 } 238 return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2) 239 } 240 241 func markBitsForAddr(p uintptr) markBits { 242 s := spanOf(p) 243 objIndex := s.objIndex(p) 244 return s.markBitsForIndex(objIndex) 245 } 246 247 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits { 248 bytep, mask := s.gcmarkBits.bitp(objIndex) 249 return markBits{bytep, mask, objIndex} 250 } 251 252 func (s *mspan) markBitsForBase() markBits { 253 return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0} 254 } 255 256 // isMarked reports whether mark bit m is set. 257 func (m markBits) isMarked() bool { 258 return *m.bytep&m.mask != 0 259 } 260 261 // setMarked sets the marked bit in the markbits, atomically. 262 func (m markBits) setMarked() { 263 // Might be racing with other updates, so use atomic update always. 264 // We used to be clever here and use a non-atomic update in certain 265 // cases, but it's not worth the risk. 266 atomic.Or8(m.bytep, m.mask) 267 } 268 269 // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically. 270 func (m markBits) setMarkedNonAtomic() { 271 *m.bytep |= m.mask 272 } 273 274 // clearMarked clears the marked bit in the markbits, atomically. 275 func (m markBits) clearMarked() { 276 // Might be racing with other updates, so use atomic update always. 277 // We used to be clever here and use a non-atomic update in certain 278 // cases, but it's not worth the risk. 279 atomic.And8(m.bytep, ^m.mask) 280 } 281 282 // markBitsForSpan returns the markBits for the span base address base. 283 func markBitsForSpan(base uintptr) (mbits markBits) { 284 mbits = markBitsForAddr(base) 285 if mbits.mask != 1 { 286 throw("markBitsForSpan: unaligned start") 287 } 288 return mbits 289 } 290 291 // advance advances the markBits to the next object in the span. 292 func (m *markBits) advance() { 293 if m.mask == 1<<7 { 294 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1)) 295 m.mask = 1 296 } else { 297 m.mask = m.mask << 1 298 } 299 m.index++ 300 } 301 302 // heapBitsForAddr returns the heapBits for the address addr. 303 // The caller must ensure addr is in an allocated span. 304 // In particular, be careful not to point past the end of an object. 305 // 306 // nosplit because it is used during write barriers and must not be preempted. 307 //go:nosplit 308 func heapBitsForAddr(addr uintptr) (h heapBits) { 309 // 2 bits per word, 4 pairs per byte, and a mask is hard coded. 310 arena := arenaIndex(addr) 311 ha := mheap_.arenas[arena.l1()][arena.l2()] 312 // The compiler uses a load for nil checking ha, but in this 313 // case we'll almost never hit that cache line again, so it 314 // makes more sense to do a value check. 315 if ha == nil { 316 // addr is not in the heap. Return nil heapBits, which 317 // we expect to crash in the caller. 318 return 319 } 320 h.bitp = &ha.bitmap[(addr/(sys.PtrSize*4))%heapArenaBitmapBytes] 321 h.shift = uint32((addr / sys.PtrSize) & 3) 322 h.arena = uint32(arena) 323 h.last = &ha.bitmap[len(ha.bitmap)-1] 324 return 325 } 326 327 // badPointer throws bad pointer in heap panic. 328 func badPointer(s *mspan, p, refBase, refOff uintptr) { 329 // Typically this indicates an incorrect use 330 // of unsafe or cgo to store a bad pointer in 331 // the Go heap. It may also indicate a runtime 332 // bug. 333 // 334 // TODO(austin): We could be more aggressive 335 // and detect pointers to unallocated objects 336 // in allocated spans. 337 printlock() 338 print("runtime: pointer ", hex(p)) 339 state := s.state.get() 340 if state != mSpanInUse { 341 print(" to unallocated span") 342 } else { 343 print(" to unused region of span") 344 } 345 print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state, "\n") 346 if refBase != 0 { 347 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n") 348 gcDumpObject("object", refBase, refOff) 349 } 350 getg().m.traceback = 2 351 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)") 352 } 353 354 // findObject returns the base address for the heap object containing 355 // the address p, the object's span, and the index of the object in s. 356 // If p does not point into a heap object, it returns base == 0. 357 // 358 // If p points is an invalid heap pointer and debug.invalidptr != 0, 359 // findObject panics. 360 // 361 // refBase and refOff optionally give the base address of the object 362 // in which the pointer p was found and the byte offset at which it 363 // was found. These are used for error reporting. 364 // 365 // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack. 366 // Since p is a uintptr, it would not be adjusted if the stack were to move. 367 //go:nosplit 368 func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) { 369 s = spanOf(p) 370 // If s is nil, the virtual address has never been part of the heap. 371 // This pointer may be to some mmap'd region, so we allow it. 372 if s == nil { 373 return 374 } 375 // If p is a bad pointer, it may not be in s's bounds. 376 // 377 // Check s.state to synchronize with span initialization 378 // before checking other fields. See also spanOfHeap. 379 if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit { 380 // Pointers into stacks are also ok, the runtime manages these explicitly. 381 if state == mSpanManual { 382 return 383 } 384 // The following ensures that we are rigorous about what data 385 // structures hold valid pointers. 386 if debug.invalidptr != 0 { 387 badPointer(s, p, refBase, refOff) 388 } 389 return 390 } 391 // If this span holds object of a power of 2 size, just mask off the bits to 392 // the interior of the object. Otherwise use the size to get the base. 393 if s.baseMask != 0 { 394 // optimize for power of 2 sized objects. 395 base = s.base() 396 base = base + (p-base)&uintptr(s.baseMask) 397 objIndex = (base - s.base()) >> s.divShift 398 // base = p & s.baseMask is faster for small spans, 399 // but doesn't work for large spans. 400 // Overall, it's faster to use the more general computation above. 401 } else { 402 base = s.base() 403 if p-base >= s.elemsize { 404 // n := (p - base) / s.elemsize, using division by multiplication 405 objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2 406 base += objIndex * s.elemsize 407 } 408 } 409 return 410 } 411 412 // next returns the heapBits describing the next pointer-sized word in memory. 413 // That is, if h describes address p, h.next() describes p+ptrSize. 414 // Note that next does not modify h. The caller must record the result. 415 // 416 // nosplit because it is used during write barriers and must not be preempted. 417 //go:nosplit 418 func (h heapBits) next() heapBits { 419 if h.shift < 3*heapBitsShift { 420 h.shift += heapBitsShift 421 } else if h.bitp != h.last { 422 h.bitp, h.shift = add1(h.bitp), 0 423 } else { 424 // Move to the next arena. 425 return h.nextArena() 426 } 427 return h 428 } 429 430 // nextArena advances h to the beginning of the next heap arena. 431 // 432 // This is a slow-path helper to next. gc's inliner knows that 433 // heapBits.next can be inlined even though it calls this. This is 434 // marked noinline so it doesn't get inlined into next and cause next 435 // to be too big to inline. 436 // 437 //go:nosplit 438 //go:noinline 439 func (h heapBits) nextArena() heapBits { 440 h.arena++ 441 ai := arenaIdx(h.arena) 442 l2 := mheap_.arenas[ai.l1()] 443 if l2 == nil { 444 // We just passed the end of the object, which 445 // was also the end of the heap. Poison h. It 446 // should never be dereferenced at this point. 447 return heapBits{} 448 } 449 ha := l2[ai.l2()] 450 if ha == nil { 451 return heapBits{} 452 } 453 h.bitp, h.shift = &ha.bitmap[0], 0 454 h.last = &ha.bitmap[len(ha.bitmap)-1] 455 return h 456 } 457 458 // forward returns the heapBits describing n pointer-sized words ahead of h in memory. 459 // That is, if h describes address p, h.forward(n) describes p+n*ptrSize. 460 // h.forward(1) is equivalent to h.next(), just slower. 461 // Note that forward does not modify h. The caller must record the result. 462 // bits returns the heap bits for the current word. 463 //go:nosplit 464 func (h heapBits) forward(n uintptr) heapBits { 465 n += uintptr(h.shift) / heapBitsShift 466 nbitp := uintptr(unsafe.Pointer(h.bitp)) + n/4 467 h.shift = uint32(n%4) * heapBitsShift 468 if nbitp <= uintptr(unsafe.Pointer(h.last)) { 469 h.bitp = (*uint8)(unsafe.Pointer(nbitp)) 470 return h 471 } 472 473 // We're in a new heap arena. 474 past := nbitp - (uintptr(unsafe.Pointer(h.last)) + 1) 475 h.arena += 1 + uint32(past/heapArenaBitmapBytes) 476 ai := arenaIdx(h.arena) 477 if l2 := mheap_.arenas[ai.l1()]; l2 != nil && l2[ai.l2()] != nil { 478 a := l2[ai.l2()] 479 h.bitp = &a.bitmap[past%heapArenaBitmapBytes] 480 h.last = &a.bitmap[len(a.bitmap)-1] 481 } else { 482 h.bitp, h.last = nil, nil 483 } 484 return h 485 } 486 487 // forwardOrBoundary is like forward, but stops at boundaries between 488 // contiguous sections of the bitmap. It returns the number of words 489 // advanced over, which will be <= n. 490 func (h heapBits) forwardOrBoundary(n uintptr) (heapBits, uintptr) { 491 maxn := 4 * ((uintptr(unsafe.Pointer(h.last)) + 1) - uintptr(unsafe.Pointer(h.bitp))) 492 if n > maxn { 493 n = maxn 494 } 495 return h.forward(n), n 496 } 497 498 // The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer. 499 // The result includes in its higher bits the bits for subsequent words 500 // described by the same bitmap byte. 501 // 502 // nosplit because it is used during write barriers and must not be preempted. 503 //go:nosplit 504 func (h heapBits) bits() uint32 { 505 // The (shift & 31) eliminates a test and conditional branch 506 // from the generated code. 507 return uint32(*h.bitp) >> (h.shift & 31) 508 } 509 510 // morePointers reports whether this word and all remaining words in this object 511 // are scalars. 512 // h must not describe the second word of the object. 513 func (h heapBits) morePointers() bool { 514 return h.bits()&bitScan != 0 515 } 516 517 // isPointer reports whether the heap bits describe a pointer word. 518 // 519 // nosplit because it is used during write barriers and must not be preempted. 520 //go:nosplit 521 func (h heapBits) isPointer() bool { 522 return h.bits()&bitPointer != 0 523 } 524 525 // bulkBarrierPreWrite executes a write barrier 526 // for every pointer slot in the memory range [src, src+size), 527 // using pointer/scalar information from [dst, dst+size). 528 // This executes the write barriers necessary before a memmove. 529 // src, dst, and size must be pointer-aligned. 530 // The range [dst, dst+size) must lie within a single object. 531 // It does not perform the actual writes. 532 // 533 // As a special case, src == 0 indicates that this is being used for a 534 // memclr. bulkBarrierPreWrite will pass 0 for the src of each write 535 // barrier. 536 // 537 // Callers should call bulkBarrierPreWrite immediately before 538 // calling memmove(dst, src, size). This function is marked nosplit 539 // to avoid being preempted; the GC must not stop the goroutine 540 // between the memmove and the execution of the barriers. 541 // The caller is also responsible for cgo pointer checks if this 542 // may be writing Go pointers into non-Go memory. 543 // 544 // The pointer bitmap is not maintained for allocations containing 545 // no pointers at all; any caller of bulkBarrierPreWrite must first 546 // make sure the underlying allocation contains pointers, usually 547 // by checking typ.ptrdata. 548 // 549 // Callers must perform cgo checks if writeBarrier.cgo. 550 // 551 //go:nosplit 552 func bulkBarrierPreWrite(dst, src, size uintptr) { 553 if (dst|src|size)&(sys.PtrSize-1) != 0 { 554 throw("bulkBarrierPreWrite: unaligned arguments") 555 } 556 if !writeBarrier.needed { 557 return 558 } 559 if s := spanOf(dst); s == nil { 560 // If dst is a global, use the data or BSS bitmaps to 561 // execute write barriers. 562 for _, datap := range activeModules() { 563 if datap.data <= dst && dst < datap.edata { 564 bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata) 565 return 566 } 567 } 568 for _, datap := range activeModules() { 569 if datap.bss <= dst && dst < datap.ebss { 570 bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata) 571 return 572 } 573 } 574 return 575 } else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst { 576 // dst was heap memory at some point, but isn't now. 577 // It can't be a global. It must be either our stack, 578 // or in the case of direct channel sends, it could be 579 // another stack. Either way, no need for barriers. 580 // This will also catch if dst is in a freed span, 581 // though that should never have. 582 return 583 } 584 585 buf := &getg().m.p.ptr().wbBuf 586 h := heapBitsForAddr(dst) 587 if src == 0 { 588 for i := uintptr(0); i < size; i += sys.PtrSize { 589 if h.isPointer() { 590 dstx := (*uintptr)(unsafe.Pointer(dst + i)) 591 if !buf.putFast(*dstx, 0) { 592 wbBufFlush(nil, 0) 593 } 594 } 595 h = h.next() 596 } 597 } else { 598 for i := uintptr(0); i < size; i += sys.PtrSize { 599 if h.isPointer() { 600 dstx := (*uintptr)(unsafe.Pointer(dst + i)) 601 srcx := (*uintptr)(unsafe.Pointer(src + i)) 602 if !buf.putFast(*dstx, *srcx) { 603 wbBufFlush(nil, 0) 604 } 605 } 606 h = h.next() 607 } 608 } 609 } 610 611 // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but 612 // does not execute write barriers for [dst, dst+size). 613 // 614 // In addition to the requirements of bulkBarrierPreWrite 615 // callers need to ensure [dst, dst+size) is zeroed. 616 // 617 // This is used for special cases where e.g. dst was just 618 // created and zeroed with malloc. 619 //go:nosplit 620 func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) { 621 if (dst|src|size)&(sys.PtrSize-1) != 0 { 622 throw("bulkBarrierPreWrite: unaligned arguments") 623 } 624 if !writeBarrier.needed { 625 return 626 } 627 buf := &getg().m.p.ptr().wbBuf 628 h := heapBitsForAddr(dst) 629 for i := uintptr(0); i < size; i += sys.PtrSize { 630 if h.isPointer() { 631 srcx := (*uintptr)(unsafe.Pointer(src + i)) 632 if !buf.putFast(0, *srcx) { 633 wbBufFlush(nil, 0) 634 } 635 } 636 h = h.next() 637 } 638 } 639 640 // bulkBarrierBitmap executes write barriers for copying from [src, 641 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is 642 // assumed to start maskOffset bytes into the data covered by the 643 // bitmap in bits (which may not be a multiple of 8). 644 // 645 // This is used by bulkBarrierPreWrite for writes to data and BSS. 646 // 647 //go:nosplit 648 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) { 649 word := maskOffset / sys.PtrSize 650 bits = addb(bits, word/8) 651 mask := uint8(1) << (word % 8) 652 653 buf := &getg().m.p.ptr().wbBuf 654 for i := uintptr(0); i < size; i += sys.PtrSize { 655 if mask == 0 { 656 bits = addb(bits, 1) 657 if *bits == 0 { 658 // Skip 8 words. 659 i += 7 * sys.PtrSize 660 continue 661 } 662 mask = 1 663 } 664 if *bits&mask != 0 { 665 dstx := (*uintptr)(unsafe.Pointer(dst + i)) 666 if src == 0 { 667 if !buf.putFast(*dstx, 0) { 668 wbBufFlush(nil, 0) 669 } 670 } else { 671 srcx := (*uintptr)(unsafe.Pointer(src + i)) 672 if !buf.putFast(*dstx, *srcx) { 673 wbBufFlush(nil, 0) 674 } 675 } 676 } 677 mask <<= 1 678 } 679 } 680 681 // typeBitsBulkBarrier executes a write barrier for every 682 // pointer that would be copied from [src, src+size) to [dst, 683 // dst+size) by a memmove using the type bitmap to locate those 684 // pointer slots. 685 // 686 // The type typ must correspond exactly to [src, src+size) and [dst, dst+size). 687 // dst, src, and size must be pointer-aligned. 688 // The type typ must have a plain bitmap, not a GC program. 689 // The only use of this function is in channel sends, and the 690 // 64 kB channel element limit takes care of this for us. 691 // 692 // Must not be preempted because it typically runs right before memmove, 693 // and the GC must observe them as an atomic action. 694 // 695 // Callers must perform cgo checks if writeBarrier.cgo. 696 // 697 //go:nosplit 698 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) { 699 if typ == nil { 700 throw("runtime: typeBitsBulkBarrier without type") 701 } 702 if typ.size != size { 703 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size) 704 throw("runtime: invalid typeBitsBulkBarrier") 705 } 706 if typ.kind&kindGCProg != 0 { 707 println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog") 708 throw("runtime: invalid typeBitsBulkBarrier") 709 } 710 if !writeBarrier.needed { 711 return 712 } 713 ptrmask := typ.gcdata 714 buf := &getg().m.p.ptr().wbBuf 715 var bits uint32 716 for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize { 717 if i&(sys.PtrSize*8-1) == 0 { 718 bits = uint32(*ptrmask) 719 ptrmask = addb(ptrmask, 1) 720 } else { 721 bits = bits >> 1 722 } 723 if bits&1 != 0 { 724 dstx := (*uintptr)(unsafe.Pointer(dst + i)) 725 srcx := (*uintptr)(unsafe.Pointer(src + i)) 726 if !buf.putFast(*dstx, *srcx) { 727 wbBufFlush(nil, 0) 728 } 729 } 730 } 731 } 732 733 // The methods operating on spans all require that h has been returned 734 // by heapBitsForSpan and that size, n, total are the span layout description 735 // returned by the mspan's layout method. 736 // If total > size*n, it means that there is extra leftover memory in the span, 737 // usually due to rounding. 738 // 739 // TODO(rsc): Perhaps introduce a different heapBitsSpan type. 740 741 // initSpan initializes the heap bitmap for a span. 742 // If this is a span of pointer-sized objects, it initializes all 743 // words to pointer/scan. 744 // Otherwise, it initializes all words to scalar/dead. 745 func (h heapBits) initSpan(s *mspan) { 746 // Clear bits corresponding to objects. 747 nw := (s.npages << _PageShift) / sys.PtrSize 748 if nw%wordsPerBitmapByte != 0 { 749 throw("initSpan: unaligned length") 750 } 751 if h.shift != 0 { 752 throw("initSpan: unaligned base") 753 } 754 isPtrs := sys.PtrSize == 8 && s.elemsize == sys.PtrSize 755 for nw > 0 { 756 hNext, anw := h.forwardOrBoundary(nw) 757 nbyte := anw / wordsPerBitmapByte 758 if isPtrs { 759 bitp := h.bitp 760 for i := uintptr(0); i < nbyte; i++ { 761 *bitp = bitPointerAll | bitScanAll 762 bitp = add1(bitp) 763 } 764 } else { 765 memclrNoHeapPointers(unsafe.Pointer(h.bitp), nbyte) 766 } 767 h = hNext 768 nw -= anw 769 } 770 } 771 772 // countAlloc returns the number of objects allocated in span s by 773 // scanning the allocation bitmap. 774 func (s *mspan) countAlloc() int { 775 count := 0 776 bytes := divRoundUp(s.nelems, 8) 777 // Iterate over each 8-byte chunk and count allocations 778 // with an intrinsic. Note that newMarkBits guarantees that 779 // gcmarkBits will be 8-byte aligned, so we don't have to 780 // worry about edge cases, irrelevant bits will simply be zero. 781 for i := uintptr(0); i < bytes; i += 8 { 782 // Extract 64 bits from the byte pointer and get a OnesCount. 783 // Note that the unsafe cast here doesn't preserve endianness, 784 // but that's OK. We only care about how many bits are 1, not 785 // about the order we discover them in. 786 mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i))) 787 count += sys.OnesCount64(mrkBits) 788 } 789 return count 790 } 791 792 // heapBitsSetType records that the new allocation [x, x+size) 793 // holds in [x, x+dataSize) one or more values of type typ. 794 // (The number of values is given by dataSize / typ.size.) 795 // If dataSize < size, the fragment [x+dataSize, x+size) is 796 // recorded as non-pointer data. 797 // It is known that the type has pointers somewhere; 798 // malloc does not call heapBitsSetType when there are no pointers, 799 // because all free objects are marked as noscan during 800 // heapBitsSweepSpan. 801 // 802 // There can only be one allocation from a given span active at a time, 803 // and the bitmap for a span always falls on byte boundaries, 804 // so there are no write-write races for access to the heap bitmap. 805 // Hence, heapBitsSetType can access the bitmap without atomics. 806 // 807 // There can be read-write races between heapBitsSetType and things 808 // that read the heap bitmap like scanobject. However, since 809 // heapBitsSetType is only used for objects that have not yet been 810 // made reachable, readers will ignore bits being modified by this 811 // function. This does mean this function cannot transiently modify 812 // bits that belong to neighboring objects. Also, on weakly-ordered 813 // machines, callers must execute a store/store (publication) barrier 814 // between calling this function and making the object reachable. 815 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) { 816 const doubleCheck = false // slow but helpful; enable to test modifications to this code 817 818 const ( 819 mask1 = bitPointer | bitScan // 00010001 820 mask2 = bitPointer | bitScan | mask1<<heapBitsShift // 00110011 821 mask3 = bitPointer | bitScan | mask2<<heapBitsShift // 01110111 822 ) 823 824 // dataSize is always size rounded up to the next malloc size class, 825 // except in the case of allocating a defer block, in which case 826 // size is sizeof(_defer{}) (at least 6 words) and dataSize may be 827 // arbitrarily larger. 828 // 829 // The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore 830 // assume that dataSize == size without checking it explicitly. 831 832 if sys.PtrSize == 8 && size == sys.PtrSize { 833 // It's one word and it has pointers, it must be a pointer. 834 // Since all allocated one-word objects are pointers 835 // (non-pointers are aggregated into tinySize allocations), 836 // initSpan sets the pointer bits for us. Nothing to do here. 837 if doubleCheck { 838 h := heapBitsForAddr(x) 839 if !h.isPointer() { 840 throw("heapBitsSetType: pointer bit missing") 841 } 842 if !h.morePointers() { 843 throw("heapBitsSetType: scan bit missing") 844 } 845 } 846 return 847 } 848 849 h := heapBitsForAddr(x) 850 ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below) 851 852 // 2-word objects only have 4 bitmap bits and 3-word objects only have 6 bitmap bits. 853 // Therefore, these objects share a heap bitmap byte with the objects next to them. 854 // These are called out as a special case primarily so the code below can assume all 855 // objects are at least 4 words long and that their bitmaps start either at the beginning 856 // of a bitmap byte, or half-way in (h.shift of 0 and 2 respectively). 857 858 if size == 2*sys.PtrSize { 859 if typ.size == sys.PtrSize { 860 // We're allocating a block big enough to hold two pointers. 861 // On 64-bit, that means the actual object must be two pointers, 862 // or else we'd have used the one-pointer-sized block. 863 // On 32-bit, however, this is the 8-byte block, the smallest one. 864 // So it could be that we're allocating one pointer and this was 865 // just the smallest block available. Distinguish by checking dataSize. 866 // (In general the number of instances of typ being allocated is 867 // dataSize/typ.size.) 868 if sys.PtrSize == 4 && dataSize == sys.PtrSize { 869 // 1 pointer object. On 32-bit machines clear the bit for the 870 // unused second word. 871 *h.bitp &^= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift 872 *h.bitp |= (bitPointer | bitScan) << h.shift 873 } else { 874 // 2-element array of pointer. 875 *h.bitp |= (bitPointer | bitScan | (bitPointer|bitScan)<<heapBitsShift) << h.shift 876 } 877 return 878 } 879 // Otherwise typ.size must be 2*sys.PtrSize, 880 // and typ.kind&kindGCProg == 0. 881 if doubleCheck { 882 if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 { 883 print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n") 884 throw("heapBitsSetType") 885 } 886 } 887 b := uint32(*ptrmask) 888 hb := b & 3 889 hb |= bitScanAll & ((bitScan << (typ.ptrdata / sys.PtrSize)) - 1) 890 // Clear the bits for this object so we can set the 891 // appropriate ones. 892 *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift 893 *h.bitp |= uint8(hb << h.shift) 894 return 895 } else if size == 3*sys.PtrSize { 896 b := uint8(*ptrmask) 897 if doubleCheck { 898 if b == 0 { 899 println("runtime: invalid type ", typ.string()) 900 throw("heapBitsSetType: called with non-pointer type") 901 } 902 if sys.PtrSize != 8 { 903 throw("heapBitsSetType: unexpected 3 pointer wide size class on 32 bit") 904 } 905 if typ.kind&kindGCProg != 0 { 906 throw("heapBitsSetType: unexpected GC prog for 3 pointer wide size class") 907 } 908 if typ.size == 2*sys.PtrSize { 909 print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, "\n") 910 throw("heapBitsSetType: inconsistent object sizes") 911 } 912 } 913 if typ.size == sys.PtrSize { 914 // The type contains a pointer otherwise heapBitsSetType wouldn't have been called. 915 // Since the type is only 1 pointer wide and contains a pointer, its gcdata must be exactly 1. 916 if doubleCheck && *typ.gcdata != 1 { 917 print("runtime: heapBitsSetType size=", size, " typ.size=", typ.size, "but *typ.gcdata", *typ.gcdata, "\n") 918 throw("heapBitsSetType: unexpected gcdata for 1 pointer wide type size in 3 pointer wide size class") 919 } 920 // 3 element array of pointers. Unrolling ptrmask 3 times into p yields 00000111. 921 b = 7 922 } 923 924 hb := b & 7 925 // Set bitScan bits for all pointers. 926 hb |= hb << wordsPerBitmapByte 927 // First bitScan bit is always set since the type contains pointers. 928 hb |= bitScan 929 // Second bitScan bit needs to also be set if the third bitScan bit is set. 930 hb |= hb & (bitScan << (2 * heapBitsShift)) >> 1 931 932 // For h.shift > 1 heap bits cross a byte boundary and need to be written part 933 // to h.bitp and part to the next h.bitp. 934 switch h.shift { 935 case 0: 936 *h.bitp &^= mask3 << 0 937 *h.bitp |= hb << 0 938 case 1: 939 *h.bitp &^= mask3 << 1 940 *h.bitp |= hb << 1 941 case 2: 942 *h.bitp &^= mask2 << 2 943 *h.bitp |= (hb & mask2) << 2 944 // Two words written to the first byte. 945 // Advance two words to get to the next byte. 946 h = h.next().next() 947 *h.bitp &^= mask1 948 *h.bitp |= (hb >> 2) & mask1 949 case 3: 950 *h.bitp &^= mask1 << 3 951 *h.bitp |= (hb & mask1) << 3 952 // One word written to the first byte. 953 // Advance one word to get to the next byte. 954 h = h.next() 955 *h.bitp &^= mask2 956 *h.bitp |= (hb >> 1) & mask2 957 } 958 return 959 } 960 961 // Copy from 1-bit ptrmask into 2-bit bitmap. 962 // The basic approach is to use a single uintptr as a bit buffer, 963 // alternating between reloading the buffer and writing bitmap bytes. 964 // In general, one load can supply two bitmap byte writes. 965 // This is a lot of lines of code, but it compiles into relatively few 966 // machine instructions. 967 968 outOfPlace := false 969 if arenaIndex(x+size-1) != arenaIdx(h.arena) || (doubleCheck && fastrand()%2 == 0) { 970 // This object spans heap arenas, so the bitmap may be 971 // discontiguous. Unroll it into the object instead 972 // and then copy it out. 973 // 974 // In doubleCheck mode, we randomly do this anyway to 975 // stress test the bitmap copying path. 976 outOfPlace = true 977 h.bitp = (*uint8)(unsafe.Pointer(x)) 978 h.last = nil 979 } 980 981 var ( 982 // Ptrmask input. 983 p *byte // last ptrmask byte read 984 b uintptr // ptrmask bits already loaded 985 nb uintptr // number of bits in b at next read 986 endp *byte // final ptrmask byte to read (then repeat) 987 endnb uintptr // number of valid bits in *endp 988 pbits uintptr // alternate source of bits 989 990 // Heap bitmap output. 991 w uintptr // words processed 992 nw uintptr // number of words to process 993 hbitp *byte // next heap bitmap byte to write 994 hb uintptr // bits being prepared for *hbitp 995 ) 996 997 hbitp = h.bitp 998 999 // Handle GC program. Delayed until this part of the code 1000 // so that we can use the same double-checking mechanism 1001 // as the 1-bit case. Nothing above could have encountered 1002 // GC programs: the cases were all too small. 1003 if typ.kind&kindGCProg != 0 { 1004 heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4)) 1005 if doubleCheck { 1006 // Double-check the heap bits written by GC program 1007 // by running the GC program to create a 1-bit pointer mask 1008 // and then jumping to the double-check code below. 1009 // This doesn't catch bugs shared between the 1-bit and 4-bit 1010 // GC program execution, but it does catch mistakes specific 1011 // to just one of those and bugs in heapBitsSetTypeGCProg's 1012 // implementation of arrays. 1013 lock(&debugPtrmask.lock) 1014 if debugPtrmask.data == nil { 1015 debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys)) 1016 } 1017 ptrmask = debugPtrmask.data 1018 runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1) 1019 } 1020 goto Phase4 1021 } 1022 1023 // Note about sizes: 1024 // 1025 // typ.size is the number of words in the object, 1026 // and typ.ptrdata is the number of words in the prefix 1027 // of the object that contains pointers. That is, the final 1028 // typ.size - typ.ptrdata words contain no pointers. 1029 // This allows optimization of a common pattern where 1030 // an object has a small header followed by a large scalar 1031 // buffer. If we know the pointers are over, we don't have 1032 // to scan the buffer's heap bitmap at all. 1033 // The 1-bit ptrmasks are sized to contain only bits for 1034 // the typ.ptrdata prefix, zero padded out to a full byte 1035 // of bitmap. This code sets nw (below) so that heap bitmap 1036 // bits are only written for the typ.ptrdata prefix; if there is 1037 // more room in the allocated object, the next heap bitmap 1038 // entry is a 00, indicating that there are no more pointers 1039 // to scan. So only the ptrmask for the ptrdata bytes is needed. 1040 // 1041 // Replicated copies are not as nice: if there is an array of 1042 // objects with scalar tails, all but the last tail does have to 1043 // be initialized, because there is no way to say "skip forward". 1044 // However, because of the possibility of a repeated type with 1045 // size not a multiple of 4 pointers (one heap bitmap byte), 1046 // the code already must handle the last ptrmask byte specially 1047 // by treating it as containing only the bits for endnb pointers, 1048 // where endnb <= 4. We represent large scalar tails that must 1049 // be expanded in the replication by setting endnb larger than 4. 1050 // This will have the effect of reading many bits out of b, 1051 // but once the real bits are shifted out, b will supply as many 1052 // zero bits as we try to read, which is exactly what we need. 1053 1054 p = ptrmask 1055 if typ.size < dataSize { 1056 // Filling in bits for an array of typ. 1057 // Set up for repetition of ptrmask during main loop. 1058 // Note that ptrmask describes only a prefix of 1059 const maxBits = sys.PtrSize*8 - 7 1060 if typ.ptrdata/sys.PtrSize <= maxBits { 1061 // Entire ptrmask fits in uintptr with room for a byte fragment. 1062 // Load into pbits and never read from ptrmask again. 1063 // This is especially important when the ptrmask has 1064 // fewer than 8 bits in it; otherwise the reload in the middle 1065 // of the Phase 2 loop would itself need to loop to gather 1066 // at least 8 bits. 1067 1068 // Accumulate ptrmask into b. 1069 // ptrmask is sized to describe only typ.ptrdata, but we record 1070 // it as describing typ.size bytes, since all the high bits are zero. 1071 nb = typ.ptrdata / sys.PtrSize 1072 for i := uintptr(0); i < nb; i += 8 { 1073 b |= uintptr(*p) << i 1074 p = add1(p) 1075 } 1076 nb = typ.size / sys.PtrSize 1077 1078 // Replicate ptrmask to fill entire pbits uintptr. 1079 // Doubling and truncating is fewer steps than 1080 // iterating by nb each time. (nb could be 1.) 1081 // Since we loaded typ.ptrdata/sys.PtrSize bits 1082 // but are pretending to have typ.size/sys.PtrSize, 1083 // there might be no replication necessary/possible. 1084 pbits = b 1085 endnb = nb 1086 if nb+nb <= maxBits { 1087 for endnb <= sys.PtrSize*8 { 1088 pbits |= pbits << endnb 1089 endnb += endnb 1090 } 1091 // Truncate to a multiple of original ptrmask. 1092 // Because nb+nb <= maxBits, nb fits in a byte. 1093 // Byte division is cheaper than uintptr division. 1094 endnb = uintptr(maxBits/byte(nb)) * nb 1095 pbits &= 1<<endnb - 1 1096 b = pbits 1097 nb = endnb 1098 } 1099 1100 // Clear p and endp as sentinel for using pbits. 1101 // Checked during Phase 2 loop. 1102 p = nil 1103 endp = nil 1104 } else { 1105 // Ptrmask is larger. Read it multiple times. 1106 n := (typ.ptrdata/sys.PtrSize+7)/8 - 1 1107 endp = addb(ptrmask, n) 1108 endnb = typ.size/sys.PtrSize - n*8 1109 } 1110 } 1111 if p != nil { 1112 b = uintptr(*p) 1113 p = add1(p) 1114 nb = 8 1115 } 1116 1117 if typ.size == dataSize { 1118 // Single entry: can stop once we reach the non-pointer data. 1119 nw = typ.ptrdata / sys.PtrSize 1120 } else { 1121 // Repeated instances of typ in an array. 1122 // Have to process first N-1 entries in full, but can stop 1123 // once we reach the non-pointer data in the final entry. 1124 nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize 1125 } 1126 if nw == 0 { 1127 // No pointers! Caller was supposed to check. 1128 println("runtime: invalid type ", typ.string()) 1129 throw("heapBitsSetType: called with non-pointer type") 1130 return 1131 } 1132 1133 // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==2). 1134 // The leading byte is special because it contains the bits for word 1, 1135 // which does not have the scan bit set. 1136 // The leading half-byte is special because it's a half a byte, 1137 // so we have to be careful with the bits already there. 1138 switch { 1139 default: 1140 throw("heapBitsSetType: unexpected shift") 1141 1142 case h.shift == 0: 1143 // Ptrmask and heap bitmap are aligned. 1144 // 1145 // This is a fast path for small objects. 1146 // 1147 // The first byte we write out covers the first four 1148 // words of the object. The scan/dead bit on the first 1149 // word must be set to scan since there are pointers 1150 // somewhere in the object. 1151 // In all following words, we set the scan/dead 1152 // appropriately to indicate that the object continues 1153 // to the next 2-bit entry in the bitmap. 1154 // 1155 // We set four bits at a time here, but if the object 1156 // is fewer than four words, phase 3 will clear 1157 // unnecessary bits. 1158 hb = b & bitPointerAll 1159 hb |= bitScanAll 1160 if w += 4; w >= nw { 1161 goto Phase3 1162 } 1163 *hbitp = uint8(hb) 1164 hbitp = add1(hbitp) 1165 b >>= 4 1166 nb -= 4 1167 1168 case h.shift == 2: 1169 // Ptrmask and heap bitmap are misaligned. 1170 // 1171 // On 32 bit architectures only the 6-word object that corresponds 1172 // to a 24 bytes size class can start with h.shift of 2 here since 1173 // all other non 16 byte aligned size classes have been handled by 1174 // special code paths at the beginning of heapBitsSetType on 32 bit. 1175 // 1176 // Many size classes are only 16 byte aligned. On 64 bit architectures 1177 // this results in a heap bitmap position starting with a h.shift of 2. 1178 // 1179 // The bits for the first two words are in a byte shared 1180 // with another object, so we must be careful with the bits 1181 // already there. 1182 // 1183 // We took care of 1-word, 2-word, and 3-word objects above, 1184 // so this is at least a 6-word object. 1185 hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift) 1186 hb |= bitScan << (2 * heapBitsShift) 1187 if nw > 1 { 1188 hb |= bitScan << (3 * heapBitsShift) 1189 } 1190 b >>= 2 1191 nb -= 2 1192 *hbitp &^= uint8((bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << (2 * heapBitsShift)) 1193 *hbitp |= uint8(hb) 1194 hbitp = add1(hbitp) 1195 if w += 2; w >= nw { 1196 // We know that there is more data, because we handled 2-word and 3-word objects above. 1197 // This must be at least a 6-word object. If we're out of pointer words, 1198 // mark no scan in next bitmap byte and finish. 1199 hb = 0 1200 w += 4 1201 goto Phase3 1202 } 1203 } 1204 1205 // Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap. 1206 // The loop computes the bits for that last write but does not execute the write; 1207 // it leaves the bits in hb for processing by phase 3. 1208 // To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to 1209 // use in the first half of the loop right now, and then we only adjust nb explicitly 1210 // if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop. 1211 nb -= 4 1212 for { 1213 // Emit bitmap byte. 1214 // b has at least nb+4 bits, with one exception: 1215 // if w+4 >= nw, then b has only nw-w bits, 1216 // but we'll stop at the break and then truncate 1217 // appropriately in Phase 3. 1218 hb = b & bitPointerAll 1219 hb |= bitScanAll 1220 if w += 4; w >= nw { 1221 break 1222 } 1223 *hbitp = uint8(hb) 1224 hbitp = add1(hbitp) 1225 b >>= 4 1226 1227 // Load more bits. b has nb right now. 1228 if p != endp { 1229 // Fast path: keep reading from ptrmask. 1230 // nb unmodified: we just loaded 8 bits, 1231 // and the next iteration will consume 8 bits, 1232 // leaving us with the same nb the next time we're here. 1233 if nb < 8 { 1234 b |= uintptr(*p) << nb 1235 p = add1(p) 1236 } else { 1237 // Reduce the number of bits in b. 1238 // This is important if we skipped 1239 // over a scalar tail, since nb could 1240 // be larger than the bit width of b. 1241 nb -= 8 1242 } 1243 } else if p == nil { 1244 // Almost as fast path: track bit count and refill from pbits. 1245 // For short repetitions. 1246 if nb < 8 { 1247 b |= pbits << nb 1248 nb += endnb 1249 } 1250 nb -= 8 // for next iteration 1251 } else { 1252 // Slow path: reached end of ptrmask. 1253 // Process final partial byte and rewind to start. 1254 b |= uintptr(*p) << nb 1255 nb += endnb 1256 if nb < 8 { 1257 b |= uintptr(*ptrmask) << nb 1258 p = add1(ptrmask) 1259 } else { 1260 nb -= 8 1261 p = ptrmask 1262 } 1263 } 1264 1265 // Emit bitmap byte. 1266 hb = b & bitPointerAll 1267 hb |= bitScanAll 1268 if w += 4; w >= nw { 1269 break 1270 } 1271 *hbitp = uint8(hb) 1272 hbitp = add1(hbitp) 1273 b >>= 4 1274 } 1275 1276 Phase3: 1277 // Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries. 1278 if w > nw { 1279 // Counting the 4 entries in hb not yet written to memory, 1280 // there are more entries than possible pointer slots. 1281 // Discard the excess entries (can't be more than 3). 1282 mask := uintptr(1)<<(4-(w-nw)) - 1 1283 hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits 1284 } 1285 1286 // Change nw from counting possibly-pointer words to total words in allocation. 1287 nw = size / sys.PtrSize 1288 1289 // Write whole bitmap bytes. 1290 // The first is hb, the rest are zero. 1291 if w <= nw { 1292 *hbitp = uint8(hb) 1293 hbitp = add1(hbitp) 1294 hb = 0 // for possible final half-byte below 1295 for w += 4; w <= nw; w += 4 { 1296 *hbitp = 0 1297 hbitp = add1(hbitp) 1298 } 1299 } 1300 1301 // Write final partial bitmap byte if any. 1302 // We know w > nw, or else we'd still be in the loop above. 1303 // It can be bigger only due to the 4 entries in hb that it counts. 1304 // If w == nw+4 then there's nothing left to do: we wrote all nw entries 1305 // and can discard the 4 sitting in hb. 1306 // But if w == nw+2, we need to write first two in hb. 1307 // The byte is shared with the next object, so be careful with 1308 // existing bits. 1309 if w == nw+2 { 1310 *hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb) 1311 } 1312 1313 Phase4: 1314 // Phase 4: Copy unrolled bitmap to per-arena bitmaps, if necessary. 1315 if outOfPlace { 1316 // TODO: We could probably make this faster by 1317 // handling [x+dataSize, x+size) specially. 1318 h := heapBitsForAddr(x) 1319 // cnw is the number of heap words, or bit pairs 1320 // remaining (like nw above). 1321 cnw := size / sys.PtrSize 1322 src := (*uint8)(unsafe.Pointer(x)) 1323 // We know the first and last byte of the bitmap are 1324 // not the same, but it's still possible for small 1325 // objects span arenas, so it may share bitmap bytes 1326 // with neighboring objects. 1327 // 1328 // Handle the first byte specially if it's shared. See 1329 // Phase 1 for why this is the only special case we need. 1330 if doubleCheck { 1331 if !(h.shift == 0 || h.shift == 2) { 1332 print("x=", x, " size=", size, " cnw=", h.shift, "\n") 1333 throw("bad start shift") 1334 } 1335 } 1336 if h.shift == 2 { 1337 *h.bitp = *h.bitp&^((bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift)<<(2*heapBitsShift)) | *src 1338 h = h.next().next() 1339 cnw -= 2 1340 src = addb(src, 1) 1341 } 1342 // We're now byte aligned. Copy out to per-arena 1343 // bitmaps until the last byte (which may again be 1344 // partial). 1345 for cnw >= 4 { 1346 // This loop processes four words at a time, 1347 // so round cnw down accordingly. 1348 hNext, words := h.forwardOrBoundary(cnw / 4 * 4) 1349 1350 // n is the number of bitmap bytes to copy. 1351 n := words / 4 1352 memmove(unsafe.Pointer(h.bitp), unsafe.Pointer(src), n) 1353 cnw -= words 1354 h = hNext 1355 src = addb(src, n) 1356 } 1357 if doubleCheck && h.shift != 0 { 1358 print("cnw=", cnw, " h.shift=", h.shift, "\n") 1359 throw("bad shift after block copy") 1360 } 1361 // Handle the last byte if it's shared. 1362 if cnw == 2 { 1363 *h.bitp = *h.bitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | *src 1364 src = addb(src, 1) 1365 h = h.next().next() 1366 } 1367 if doubleCheck { 1368 if uintptr(unsafe.Pointer(src)) > x+size { 1369 throw("copy exceeded object size") 1370 } 1371 if !(cnw == 0 || cnw == 2) { 1372 print("x=", x, " size=", size, " cnw=", cnw, "\n") 1373 throw("bad number of remaining words") 1374 } 1375 // Set up hbitp so doubleCheck code below can check it. 1376 hbitp = h.bitp 1377 } 1378 // Zero the object where we wrote the bitmap. 1379 memclrNoHeapPointers(unsafe.Pointer(x), uintptr(unsafe.Pointer(src))-x) 1380 } 1381 1382 // Double check the whole bitmap. 1383 if doubleCheck { 1384 // x+size may not point to the heap, so back up one 1385 // word and then advance it the way we do above. 1386 end := heapBitsForAddr(x + size - sys.PtrSize) 1387 if outOfPlace { 1388 // In out-of-place copying, we just advance 1389 // using next. 1390 end = end.next() 1391 } else { 1392 // Don't use next because that may advance to 1393 // the next arena and the in-place logic 1394 // doesn't do that. 1395 end.shift += heapBitsShift 1396 if end.shift == 4*heapBitsShift { 1397 end.bitp, end.shift = add1(end.bitp), 0 1398 } 1399 } 1400 if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) { 1401 println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size) 1402 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n") 1403 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n") 1404 h0 := heapBitsForAddr(x) 1405 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n") 1406 print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n") 1407 throw("bad heapBitsSetType") 1408 } 1409 1410 // Double-check that bits to be written were written correctly. 1411 // Does not check that other bits were not written, unfortunately. 1412 h := heapBitsForAddr(x) 1413 nptr := typ.ptrdata / sys.PtrSize 1414 ndata := typ.size / sys.PtrSize 1415 count := dataSize / typ.size 1416 totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize 1417 for i := uintptr(0); i < size/sys.PtrSize; i++ { 1418 j := i % ndata 1419 var have, want uint8 1420 have = (*h.bitp >> h.shift) & (bitPointer | bitScan) 1421 if i >= totalptr { 1422 if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 { 1423 // heapBitsSetTypeGCProg always fills 1424 // in full nibbles of bitScan. 1425 want = bitScan 1426 } 1427 } else { 1428 if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 { 1429 want |= bitPointer 1430 } 1431 want |= bitScan 1432 } 1433 if have != want { 1434 println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size) 1435 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n") 1436 print("kindGCProg=", typ.kind&kindGCProg != 0, " outOfPlace=", outOfPlace, "\n") 1437 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n") 1438 h0 := heapBitsForAddr(x) 1439 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n") 1440 print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n") 1441 print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n") 1442 println("at word", i, "offset", i*sys.PtrSize, "have", hex(have), "want", hex(want)) 1443 if typ.kind&kindGCProg != 0 { 1444 println("GC program:") 1445 dumpGCProg(addb(typ.gcdata, 4)) 1446 } 1447 throw("bad heapBitsSetType") 1448 } 1449 h = h.next() 1450 } 1451 if ptrmask == debugPtrmask.data { 1452 unlock(&debugPtrmask.lock) 1453 } 1454 } 1455 } 1456 1457 var debugPtrmask struct { 1458 lock mutex 1459 data *byte 1460 } 1461 1462 // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program. 1463 // progSize is the size of the memory described by the program. 1464 // elemSize is the size of the element that the GC program describes (a prefix of). 1465 // dataSize is the total size of the intended data, a multiple of elemSize. 1466 // allocSize is the total size of the allocated memory. 1467 // 1468 // GC programs are only used for large allocations. 1469 // heapBitsSetType requires that allocSize is a multiple of 4 words, 1470 // so that the relevant bitmap bytes are not shared with surrounding 1471 // objects. 1472 func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) { 1473 if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 { 1474 // Alignment will be wrong. 1475 throw("heapBitsSetTypeGCProg: small allocation") 1476 } 1477 var totalBits uintptr 1478 if elemSize == dataSize { 1479 totalBits = runGCProg(prog, nil, h.bitp, 2) 1480 if totalBits*sys.PtrSize != progSize { 1481 println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize) 1482 throw("heapBitsSetTypeGCProg: unexpected bit count") 1483 } 1484 } else { 1485 count := dataSize / elemSize 1486 1487 // Piece together program trailer to run after prog that does: 1488 // literal(0) 1489 // repeat(1, elemSize-progSize-1) // zeros to fill element size 1490 // repeat(elemSize, count-1) // repeat that element for count 1491 // This zero-pads the data remaining in the first element and then 1492 // repeats that first element to fill the array. 1493 var trailer [40]byte // 3 varints (max 10 each) + some bytes 1494 i := 0 1495 if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 { 1496 // literal(0) 1497 trailer[i] = 0x01 1498 i++ 1499 trailer[i] = 0 1500 i++ 1501 if n > 1 { 1502 // repeat(1, n-1) 1503 trailer[i] = 0x81 1504 i++ 1505 n-- 1506 for ; n >= 0x80; n >>= 7 { 1507 trailer[i] = byte(n | 0x80) 1508 i++ 1509 } 1510 trailer[i] = byte(n) 1511 i++ 1512 } 1513 } 1514 // repeat(elemSize/ptrSize, count-1) 1515 trailer[i] = 0x80 1516 i++ 1517 n := elemSize / sys.PtrSize 1518 for ; n >= 0x80; n >>= 7 { 1519 trailer[i] = byte(n | 0x80) 1520 i++ 1521 } 1522 trailer[i] = byte(n) 1523 i++ 1524 n = count - 1 1525 for ; n >= 0x80; n >>= 7 { 1526 trailer[i] = byte(n | 0x80) 1527 i++ 1528 } 1529 trailer[i] = byte(n) 1530 i++ 1531 trailer[i] = 0 1532 i++ 1533 1534 runGCProg(prog, &trailer[0], h.bitp, 2) 1535 1536 // Even though we filled in the full array just now, 1537 // record that we only filled in up to the ptrdata of the 1538 // last element. This will cause the code below to 1539 // memclr the dead section of the final array element, 1540 // so that scanobject can stop early in the final element. 1541 totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize 1542 } 1543 endProg := unsafe.Pointer(addb(h.bitp, (totalBits+3)/4)) 1544 endAlloc := unsafe.Pointer(addb(h.bitp, allocSize/sys.PtrSize/wordsPerBitmapByte)) 1545 memclrNoHeapPointers(endProg, uintptr(endAlloc)-uintptr(endProg)) 1546 } 1547 1548 // progToPointerMask returns the 1-bit pointer mask output by the GC program prog. 1549 // size the size of the region described by prog, in bytes. 1550 // The resulting bitvector will have no more than size/sys.PtrSize bits. 1551 func progToPointerMask(prog *byte, size uintptr) bitvector { 1552 n := (size/sys.PtrSize + 7) / 8 1553 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1] 1554 x[len(x)-1] = 0xa1 // overflow check sentinel 1555 n = runGCProg(prog, nil, &x[0], 1) 1556 if x[len(x)-1] != 0xa1 { 1557 throw("progToPointerMask: overflow") 1558 } 1559 return bitvector{int32(n), &x[0]} 1560 } 1561 1562 // Packed GC pointer bitmaps, aka GC programs. 1563 // 1564 // For large types containing arrays, the type information has a 1565 // natural repetition that can be encoded to save space in the 1566 // binary and in the memory representation of the type information. 1567 // 1568 // The encoding is a simple Lempel-Ziv style bytecode machine 1569 // with the following instructions: 1570 // 1571 // 00000000: stop 1572 // 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes 1573 // 10000000 n c: repeat the previous n bits c times; n, c are varints 1574 // 1nnnnnnn c: repeat the previous n bits c times; c is a varint 1575 1576 // runGCProg executes the GC program prog, and then trailer if non-nil, 1577 // writing to dst with entries of the given size. 1578 // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst. 1579 // If size == 2, dst is the 2-bit heap bitmap, and writes move backward 1580 // starting at dst (because the heap bitmap does). In this case, the caller guarantees 1581 // that only whole bytes in dst need to be written. 1582 // 1583 // runGCProg returns the number of 1- or 2-bit entries written to memory. 1584 func runGCProg(prog, trailer, dst *byte, size int) uintptr { 1585 dstStart := dst 1586 1587 // Bits waiting to be written to memory. 1588 var bits uintptr 1589 var nbits uintptr 1590 1591 p := prog 1592 Run: 1593 for { 1594 // Flush accumulated full bytes. 1595 // The rest of the loop assumes that nbits <= 7. 1596 for ; nbits >= 8; nbits -= 8 { 1597 if size == 1 { 1598 *dst = uint8(bits) 1599 dst = add1(dst) 1600 bits >>= 8 1601 } else { 1602 v := bits&bitPointerAll | bitScanAll 1603 *dst = uint8(v) 1604 dst = add1(dst) 1605 bits >>= 4 1606 v = bits&bitPointerAll | bitScanAll 1607 *dst = uint8(v) 1608 dst = add1(dst) 1609 bits >>= 4 1610 } 1611 } 1612 1613 // Process one instruction. 1614 inst := uintptr(*p) 1615 p = add1(p) 1616 n := inst & 0x7F 1617 if inst&0x80 == 0 { 1618 // Literal bits; n == 0 means end of program. 1619 if n == 0 { 1620 // Program is over; continue in trailer if present. 1621 if trailer != nil { 1622 p = trailer 1623 trailer = nil 1624 continue 1625 } 1626 break Run 1627 } 1628 nbyte := n / 8 1629 for i := uintptr(0); i < nbyte; i++ { 1630 bits |= uintptr(*p) << nbits 1631 p = add1(p) 1632 if size == 1 { 1633 *dst = uint8(bits) 1634 dst = add1(dst) 1635 bits >>= 8 1636 } else { 1637 v := bits&0xf | bitScanAll 1638 *dst = uint8(v) 1639 dst = add1(dst) 1640 bits >>= 4 1641 v = bits&0xf | bitScanAll 1642 *dst = uint8(v) 1643 dst = add1(dst) 1644 bits >>= 4 1645 } 1646 } 1647 if n %= 8; n > 0 { 1648 bits |= uintptr(*p) << nbits 1649 p = add1(p) 1650 nbits += n 1651 } 1652 continue Run 1653 } 1654 1655 // Repeat. If n == 0, it is encoded in a varint in the next bytes. 1656 if n == 0 { 1657 for off := uint(0); ; off += 7 { 1658 x := uintptr(*p) 1659 p = add1(p) 1660 n |= (x & 0x7F) << off 1661 if x&0x80 == 0 { 1662 break 1663 } 1664 } 1665 } 1666 1667 // Count is encoded in a varint in the next bytes. 1668 c := uintptr(0) 1669 for off := uint(0); ; off += 7 { 1670 x := uintptr(*p) 1671 p = add1(p) 1672 c |= (x & 0x7F) << off 1673 if x&0x80 == 0 { 1674 break 1675 } 1676 } 1677 c *= n // now total number of bits to copy 1678 1679 // If the number of bits being repeated is small, load them 1680 // into a register and use that register for the entire loop 1681 // instead of repeatedly reading from memory. 1682 // Handling fewer than 8 bits here makes the general loop simpler. 1683 // The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add 1684 // the pattern to a bit buffer holding at most 7 bits (a partial byte) 1685 // it will not overflow. 1686 src := dst 1687 const maxBits = sys.PtrSize*8 - 7 1688 if n <= maxBits { 1689 // Start with bits in output buffer. 1690 pattern := bits 1691 npattern := nbits 1692 1693 // If we need more bits, fetch them from memory. 1694 if size == 1 { 1695 src = subtract1(src) 1696 for npattern < n { 1697 pattern <<= 8 1698 pattern |= uintptr(*src) 1699 src = subtract1(src) 1700 npattern += 8 1701 } 1702 } else { 1703 src = subtract1(src) 1704 for npattern < n { 1705 pattern <<= 4 1706 pattern |= uintptr(*src) & 0xf 1707 src = subtract1(src) 1708 npattern += 4 1709 } 1710 } 1711 1712 // We started with the whole bit output buffer, 1713 // and then we loaded bits from whole bytes. 1714 // Either way, we might now have too many instead of too few. 1715 // Discard the extra. 1716 if npattern > n { 1717 pattern >>= npattern - n 1718 npattern = n 1719 } 1720 1721 // Replicate pattern to at most maxBits. 1722 if npattern == 1 { 1723 // One bit being repeated. 1724 // If the bit is 1, make the pattern all 1s. 1725 // If the bit is 0, the pattern is already all 0s, 1726 // but we can claim that the number of bits 1727 // in the word is equal to the number we need (c), 1728 // because right shift of bits will zero fill. 1729 if pattern == 1 { 1730 pattern = 1<<maxBits - 1 1731 npattern = maxBits 1732 } else { 1733 npattern = c 1734 } 1735 } else { 1736 b := pattern 1737 nb := npattern 1738 if nb+nb <= maxBits { 1739 // Double pattern until the whole uintptr is filled. 1740 for nb <= sys.PtrSize*8 { 1741 b |= b << nb 1742 nb += nb 1743 } 1744 // Trim away incomplete copy of original pattern in high bits. 1745 // TODO(rsc): Replace with table lookup or loop on systems without divide? 1746 nb = maxBits / npattern * npattern 1747 b &= 1<<nb - 1 1748 pattern = b 1749 npattern = nb 1750 } 1751 } 1752 1753 // Add pattern to bit buffer and flush bit buffer, c/npattern times. 1754 // Since pattern contains >8 bits, there will be full bytes to flush 1755 // on each iteration. 1756 for ; c >= npattern; c -= npattern { 1757 bits |= pattern << nbits 1758 nbits += npattern 1759 if size == 1 { 1760 for nbits >= 8 { 1761 *dst = uint8(bits) 1762 dst = add1(dst) 1763 bits >>= 8 1764 nbits -= 8 1765 } 1766 } else { 1767 for nbits >= 4 { 1768 *dst = uint8(bits&0xf | bitScanAll) 1769 dst = add1(dst) 1770 bits >>= 4 1771 nbits -= 4 1772 } 1773 } 1774 } 1775 1776 // Add final fragment to bit buffer. 1777 if c > 0 { 1778 pattern &= 1<<c - 1 1779 bits |= pattern << nbits 1780 nbits += c 1781 } 1782 continue Run 1783 } 1784 1785 // Repeat; n too large to fit in a register. 1786 // Since nbits <= 7, we know the first few bytes of repeated data 1787 // are already written to memory. 1788 off := n - nbits // n > nbits because n > maxBits and nbits <= 7 1789 if size == 1 { 1790 // Leading src fragment. 1791 src = subtractb(src, (off+7)/8) 1792 if frag := off & 7; frag != 0 { 1793 bits |= uintptr(*src) >> (8 - frag) << nbits 1794 src = add1(src) 1795 nbits += frag 1796 c -= frag 1797 } 1798 // Main loop: load one byte, write another. 1799 // The bits are rotating through the bit buffer. 1800 for i := c / 8; i > 0; i-- { 1801 bits |= uintptr(*src) << nbits 1802 src = add1(src) 1803 *dst = uint8(bits) 1804 dst = add1(dst) 1805 bits >>= 8 1806 } 1807 // Final src fragment. 1808 if c %= 8; c > 0 { 1809 bits |= (uintptr(*src) & (1<<c - 1)) << nbits 1810 nbits += c 1811 } 1812 } else { 1813 // Leading src fragment. 1814 src = subtractb(src, (off+3)/4) 1815 if frag := off & 3; frag != 0 { 1816 bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits 1817 src = add1(src) 1818 nbits += frag 1819 c -= frag 1820 } 1821 // Main loop: load one byte, write another. 1822 // The bits are rotating through the bit buffer. 1823 for i := c / 4; i > 0; i-- { 1824 bits |= (uintptr(*src) & 0xf) << nbits 1825 src = add1(src) 1826 *dst = uint8(bits&0xf | bitScanAll) 1827 dst = add1(dst) 1828 bits >>= 4 1829 } 1830 // Final src fragment. 1831 if c %= 4; c > 0 { 1832 bits |= (uintptr(*src) & (1<<c - 1)) << nbits 1833 nbits += c 1834 } 1835 } 1836 } 1837 1838 // Write any final bits out, using full-byte writes, even for the final byte. 1839 var totalBits uintptr 1840 if size == 1 { 1841 totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits 1842 nbits += -nbits & 7 1843 for ; nbits > 0; nbits -= 8 { 1844 *dst = uint8(bits) 1845 dst = add1(dst) 1846 bits >>= 8 1847 } 1848 } else { 1849 totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*4 + nbits 1850 nbits += -nbits & 3 1851 for ; nbits > 0; nbits -= 4 { 1852 v := bits&0xf | bitScanAll 1853 *dst = uint8(v) 1854 dst = add1(dst) 1855 bits >>= 4 1856 } 1857 } 1858 return totalBits 1859 } 1860 1861 // materializeGCProg allocates space for the (1-bit) pointer bitmask 1862 // for an object of size ptrdata. Then it fills that space with the 1863 // pointer bitmask specified by the program prog. 1864 // The bitmask starts at s.startAddr. 1865 // The result must be deallocated with dematerializeGCProg. 1866 func materializeGCProg(ptrdata uintptr, prog *byte) *mspan { 1867 // Each word of ptrdata needs one bit in the bitmap. 1868 bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize) 1869 // Compute the number of pages needed for bitmapBytes. 1870 pages := divRoundUp(bitmapBytes, pageSize) 1871 s := mheap_.allocManual(pages, spanAllocPtrScalarBits) 1872 runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1) 1873 return s 1874 } 1875 func dematerializeGCProg(s *mspan) { 1876 mheap_.freeManual(s, spanAllocPtrScalarBits) 1877 } 1878 1879 func dumpGCProg(p *byte) { 1880 nptr := 0 1881 for { 1882 x := *p 1883 p = add1(p) 1884 if x == 0 { 1885 print("\t", nptr, " end\n") 1886 break 1887 } 1888 if x&0x80 == 0 { 1889 print("\t", nptr, " lit ", x, ":") 1890 n := int(x+7) / 8 1891 for i := 0; i < n; i++ { 1892 print(" ", hex(*p)) 1893 p = add1(p) 1894 } 1895 print("\n") 1896 nptr += int(x) 1897 } else { 1898 nbit := int(x &^ 0x80) 1899 if nbit == 0 { 1900 for nb := uint(0); ; nb += 7 { 1901 x := *p 1902 p = add1(p) 1903 nbit |= int(x&0x7f) << nb 1904 if x&0x80 == 0 { 1905 break 1906 } 1907 } 1908 } 1909 count := 0 1910 for nb := uint(0); ; nb += 7 { 1911 x := *p 1912 p = add1(p) 1913 count |= int(x&0x7f) << nb 1914 if x&0x80 == 0 { 1915 break 1916 } 1917 } 1918 print("\t", nptr, " repeat ", nbit, " × ", count, "\n") 1919 nptr += nbit * count 1920 } 1921 } 1922 } 1923 1924 // Testing. 1925 1926 func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool { 1927 target := (*stkframe)(ctxt) 1928 if frame.sp <= target.sp && target.sp < frame.varp { 1929 *target = *frame 1930 return false 1931 } 1932 return true 1933 } 1934 1935 // gcbits returns the GC type info for x, for testing. 1936 // The result is the bitmap entries (0 or 1), one entry per byte. 1937 //go:linkname reflect_gcbits reflect.gcbits 1938 func reflect_gcbits(x interface{}) []byte { 1939 ret := getgcmask(x) 1940 typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem 1941 nptr := typ.ptrdata / sys.PtrSize 1942 for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 { 1943 ret = ret[:len(ret)-1] 1944 } 1945 return ret 1946 } 1947 1948 // Returns GC type info for the pointer stored in ep for testing. 1949 // If ep points to the stack, only static live information will be returned 1950 // (i.e. not for objects which are only dynamically live stack objects). 1951 func getgcmask(ep interface{}) (mask []byte) { 1952 e := *efaceOf(&ep) 1953 p := e.data 1954 t := e._type 1955 // data or bss 1956 for _, datap := range activeModules() { 1957 // data 1958 if datap.data <= uintptr(p) && uintptr(p) < datap.edata { 1959 bitmap := datap.gcdatamask.bytedata 1960 n := (*ptrtype)(unsafe.Pointer(t)).elem.size 1961 mask = make([]byte, n/sys.PtrSize) 1962 for i := uintptr(0); i < n; i += sys.PtrSize { 1963 off := (uintptr(p) + i - datap.data) / sys.PtrSize 1964 mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1 1965 } 1966 return 1967 } 1968 1969 // bss 1970 if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss { 1971 bitmap := datap.gcbssmask.bytedata 1972 n := (*ptrtype)(unsafe.Pointer(t)).elem.size 1973 mask = make([]byte, n/sys.PtrSize) 1974 for i := uintptr(0); i < n; i += sys.PtrSize { 1975 off := (uintptr(p) + i - datap.bss) / sys.PtrSize 1976 mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1 1977 } 1978 return 1979 } 1980 } 1981 1982 // heap 1983 if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 { 1984 hbits := heapBitsForAddr(base) 1985 n := s.elemsize 1986 mask = make([]byte, n/sys.PtrSize) 1987 for i := uintptr(0); i < n; i += sys.PtrSize { 1988 if hbits.isPointer() { 1989 mask[i/sys.PtrSize] = 1 1990 } 1991 if !hbits.morePointers() { 1992 mask = mask[:i/sys.PtrSize] 1993 break 1994 } 1995 hbits = hbits.next() 1996 } 1997 return 1998 } 1999 2000 // stack 2001 if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi { 2002 var frame stkframe 2003 frame.sp = uintptr(p) 2004 _g_ := getg() 2005 gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0) 2006 if frame.fn.valid() { 2007 locals, _, _ := getStackMap(&frame, nil, false) 2008 if locals.n == 0 { 2009 return 2010 } 2011 size := uintptr(locals.n) * sys.PtrSize 2012 n := (*ptrtype)(unsafe.Pointer(t)).elem.size 2013 mask = make([]byte, n/sys.PtrSize) 2014 for i := uintptr(0); i < n; i += sys.PtrSize { 2015 off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize 2016 mask[i/sys.PtrSize] = locals.ptrbit(off) 2017 } 2018 } 2019 return 2020 } 2021 2022 // otherwise, not something the GC knows about. 2023 // possibly read-only data, like malloc(0). 2024 // must not have pointers 2025 return 2026 }