github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/mbitmap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are
    10  // described by bitmaps with 1 bit per pointer-sized word. A "1" bit
    11  // means the word is a live pointer to be visited by the GC (referred to
    12  // as "pointer"). A "0" bit means the word should be ignored by GC
    13  // (referred to as "scalar", though it could be a dead pointer value).
    14  //
    15  // Heap bitmap
    16  //
    17  // The heap bitmap comprises 1 bit for each pointer-sized word in the heap,
    18  // recording whether a pointer is stored in that word or not. This bitmap
    19  // is stored in the heapArena metadata backing each heap arena.
    20  // That is, if ha is the heapArena for the arena starting at "start",
    21  // then ha.bitmap[0] holds the 64 bits for the 64 words "start"
    22  // through start+63*ptrSize, ha.bitmap[1] holds the entries for
    23  // start+64*ptrSize through start+127*ptrSize, and so on.
    24  // Bits correspond to words in little-endian order. ha.bitmap[0]&1 represents
    25  // the word at "start", ha.bitmap[0]>>1&1 represents the word at start+8, etc.
    26  // (For 32-bit platforms, s/64/32/.)
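        //
        // For example (an illustrative calculation for a 64-bit platform, not
        // code from this file): the word at address start+200*ptrSize is
        // described by bit 200 of the arena bitmap, that is
        //
        //	idx := uintptr(200) / 64 // == 3: which bitmap word
        //	off := uintptr(200) % 64 // == 8: which bit within that word
        //	isPtr := ha.bitmap[idx]>>off&1 == 1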
    27  //
    28  // We also keep a noMorePtrs bitmap which allows us to stop scanning
    29  // the heap bitmap early in certain situations. If ha.noMorePtrs[i]>>j&1
    30  // is 1, then the object containing the last word described by ha.bitmap[8*i+j]
    31  // has no more pointers beyond those described by ha.bitmap[8*i+j].
    32  // If ha.noMorePtrs[i]>>j&1 is set, the entries in ha.bitmap[8*i+j+1] and
    33  // beyond must all be zero until the start of the next object.
    34  //
    35  // The bitmap for noscan spans is set to all zero at span allocation time.
    36  //
    37  // The bitmap for unallocated objects in scannable spans is not maintained
    38  // (can be junk).
    39  
    40  package runtime
    41  
    42  import (
    43  	"internal/goarch"
    44  	"runtime/internal/atomic"
    45  	"runtime/internal/sys"
    46  	"unsafe"
    47  )
    48  
    49  // addb returns the byte pointer p+n.
    50  //
    51  //go:nowritebarrier
    52  //go:nosplit
    53  func addb(p *byte, n uintptr) *byte {
    54  	// Note: wrote out full expression instead of calling add(p, n)
    55  	// to reduce the number of temporaries generated by the
    56  	// compiler for this trivial expression during inlining.
    57  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
    58  }
    59  
    60  // subtractb returns the byte pointer p-n.
    61  //
    62  //go:nowritebarrier
    63  //go:nosplit
    64  func subtractb(p *byte, n uintptr) *byte {
    65  	// Note: wrote out full expression instead of calling add(p, -n)
    66  	// to reduce the number of temporaries generated by the
    67  	// compiler for this trivial expression during inlining.
    68  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
    69  }
    70  
    71  // add1 returns the byte pointer p+1.
    72  //
    73  //go:nowritebarrier
    74  //go:nosplit
    75  func add1(p *byte) *byte {
    76  	// Note: wrote out full expression instead of calling addb(p, 1)
    77  	// to reduce the number of temporaries generated by the
    78  	// compiler for this trivial expression during inlining.
    79  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
    80  }
    81  
    82  // subtract1 returns the byte pointer p-1.
    83  //
    84  // nosplit because it is used during write barriers and must not be preempted.
    85  //
    86  //go:nowritebarrier
    87  //go:nosplit
    88  func subtract1(p *byte) *byte {
    89  	// Note: wrote out full expression instead of calling subtractb(p, 1)
    90  	// to reduce the number of temporaries generated by the
    91  	// compiler for this trivial expression during inlining.
    92  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
    93  }
    94  
    95  // markBits provides access to the mark bit for an object in the heap.
    96  // bytep points to the byte holding the mark bit.
    97  // mask is a byte with a single bit set that can be &ed with *bytep
    98  // to see if the bit has been set.
    99  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   100  // index can be used along with span information to generate
   101  // the address of the object in the heap.
   102  // We maintain one set of mark bits for allocation and one for
   103  // marking purposes.
   104  type markBits struct {
   105  	bytep *uint8
   106  	mask  uint8
   107  	index uintptr
   108  }
   109  
   110  //go:nosplit
   111  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   112  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   113  	return markBits{bytep, mask, allocBitIndex}
   114  }
   115  
   116  // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
   117  // and negates them so that ctz (count trailing zeros) instructions
   118  // can be used. It then places these 8 bytes into the cached 64 bit
   119  // s.allocCache.
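        //
        // For example (illustrative span state, not from a real run): if those
        // 8 bytes are 0xff 0xff 0x0f 0x00 0x00 0x00 0x00 0x00, then aCache is
        // 0x0fffff, so s.allocCache = ^aCache has its low 20 bits clear and
        // sys.TrailingZeros64(s.allocCache) == 20, i.e. the first free object
        // in this 64-object window is at bit index 20.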
   120  func (s *mspan) refillAllocCache(whichByte uintptr) {
   121  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   122  	aCache := uint64(0)
   123  	aCache |= uint64(bytes[0])
   124  	aCache |= uint64(bytes[1]) << (1 * 8)
   125  	aCache |= uint64(bytes[2]) << (2 * 8)
   126  	aCache |= uint64(bytes[3]) << (3 * 8)
   127  	aCache |= uint64(bytes[4]) << (4 * 8)
   128  	aCache |= uint64(bytes[5]) << (5 * 8)
   129  	aCache |= uint64(bytes[6]) << (6 * 8)
   130  	aCache |= uint64(bytes[7]) << (7 * 8)
   131  	s.allocCache = ^aCache
   132  }
   133  
   134  // nextFreeIndex returns the index of the next free object in s at
   135  // or after s.freeindex.
   136  // There are hardware instructions that can be used to make this
   137  // faster if profiling warrants it.
   138  func (s *mspan) nextFreeIndex() uintptr {
   139  	sfreeindex := s.freeindex
   140  	snelems := s.nelems
   141  	if sfreeindex == snelems {
   142  		return sfreeindex
   143  	}
   144  	if sfreeindex > snelems {
   145  		throw("s.freeindex > s.nelems")
   146  	}
   147  
   148  	aCache := s.allocCache
   149  
   150  	bitIndex := sys.TrailingZeros64(aCache)
   151  	for bitIndex == 64 {
   152  		// Move index to start of next cached bits.
   153  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   154  		if sfreeindex >= snelems {
   155  			s.freeindex = snelems
   156  			return snelems
   157  		}
   158  		whichByte := sfreeindex / 8
   159  		// Refill s.allocCache with the next 64 alloc bits.
   160  		s.refillAllocCache(whichByte)
   161  		aCache = s.allocCache
   162  		bitIndex = sys.TrailingZeros64(aCache)
   163  		// nothing available in cached bits
   164  		// grab the next 8 bytes and try again.
   165  	}
   166  	result := sfreeindex + uintptr(bitIndex)
   167  	if result >= snelems {
   168  		s.freeindex = snelems
   169  		return snelems
   170  	}
   171  
   172  	s.allocCache >>= uint(bitIndex + 1)
   173  	sfreeindex = result + 1
   174  
   175  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   176  		// We just incremented s.freeindex so it isn't 0.
   177  		// As each 1 in s.allocCache was encountered and used for allocation
   178  		// it was shifted away. At this point s.allocCache contains all 0s.
   179  		// Refill s.allocCache so that it corresponds
   180  		// to the bits at s.allocBits starting at s.freeindex.
   181  		whichByte := sfreeindex / 8
   182  		s.refillAllocCache(whichByte)
   183  	}
   184  	s.freeindex = sfreeindex
   185  	return result
   186  }
   187  
   188  // isFree reports whether the index'th object in s is unallocated.
   189  //
   190  // The caller must ensure s.state is mSpanInUse, and there must have
   191  // been no preemption points since ensuring this (which could allow a
   192  // GC transition, which would allow the state to change).
   193  func (s *mspan) isFree(index uintptr) bool {
   194  	if index < s.freeindex {
   195  		return false
   196  	}
   197  	bytep, mask := s.allocBits.bitp(index)
   198  	return *bytep&mask == 0
   199  }
   200  
   201  // divideByElemSize returns n/s.elemsize.
   202  // n must be within [0, s.npages*_PageSize),
   203  // or may be exactly s.npages*_PageSize
   204  // if s.elemsize is from sizeclasses.go.
   205  func (s *mspan) divideByElemSize(n uintptr) uintptr {
   206  	const doubleCheck = false
   207  
   208  	// See explanation in mksizeclasses.go's computeDivMagic.
   209  	q := uintptr((uint64(n) * uint64(s.divMul)) >> 32)
   210  
   211  	if doubleCheck && q != n/s.elemsize {
   212  		println(n, "/", s.elemsize, "should be", n/s.elemsize, "but got", q)
   213  		throw("bad magic division")
   214  	}
   215  	return q
   216  }
   217  
   218  func (s *mspan) objIndex(p uintptr) uintptr {
   219  	return s.divideByElemSize(p - s.base())
   220  }
   221  
   222  func markBitsForAddr(p uintptr) markBits {
   223  	s := spanOf(p)
   224  	objIndex := s.objIndex(p)
   225  	return s.markBitsForIndex(objIndex)
   226  }
   227  
   228  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   229  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   230  	return markBits{bytep, mask, objIndex}
   231  }
   232  
   233  func (s *mspan) markBitsForBase() markBits {
   234  	return markBits{&s.gcmarkBits.x, uint8(1), 0}
   235  }
   236  
   237  // isMarked reports whether mark bit m is set.
   238  func (m markBits) isMarked() bool {
   239  	return *m.bytep&m.mask != 0
   240  }
   241  
   242  // setMarked sets the marked bit in the markbits, atomically.
   243  func (m markBits) setMarked() {
   244  	// Might be racing with other updates, so use atomic update always.
   245  	// We used to be clever here and use a non-atomic update in certain
   246  	// cases, but it's not worth the risk.
   247  	atomic.Or8(m.bytep, m.mask)
   248  }
   249  
   250  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   251  func (m markBits) setMarkedNonAtomic() {
   252  	*m.bytep |= m.mask
   253  }
   254  
   255  // clearMarked clears the marked bit in the markbits, atomically.
   256  func (m markBits) clearMarked() {
   257  	// Might be racing with other updates, so use atomic update always.
   258  	// We used to be clever here and use a non-atomic update in certain
   259  	// cases, but it's not worth the risk.
   260  	atomic.And8(m.bytep, ^m.mask)
   261  }
   262  
   263  // markBitsForSpan returns the markBits for the span base address base.
   264  func markBitsForSpan(base uintptr) (mbits markBits) {
   265  	mbits = markBitsForAddr(base)
   266  	if mbits.mask != 1 {
   267  		throw("markBitsForSpan: unaligned start")
   268  	}
   269  	return mbits
   270  }
   271  
   272  // advance advances the markBits to the next object in the span.
   273  func (m *markBits) advance() {
   274  	if m.mask == 1<<7 {
   275  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   276  		m.mask = 1
   277  	} else {
   278  		m.mask = m.mask << 1
   279  	}
   280  	m.index++
   281  }
   282  
   283  // clobberdeadPtr is a special value that is used by the compiler to
   284  // clobber dead stack slots when the -clobberdead flag is set.
   285  const clobberdeadPtr = uintptr(0xdeaddead | 0xdeaddead<<((^uintptr(0)>>63)*32))
   286  
   287  // badPointer prints diagnostics for a bad pointer in the Go heap and throws.
   288  func badPointer(s *mspan, p, refBase, refOff uintptr) {
   289  	// Typically this indicates an incorrect use
   290  	// of unsafe or cgo to store a bad pointer in
   291  	// the Go heap. It may also indicate a runtime
   292  	// bug.
   293  	//
   294  	// TODO(austin): We could be more aggressive
   295  	// and detect pointers to unallocated objects
   296  	// in allocated spans.
   297  	printlock()
   298  	print("runtime: pointer ", hex(p))
   299  	if s != nil {
   300  		state := s.state.get()
   301  		if state != mSpanInUse {
   302  			print(" to unallocated span")
   303  		} else {
   304  			print(" to unused region of span")
   305  		}
   306  		print(" span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", state)
   307  	}
   308  	print("\n")
   309  	if refBase != 0 {
   310  		print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   311  		gcDumpObject("object", refBase, refOff)
   312  	}
   313  	getg().m.traceback = 2
   314  	throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   315  }
   316  
   317  // findObject returns the base address for the heap object containing
   318  // the address p, the object's span, and the index of the object in s.
   319  // If p does not point into a heap object, it returns base == 0.
   320  //
   321  // If p is an invalid heap pointer and debug.invalidptr != 0,
   322  // findObject panics.
   323  //
   324  // refBase and refOff optionally give the base address of the object
   325  // in which the pointer p was found and the byte offset at which it
   326  // was found. These are used for error reporting.
   327  //
   328  // It is nosplit so it is safe for p to be a pointer to the current goroutine's stack.
   329  // Since p is a uintptr, it would not be adjusted if the stack were to move.
   330  //
   331  //go:nosplit
   332  func findObject(p, refBase, refOff uintptr) (base uintptr, s *mspan, objIndex uintptr) {
   333  	s = spanOf(p)
   334  	// If s is nil, the virtual address has never been part of the heap.
   335  	// This pointer may be to some mmap'd region, so we allow it.
   336  	if s == nil {
   337  		if (GOARCH == "amd64" || GOARCH == "arm64") && p == clobberdeadPtr && debug.invalidptr != 0 {
   338  			// Crash if clobberdeadPtr is seen. Only on AMD64 and ARM64 for now,
   339  			// as they are the only platforms where the compiler's clobberdead mode is
   340  			// implemented. On these platforms clobberdeadPtr cannot be a valid address.
   341  			badPointer(s, p, refBase, refOff)
   342  		}
   343  		return
   344  	}
   345  	// If p is a bad pointer, it may not be in s's bounds.
   346  	//
   347  	// Check s.state to synchronize with span initialization
   348  	// before checking other fields. See also spanOfHeap.
   349  	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit {
   350  		// Pointers into stacks are also ok, the runtime manages these explicitly.
   351  		if state == mSpanManual {
   352  			return
   353  		}
   354  		// The following ensures that we are rigorous about what data
   355  		// structures hold valid pointers.
   356  		if debug.invalidptr != 0 {
   357  			badPointer(s, p, refBase, refOff)
   358  		}
   359  		return
   360  	}
   361  
   362  	objIndex = s.objIndex(p)
   363  	base = s.base() + objIndex*s.elemsize
   364  	return
   365  }
   366  
   367  // verifyNotInHeapPtr reports whether converting the not-in-heap pointer into an unsafe.Pointer is ok.
   368  //
   369  //go:linkname reflect_verifyNotInHeapPtr reflect.verifyNotInHeapPtr
   370  func reflect_verifyNotInHeapPtr(p uintptr) bool {
   371  	// Conversion to a pointer is ok as long as findObject above does not call badPointer.
   372  	// Since we're already promised that p doesn't point into the heap, just disallow heap
   373  	// pointers and the special clobbered pointer.
   374  	return spanOf(p) == nil && p != clobberdeadPtr
   375  }
   376  
   377  const ptrBits = 8 * goarch.PtrSize
   378  
   379  // heapBits provides access to the bitmap bits for a single heap word.
   380  // The methods on heapBits take value receivers so that the compiler
   381  // can more easily inline calls to those methods and registerize the
   382  // struct fields independently.
   383  type heapBits struct {
   384  	// heapBits will report on pointers in the range [addr,addr+size).
   385  	// The low bit of mask contains the pointerness of the word at addr
   386  	// (assuming valid>0).
   387  	addr, size uintptr
   388  
   389  	// The next few pointer bits representing words starting at addr.
   390  	// Those bits already returned by next() are zeroed.
   391  	mask uintptr
   392  	// Number of bits in mask that are valid. mask is always less than 1<<valid.
   393  	valid uintptr
   394  }
   395  
   396  // heapBitsForAddr returns the heapBits for the address addr.
   397  // The caller must ensure [addr,addr+size) is in an allocated span.
   398  // In particular, be careful not to point past the end of an object.
   399  //
   400  // nosplit because it is used during write barriers and must not be preempted.
   401  //
   402  //go:nosplit
   403  func heapBitsForAddr(addr, size uintptr) heapBits {
   404  	// Find arena
   405  	ai := arenaIndex(addr)
   406  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   407  
   408  	// Word index in arena.
   409  	word := addr / goarch.PtrSize % heapArenaWords
   410  
   411  	// Word index and bit offset in bitmap array.
   412  	idx := word / ptrBits
   413  	off := word % ptrBits
   414  
   415  	// Grab relevant bits of bitmap.
   416  	mask := ha.bitmap[idx] >> off
   417  	valid := ptrBits - off
   418  
   419  	// Process depending on where the object ends.
   420  	nptr := size / goarch.PtrSize
   421  	if nptr < valid {
   422  		// Bits for this object end before the end of this bitmap word.
   423  		// Squash bits for the following objects.
   424  		mask &= 1<<(nptr&(ptrBits-1)) - 1
   425  		valid = nptr
   426  	} else if nptr == valid {
   427  		// Bits for this object end at exactly the end of this bitmap word.
   428  		// All good.
   429  	} else {
   430  		// Bits for this object extend into the next bitmap word. See if there
   431  		// may be any pointers recorded there.
   432  		if uintptr(ha.noMorePtrs[idx/8])>>(idx%8)&1 != 0 {
   433  			// No more pointers in this object after this bitmap word.
   434  			// Update size so we know not to look there.
   435  			size = valid * goarch.PtrSize
   436  		}
   437  	}
   438  
   439  	return heapBits{addr: addr, size: size, mask: mask, valid: valid}
   440  }
   441  
   442  // next returns the (absolute) address of the next known pointer and
   443  // a heapBits iterator representing any remaining pointers.
   444  // If there are no more pointers, returns address 0.
   445  // Note that next does not modify h. The caller must record the result.
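        //
        // A typical (illustrative) loop over an object's pointer slots, in the
        // style of getgcmask below:
        //
        //	h := heapBitsForAddr(base, size)
        //	for {
        //		var addr uintptr
        //		if h, addr = h.next(); addr == 0 {
        //			break
        //		}
        //		// addr is the address of a pointer slot in the object.
        //	}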
   446  //
   447  // nosplit because it is used during write barriers and must not be preempted.
   448  //
   449  //go:nosplit
   450  func (h heapBits) next() (heapBits, uintptr) {
   451  	for {
   452  		if h.mask != 0 {
   453  			var i int
   454  			if goarch.PtrSize == 8 {
   455  				i = sys.TrailingZeros64(uint64(h.mask))
   456  			} else {
   457  				i = sys.TrailingZeros32(uint32(h.mask))
   458  			}
   459  			h.mask ^= uintptr(1) << (i & (ptrBits - 1))
   460  			return h, h.addr + uintptr(i)*goarch.PtrSize
   461  		}
   462  
   463  		// Skip words that we've already processed.
   464  		h.addr += h.valid * goarch.PtrSize
   465  		h.size -= h.valid * goarch.PtrSize
   466  		if h.size == 0 {
   467  			return h, 0 // no more pointers
   468  		}
   469  
   470  		// Grab more bits and try again.
   471  		h = heapBitsForAddr(h.addr, h.size)
   472  	}
   473  }
   474  
   475  // nextFast is like next, but can return 0 even when there are more pointers
   476  // to be found. Callers should call next if nextFast returns 0 as its second
   477  // return value.
   478  //
   479  //	if h, addr = h.nextFast(); addr == 0 {
   480  //	    if h, addr = h.next(); addr == 0 {
   481  //	        ... no more pointers ...
   482  //	    }
   483  //	}
   484  //	... process pointer at addr ...
   485  //
   486  // nextFast is designed to be inlineable.
   487  //
   488  //go:nosplit
   489  func (h heapBits) nextFast() (heapBits, uintptr) {
   490  	// TESTQ/JEQ
   491  	if h.mask == 0 {
   492  		return h, 0
   493  	}
   494  	// BSFQ
   495  	var i int
   496  	if goarch.PtrSize == 8 {
   497  		i = sys.TrailingZeros64(uint64(h.mask))
   498  	} else {
   499  		i = sys.TrailingZeros32(uint32(h.mask))
   500  	}
   501  	// BTCQ
   502  	h.mask ^= uintptr(1) << (i & (ptrBits - 1))
   503  	// LEAQ (XX)(XX*8)
   504  	return h, h.addr + uintptr(i)*goarch.PtrSize
   505  }
   506  
   507  // bulkBarrierPreWrite executes a write barrier
   508  // for every pointer slot in the memory range [src, src+size),
   509  // using pointer/scalar information from [dst, dst+size).
   510  // This executes the write barriers necessary before a memmove.
   511  // src, dst, and size must be pointer-aligned.
   512  // The range [dst, dst+size) must lie within a single object.
   513  // It does not perform the actual writes.
   514  //
   515  // As a special case, src == 0 indicates that this is being used for a
   516  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   517  // barrier.
   518  //
   519  // Callers should call bulkBarrierPreWrite immediately before
   520  // calling memmove(dst, src, size). This function is marked nosplit
   521  // to avoid being preempted; the GC must not stop the goroutine
   522  // between the memmove and the execution of the barriers.
   523  // The caller is also responsible for cgo pointer checks if this
   524  // may be writing Go pointers into non-Go memory.
   525  //
   526  // The pointer bitmap is not maintained for allocations containing
   527  // no pointers at all; any caller of bulkBarrierPreWrite must first
   528  // make sure the underlying allocation contains pointers, usually
   529  // by checking typ.ptrdata.
   530  //
   531  // Callers must perform cgo checks if writeBarrier.cgo.
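        //
        // A typical (illustrative) call sequence, in the style of typedmemmove:
        //
        //	if writeBarrier.needed && typ.ptrdata != 0 {
        //		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.ptrdata)
        //	}
        //	memmove(dst, src, typ.size)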
   532  //
   533  //go:nosplit
   534  func bulkBarrierPreWrite(dst, src, size uintptr) {
   535  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   536  		throw("bulkBarrierPreWrite: unaligned arguments")
   537  	}
   538  	if !writeBarrier.needed {
   539  		return
   540  	}
   541  	if s := spanOf(dst); s == nil {
   542  		// If dst is a global, use the data or BSS bitmaps to
   543  		// execute write barriers.
   544  		for _, datap := range activeModules() {
   545  			if datap.data <= dst && dst < datap.edata {
   546  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   547  				return
   548  			}
   549  		}
   550  		for _, datap := range activeModules() {
   551  			if datap.bss <= dst && dst < datap.ebss {
   552  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   553  				return
   554  			}
   555  		}
   556  		return
   557  	} else if s.state.get() != mSpanInUse || dst < s.base() || s.limit <= dst {
   558  		// dst was heap memory at some point, but isn't now.
   559  		// It can't be a global. It must be either our stack,
   560  		// or in the case of direct channel sends, it could be
   561  		// another stack. Either way, no need for barriers.
   562  		// This will also catch if dst is in a freed span,
   563  		// though that should never happen.
   564  		return
   565  	}
   566  
   567  	buf := &getg().m.p.ptr().wbBuf
   568  	h := heapBitsForAddr(dst, size)
   569  	if src == 0 {
   570  		for {
   571  			var addr uintptr
   572  			if h, addr = h.next(); addr == 0 {
   573  				break
   574  			}
   575  			dstx := (*uintptr)(unsafe.Pointer(addr))
   576  			if !buf.putFast(*dstx, 0) {
   577  				wbBufFlush(nil, 0)
   578  			}
   579  		}
   580  	} else {
   581  		for {
   582  			var addr uintptr
   583  			if h, addr = h.next(); addr == 0 {
   584  				break
   585  			}
   586  			dstx := (*uintptr)(unsafe.Pointer(addr))
   587  			srcx := (*uintptr)(unsafe.Pointer(src + (addr - dst)))
   588  			if !buf.putFast(*dstx, *srcx) {
   589  				wbBufFlush(nil, 0)
   590  			}
   591  		}
   592  	}
   593  }
   594  
   595  // bulkBarrierPreWriteSrcOnly is like bulkBarrierPreWrite but
   596  // does not execute write barriers for [dst, dst+size).
   597  //
   598  // In addition to the requirements of bulkBarrierPreWrite,
   599  // callers need to ensure [dst, dst+size) is zeroed.
   600  //
   601  // This is used for special cases where e.g. dst was just
   602  // created and zeroed with malloc.
   603  //
   604  //go:nosplit
   605  func bulkBarrierPreWriteSrcOnly(dst, src, size uintptr) {
   606  	if (dst|src|size)&(goarch.PtrSize-1) != 0 {
   607  		throw("bulkBarrierPreWrite: unaligned arguments")
   608  	}
   609  	if !writeBarrier.needed {
   610  		return
   611  	}
   612  	buf := &getg().m.p.ptr().wbBuf
   613  	h := heapBitsForAddr(dst, size)
   614  	for {
   615  		var addr uintptr
   616  		if h, addr = h.next(); addr == 0 {
   617  			break
   618  		}
   619  		srcx := (*uintptr)(unsafe.Pointer(addr - dst + src))
   620  		if !buf.putFast(0, *srcx) {
   621  			wbBufFlush(nil, 0)
   622  		}
   623  	}
   624  }
   625  
   626  // bulkBarrierBitmap executes write barriers for copying from [src,
   627  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. dst is
   628  // assumed to start maskOffset bytes into the data covered by the
   629  // bitmap in bits (which may not be a multiple of 8).
   630  //
   631  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   632  //
   633  //go:nosplit
   634  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   635  	word := maskOffset / goarch.PtrSize
   636  	bits = addb(bits, word/8)
   637  	mask := uint8(1) << (word % 8)
   638  
   639  	buf := &getg().m.p.ptr().wbBuf
   640  	for i := uintptr(0); i < size; i += goarch.PtrSize {
   641  		if mask == 0 {
   642  			bits = addb(bits, 1)
   643  			if *bits == 0 {
   644  				// Skip 8 words.
   645  				i += 7 * goarch.PtrSize
   646  				continue
   647  			}
   648  			mask = 1
   649  		}
   650  		if *bits&mask != 0 {
   651  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   652  			if src == 0 {
   653  				if !buf.putFast(*dstx, 0) {
   654  					wbBufFlush(nil, 0)
   655  				}
   656  			} else {
   657  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   658  				if !buf.putFast(*dstx, *srcx) {
   659  					wbBufFlush(nil, 0)
   660  				}
   661  			}
   662  		}
   663  		mask <<= 1
   664  	}
   665  }
   666  
   667  // typeBitsBulkBarrier executes a write barrier for every
   668  // pointer that would be copied from [src, src+size) to [dst,
   669  // dst+size) by a memmove using the type bitmap to locate those
   670  // pointer slots.
   671  //
   672  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   673  // dst, src, and size must be pointer-aligned.
   674  // The type typ must have a plain bitmap, not a GC program.
   675  // The only use of this function is in channel sends, and the
   676  // 64 kB channel element limit takes care of this for us.
   677  //
   678  // Must not be preempted because it typically runs right before memmove,
   679  // and the GC must observe the barriers and the memmove as one atomic action.
   680  //
   681  // Callers must perform cgo checks if writeBarrier.cgo.
   682  //
   683  //go:nosplit
   684  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   685  	if typ == nil {
   686  		throw("runtime: typeBitsBulkBarrier without type")
   687  	}
   688  	if typ.size != size {
   689  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
   690  		throw("runtime: invalid typeBitsBulkBarrier")
   691  	}
   692  	if typ.kind&kindGCProg != 0 {
   693  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
   694  		throw("runtime: invalid typeBitsBulkBarrier")
   695  	}
   696  	if !writeBarrier.needed {
   697  		return
   698  	}
   699  	ptrmask := typ.gcdata
   700  	buf := &getg().m.p.ptr().wbBuf
   701  	var bits uint32
   702  	for i := uintptr(0); i < typ.ptrdata; i += goarch.PtrSize {
   703  		if i&(goarch.PtrSize*8-1) == 0 {
   704  			bits = uint32(*ptrmask)
   705  			ptrmask = addb(ptrmask, 1)
   706  		} else {
   707  			bits = bits >> 1
   708  		}
   709  		if bits&1 != 0 {
   710  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   711  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   712  			if !buf.putFast(*dstx, *srcx) {
   713  				wbBufFlush(nil, 0)
   714  			}
   715  		}
   716  	}
   717  }
   718  
   719  // initHeapBits initializes the heap bitmap for a span.
   720  // If this is a span of single pointer allocations, it initializes all
   721  // words to pointer. If forceClear is true, it clears all bits.
   722  func (s *mspan) initHeapBits(forceClear bool) {
   723  	if forceClear || s.spanclass.noscan() {
   724  		// Set all the pointer bits to zero. We do this once
   725  		// when the span is allocated so we don't have to do it
   726  		// for each object allocation.
   727  		base := s.base()
   728  		size := s.npages * pageSize
   729  		h := writeHeapBitsForAddr(base)
   730  		h.flush(base, size)
   731  		return
   732  	}
   733  	isPtrs := goarch.PtrSize == 8 && s.elemsize == goarch.PtrSize
   734  	if !isPtrs {
   735  		return // nothing to do
   736  	}
   737  	h := writeHeapBitsForAddr(s.base())
   738  	size := s.npages * pageSize
   739  	nptrs := size / goarch.PtrSize
   740  	for i := uintptr(0); i < nptrs; i += ptrBits {
   741  		h = h.write(^uintptr(0), ptrBits)
   742  	}
   743  	h.flush(s.base(), size)
   744  }
   745  
   746  // countAlloc returns the number of objects allocated in span s by
   747  // scanning the allocation bitmap.
   748  func (s *mspan) countAlloc() int {
   749  	count := 0
   750  	bytes := divRoundUp(s.nelems, 8)
   751  	// Iterate over each 8-byte chunk and count allocations
   752  	// with an intrinsic. Note that newMarkBits guarantees that
   753  	// gcmarkBits will be 8-byte aligned, so we don't have to
   754  	// worry about edge cases; irrelevant bits will simply be zero.
   755  	for i := uintptr(0); i < bytes; i += 8 {
   756  		// Extract 64 bits from the byte pointer and get a OnesCount.
   757  		// Note that the unsafe cast here doesn't preserve endianness,
   758  		// but that's OK. We only care about how many bits are 1, not
   759  		// about the order we discover them in.
   760  		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))
   761  		count += sys.OnesCount64(mrkBits)
   762  	}
   763  	return count
   764  }
   765  
   766  type writeHeapBits struct {
   767  	addr  uintptr // address that the low bit of mask represents the pointer state of.
   768  	mask  uintptr // some pointer bits starting at the address addr.
   769  	valid uintptr // number of bits in mask that are valid (including low)
   770  	low   uintptr // number of low-order bits to not overwrite
   771  }
   772  
   773  func writeHeapBitsForAddr(addr uintptr) (h writeHeapBits) {
   774  	// We start writing bits maybe in the middle of a heap bitmap word.
   775  	// Remember how many bits into the word we started, so we can be sure
   776  	// not to overwrite the previous bits.
   777  	h.low = addr / goarch.PtrSize % ptrBits
   778  
   779  	// round down to heap word that starts the bitmap word.
   780  	h.addr = addr - h.low*goarch.PtrSize
   781  
   782  	// We don't have any bits yet.
   783  	h.mask = 0
   784  	h.valid = h.low
   785  
   786  	return
   787  }
   788  
   789  // write appends the pointerness of the next valid pointer slots
   790  // using the low valid bits of bits. 1=pointer, 0=scalar.
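        //
        // For example (illustrative): h.write(0b101, 3) records that the next
        // three heap words are pointer, scalar, pointer, in that order.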
   791  func (h writeHeapBits) write(bits, valid uintptr) writeHeapBits {
   792  	if h.valid+valid <= ptrBits {
   793  		// Fast path - just accumulate the bits.
   794  		h.mask |= bits << h.valid
   795  		h.valid += valid
   796  		return h
   797  	}
   798  	// Too many bits to fit in this word. Write the current word
   799  	// out and move on to the next word.
   800  
   801  	data := h.mask | bits<<h.valid       // mask for this word
   802  	h.mask = bits >> (ptrBits - h.valid) // leftover for next word
   803  	h.valid += valid - ptrBits           // have h.valid+valid bits, writing ptrBits of them
   804  
   805  	// Flush mask to the memory bitmap.
   806  	// TODO: figure out how to cache arena lookup.
   807  	ai := arenaIndex(h.addr)
   808  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   809  	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   810  	m := uintptr(1)<<h.low - 1
   811  	ha.bitmap[idx] = ha.bitmap[idx]&m | data
   812  	// Note: no synchronization required for this write because
   813  	// the allocator has exclusive access to the page, and the bitmap
   814  	// entries are all for a single page. Also, visibility of these
   815  	// writes is guaranteed by the publication barrier in mallocgc.
   816  
   817  	// Clear noMorePtrs bit, since we're going to be writing bits
   818  	// into the following word.
   819  	ha.noMorePtrs[idx/8] &^= uint8(1) << (idx % 8)
   820  	// Note: same as above
   821  
   822  	// Move to next word of bitmap.
   823  	h.addr += ptrBits * goarch.PtrSize
   824  	h.low = 0
   825  	return h
   826  }
   827  
   828  // Add padding of size bytes.
   829  func (h writeHeapBits) pad(size uintptr) writeHeapBits {
   830  	if size == 0 {
   831  		return h
   832  	}
   833  	words := size / goarch.PtrSize
   834  	for words > ptrBits {
   835  		h = h.write(0, ptrBits)
   836  		words -= ptrBits
   837  	}
   838  	return h.write(0, words)
   839  }
   840  
   841  // Flush the bits that have been written, and add zeros as needed
   842  // to cover the full object [addr, addr+size).
   843  func (h writeHeapBits) flush(addr, size uintptr) {
   844  	// zeros counts the number of bits needed to represent the object minus the
   845  	// number of bits we've already written. This is the number of 0 bits
   846  	// that need to be added.
   847  	zeros := (addr+size-h.addr)/goarch.PtrSize - h.valid
   848  
   849  	// Add zero bits up to the bitmap word boundary
   850  	if zeros > 0 {
   851  		z := ptrBits - h.valid
   852  		if z > zeros {
   853  			z = zeros
   854  		}
   855  		h.valid += z
   856  		zeros -= z
   857  	}
   858  
   859  	// Find word in bitmap that we're going to write.
   860  	ai := arenaIndex(h.addr)
   861  	ha := mheap_.arenas[ai.l1()][ai.l2()]
   862  	idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   863  
   864  	// Write remaining bits.
   865  	if h.valid != h.low {
   866  		m := uintptr(1)<<h.low - 1      // don't clear existing bits below "low"
   867  		m |= ^(uintptr(1)<<h.valid - 1) // don't clear existing bits above "valid"
   868  		ha.bitmap[idx] = ha.bitmap[idx]&m | h.mask
   869  	}
   870  	if zeros == 0 {
   871  		return
   872  	}
   873  
   874  	// Record in the noMorePtrs map that there won't be any more 1 bits,
   875  	// so readers can stop early.
   876  	ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
   877  
   878  	// Advance to next bitmap word.
   879  	h.addr += ptrBits * goarch.PtrSize
   880  
   881  	// Continue on writing zeros for the rest of the object.
   882  	// For standard use of the ptr bits this is not required, as
   883  	// the bits are read from the beginning of the object. Some uses,
   884  	// like noscan spans, oblets, bulk write barriers, and cgocheck, might
   885  	// start mid-object, so these writes are still required.
   886  	for {
   887  		// Write zero bits.
   888  		ai := arenaIndex(h.addr)
   889  		ha := mheap_.arenas[ai.l1()][ai.l2()]
   890  		idx := h.addr / (ptrBits * goarch.PtrSize) % heapArenaBitmapWords
   891  		if zeros < ptrBits {
   892  			ha.bitmap[idx] &^= uintptr(1)<<zeros - 1
   893  			break
   894  		} else if zeros == ptrBits {
   895  			ha.bitmap[idx] = 0
   896  			break
   897  		} else {
   898  			ha.bitmap[idx] = 0
   899  			zeros -= ptrBits
   900  		}
   901  		ha.noMorePtrs[idx/8] |= uint8(1) << (idx % 8)
   902  		h.addr += ptrBits * goarch.PtrSize
   903  	}
   904  }
   905  
   906  // Read the bytes starting at the aligned pointer p into a uintptr.
   907  // Read is little-endian.
   908  func readUintptr(p *byte) uintptr {
   909  	x := *(*uintptr)(unsafe.Pointer(p))
   910  	if goarch.BigEndian {
   911  		if goarch.PtrSize == 8 {
   912  			return uintptr(sys.Bswap64(uint64(x)))
   913  		}
   914  		return uintptr(sys.Bswap32(uint32(x)))
   915  	}
   916  	return x
   917  }
   918  
   919  // heapBitsSetType records that the new allocation [x, x+size)
   920  // holds in [x, x+dataSize) one or more values of type typ.
   921  // (The number of values is given by dataSize / typ.size.)
   922  // If dataSize < size, the fragment [x+dataSize, x+size) is
   923  // recorded as non-pointer data.
   924  // It is known that the type has pointers somewhere;
   925  // malloc does not call heapBitsSetType when there are no pointers,
   926  // because all free objects are marked as noscan during
   927  // heapBitsSweepSpan.
   928  //
   929  // There can only be one allocation from a given span active at a time,
   930  // and the bitmap for a span always falls on word boundaries,
   931  // so there are no write-write races for access to the heap bitmap.
   932  // Hence, heapBitsSetType can access the bitmap without atomics.
   933  //
   934  // There can be read-write races between heapBitsSetType and things
   935  // that read the heap bitmap like scanobject. However, since
   936  // heapBitsSetType is only used for objects that have not yet been
   937  // made reachable, readers will ignore bits being modified by this
   938  // function. This does mean this function cannot transiently modify
   939  // bits that belong to neighboring objects. Also, on weakly-ordered
   940  // machines, callers must execute a store/store (publication) barrier
   941  // between calling this function and making the object reachable.
   942  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   943  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   944  
   945  	if doubleCheck && dataSize%typ.size != 0 {
   946  		throw("heapBitsSetType: dataSize not a multiple of typ.size")
   947  	}
   948  
   949  	if goarch.PtrSize == 8 && size == goarch.PtrSize {
   950  		// It's one word and it has pointers, so it must be a pointer.
   951  		// Since all allocated one-word objects are pointers
   952  		// (non-pointers are aggregated into tinySize allocations),
   953  		// (*mspan).initHeapBits sets the pointer bits for us.
   954  		// Nothing to do here.
   955  		if doubleCheck {
   956  			h, addr := heapBitsForAddr(x, size).next()
   957  			if addr != x {
   958  				throw("heapBitsSetType: pointer bit missing")
   959  			}
   960  			_, addr = h.next()
   961  			if addr != 0 {
   962  				throw("heapBitsSetType: second pointer bit found")
   963  			}
   964  		}
   965  		return
   966  	}
   967  
   968  	h := writeHeapBitsForAddr(x)
   969  
   970  	// Handle GC program.
   971  	if typ.kind&kindGCProg != 0 {
   972  		// Expand the gc program into the storage we're going to use for the actual object.
   973  		obj := (*uint8)(unsafe.Pointer(x))
   974  		n := runGCProg(addb(typ.gcdata, 4), obj)
   975  		// Use the expanded program to set the heap bits.
   976  		for i := uintptr(0); true; i += typ.size {
   977  			// Copy expanded program to heap bitmap.
   978  			p := obj
   979  			j := n
   980  			for j > 8 {
   981  				h = h.write(uintptr(*p), 8)
   982  				p = add1(p)
   983  				j -= 8
   984  			}
   985  			h = h.write(uintptr(*p), j)
   986  
   987  			if i+typ.size == dataSize {
   988  				break // no padding after last element
   989  			}
   990  
   991  			// Pad with zeros to the start of the next element.
   992  			h = h.pad(typ.size - n*goarch.PtrSize)
   993  		}
   994  
   995  		h.flush(x, size)
   996  
   997  		// Erase the expanded GC program.
   998  		memclrNoHeapPointers(unsafe.Pointer(obj), (n+7)/8)
   999  		return
  1000  	}
  1001  
  1002  	// Note about sizes:
  1003  	//
  1004  	// typ.size is the size of the object in bytes,
  1005  	// and typ.ptrdata is the size in bytes of the prefix
  1006  	// of the object that contains pointers. That is, the final
  1007  	// typ.size - typ.ptrdata bytes contain no pointers.
  1008  	// This allows optimization of a common pattern where
  1009  	// an object has a small header followed by a large scalar
  1010  	// buffer. If we know the pointers are over, we don't have
  1011  	// to scan the buffer's heap bitmap at all.
  1012  	// The 1-bit ptrmasks are sized to contain only bits for
  1013  	// the typ.ptrdata prefix, zero padded out to a full byte
  1014  	// of bitmap. If there is more room in the allocated object,
  1015  	// that space is pointerless. The noMorePtrs bitmap will prevent
  1016  	// scanning large pointerless tails of an object.
  1017  	//
  1018  	// Replicated copies are not as nice: if there is an array of
  1019  	// objects with scalar tails, all tails but the last have to
  1020  	// be initialized, because there is no way to say "skip forward".
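        	//
        	// For example (an illustrative type, not taken from this file):
        	//
        	//	type T struct {
        	//		p   *int     // pointer prefix
        	//		buf [64]byte // pointerless tail
        	//	}
        	//
        	// has typ.ptrdata = 8 and typ.size = 72 on a 64-bit platform, so
        	// only one ptrmask bit is stored (padded out to a byte) and the
        	// 64-byte scalar tail is described by zero heap-bitmap bits.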
  1021  
  1022  	ptrs := typ.ptrdata / goarch.PtrSize
  1023  	if typ.size == dataSize { // Single element
  1024  		if ptrs <= ptrBits { // Single small element
  1025  			m := readUintptr(typ.gcdata)
  1026  			h = h.write(m, ptrs)
  1027  		} else { // Single large element
  1028  			p := typ.gcdata
  1029  			for {
  1030  				h = h.write(readUintptr(p), ptrBits)
  1031  				p = addb(p, ptrBits/8)
  1032  				ptrs -= ptrBits
  1033  				if ptrs <= ptrBits {
  1034  					break
  1035  				}
  1036  			}
  1037  			m := readUintptr(p)
  1038  			h = h.write(m, ptrs)
  1039  		}
  1040  	} else { // Repeated element
  1041  		words := typ.size / goarch.PtrSize // total words, including scalar tail
  1042  		if words <= ptrBits {              // Repeated small element
  1043  			n := dataSize / typ.size
  1044  			m := readUintptr(typ.gcdata)
  1045  			// Make larger unit to repeat
  1046  			for words <= ptrBits/2 {
  1047  				if n&1 != 0 {
  1048  					h = h.write(m, words)
  1049  				}
  1050  				n /= 2
  1051  				m |= m << words
  1052  				ptrs += words
  1053  				words *= 2
  1054  				if n == 1 {
  1055  					break
  1056  				}
  1057  			}
  1058  			for n > 1 {
  1059  				h = h.write(m, words)
  1060  				n--
  1061  			}
  1062  			h = h.write(m, ptrs)
  1063  		} else { // Repeated large element
  1064  			for i := uintptr(0); true; i += typ.size {
  1065  				p := typ.gcdata
  1066  				j := ptrs
  1067  				for j > ptrBits {
  1068  					h = h.write(readUintptr(p), ptrBits)
  1069  					p = addb(p, ptrBits/8)
  1070  					j -= ptrBits
  1071  				}
  1072  				m := readUintptr(p)
  1073  				h = h.write(m, j)
  1074  				if i+typ.size == dataSize {
  1075  					break // don't need the trailing nonptr bits on the last element.
  1076  				}
  1077  				// Pad with zeros to the start of the next element.
  1078  				h = h.pad(typ.size - typ.ptrdata)
  1079  			}
  1080  		}
  1081  	}
  1082  	h.flush(x, size)
  1083  
  1084  	if doubleCheck {
  1085  		h := heapBitsForAddr(x, size)
  1086  		for i := uintptr(0); i < size; i += goarch.PtrSize {
  1087  			// Compute the pointer bit we want at offset i.
  1088  			want := false
  1089  			if i < dataSize {
  1090  				off := i % typ.size
  1091  				if off < typ.ptrdata {
  1092  					j := off / goarch.PtrSize
  1093  					want = *addb(typ.gcdata, j/8)>>(j%8)&1 != 0
  1094  				}
  1095  			}
  1096  			if want {
  1097  				var addr uintptr
  1098  				h, addr = h.next()
  1099  				if addr != x+i {
  1100  					throw("heapBitsSetType: pointer entry not correct")
  1101  				}
  1102  			}
  1103  		}
  1104  		if _, addr := h.next(); addr != 0 {
  1105  			throw("heapBitsSetType: extra pointer")
  1106  		}
  1107  	}
  1108  }
  1109  
  1110  var debugPtrmask struct {
  1111  	lock mutex
  1112  	data *byte
  1113  }
  1114  
  1115  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1116  // size is the size of the region described by prog, in bytes.
  1117  // The resulting bitvector will have no more than size/goarch.PtrSize bits.
  1118  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1119  	n := (size/goarch.PtrSize + 7) / 8
  1120  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1121  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1122  	n = runGCProg(prog, &x[0])
  1123  	if x[len(x)-1] != 0xa1 {
  1124  		throw("progToPointerMask: overflow")
  1125  	}
  1126  	return bitvector{int32(n), &x[0]}
  1127  }
  1128  
  1129  // Packed GC pointer bitmaps, aka GC programs.
  1130  //
  1131  // For large types containing arrays, the type information has a
  1132  // natural repetition that can be encoded to save space in the
  1133  // binary and in the memory representation of the type information.
  1134  //
  1135  // The encoding is a simple Lempel-Ziv style bytecode machine
  1136  // with the following instructions:
  1137  //
  1138  //	00000000: stop
  1139  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1140  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1141  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
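        //
        // For example (an illustrative encoding, not output captured from the
        // compiler): the 4-bit pointer mask 1111 of a [4]*byte value could be
        // encoded as
        //
        //	0x01 0x01  emit 1 bit (a 1) from the next byte
        //	0x81 0x03  repeat the previous 1 bit 3 more times
        //	0x00       stop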
  1142  
  1143  // runGCProg returns the number of 1-bit entries written to memory.
  1144  func runGCProg(prog, dst *byte) uintptr {
  1145  	dstStart := dst
  1146  
  1147  	// Bits waiting to be written to memory.
  1148  	var bits uintptr
  1149  	var nbits uintptr
  1150  
  1151  	p := prog
  1152  Run:
  1153  	for {
  1154  		// Flush accumulated full bytes.
  1155  		// The rest of the loop assumes that nbits <= 7.
  1156  		for ; nbits >= 8; nbits -= 8 {
  1157  			*dst = uint8(bits)
  1158  			dst = add1(dst)
  1159  			bits >>= 8
  1160  		}
  1161  
  1162  		// Process one instruction.
  1163  		inst := uintptr(*p)
  1164  		p = add1(p)
  1165  		n := inst & 0x7F
  1166  		if inst&0x80 == 0 {
  1167  			// Literal bits; n == 0 means end of program.
  1168  			if n == 0 {
  1169  				// Program is over.
  1170  				break Run
  1171  			}
  1172  			nbyte := n / 8
  1173  			for i := uintptr(0); i < nbyte; i++ {
  1174  				bits |= uintptr(*p) << nbits
  1175  				p = add1(p)
  1176  				*dst = uint8(bits)
  1177  				dst = add1(dst)
  1178  				bits >>= 8
  1179  			}
  1180  			if n %= 8; n > 0 {
  1181  				bits |= uintptr(*p) << nbits
  1182  				p = add1(p)
  1183  				nbits += n
  1184  			}
  1185  			continue Run
  1186  		}
  1187  
  1188  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
  1189  		if n == 0 {
  1190  			for off := uint(0); ; off += 7 {
  1191  				x := uintptr(*p)
  1192  				p = add1(p)
  1193  				n |= (x & 0x7F) << off
  1194  				if x&0x80 == 0 {
  1195  					break
  1196  				}
  1197  			}
  1198  		}
  1199  
  1200  		// Count is encoded in a varint in the next bytes.
  1201  		c := uintptr(0)
  1202  		for off := uint(0); ; off += 7 {
  1203  			x := uintptr(*p)
  1204  			p = add1(p)
  1205  			c |= (x & 0x7F) << off
  1206  			if x&0x80 == 0 {
  1207  				break
  1208  			}
  1209  		}
  1210  		c *= n // now total number of bits to copy
  1211  
  1212  		// If the number of bits being repeated is small, load them
  1213  		// into a register and use that register for the entire loop
  1214  		// instead of repeatedly reading from memory.
  1215  		// Handling fewer than 8 bits here makes the general loop simpler.
  1216  		// The cutoff is goarch.PtrSize*8 - 7 to guarantee that when we add
  1217  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1218  		// it will not overflow.
  1219  		src := dst
  1220  		const maxBits = goarch.PtrSize*8 - 7
  1221  		if n <= maxBits {
  1222  			// Start with bits in output buffer.
  1223  			pattern := bits
  1224  			npattern := nbits
  1225  
  1226  			// If we need more bits, fetch them from memory.
  1227  			src = subtract1(src)
  1228  			for npattern < n {
  1229  				pattern <<= 8
  1230  				pattern |= uintptr(*src)
  1231  				src = subtract1(src)
  1232  				npattern += 8
  1233  			}
  1234  
  1235  			// We started with the whole bit output buffer,
  1236  			// and then we loaded bits from whole bytes.
  1237  			// Either way, we might now have too many instead of too few.
  1238  			// Discard the extra.
  1239  			if npattern > n {
  1240  				pattern >>= npattern - n
  1241  				npattern = n
  1242  			}
  1243  
  1244  			// Replicate pattern to at most maxBits.
  1245  			if npattern == 1 {
  1246  				// One bit being repeated.
  1247  				// If the bit is 1, make the pattern all 1s.
  1248  				// If the bit is 0, the pattern is already all 0s,
  1249  				// but we can claim that the number of bits
  1250  				// in the word is equal to the number we need (c),
  1251  				// because right shift of bits will zero fill.
  1252  				if pattern == 1 {
  1253  					pattern = 1<<maxBits - 1
  1254  					npattern = maxBits
  1255  				} else {
  1256  					npattern = c
  1257  				}
  1258  			} else {
  1259  				b := pattern
  1260  				nb := npattern
  1261  				if nb+nb <= maxBits {
  1262  					// Double pattern until the whole uintptr is filled.
  1263  					for nb <= goarch.PtrSize*8 {
  1264  						b |= b << nb
  1265  						nb += nb
  1266  					}
  1267  					// Trim away incomplete copy of original pattern in high bits.
  1268  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
  1269  					nb = maxBits / npattern * npattern
  1270  					b &= 1<<nb - 1
  1271  					pattern = b
  1272  					npattern = nb
  1273  				}
  1274  			}
  1275  
  1276  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1277  			// Since pattern contains >8 bits, there will be full bytes to flush
  1278  			// on each iteration.
  1279  			for ; c >= npattern; c -= npattern {
  1280  				bits |= pattern << nbits
  1281  				nbits += npattern
  1282  				for nbits >= 8 {
  1283  					*dst = uint8(bits)
  1284  					dst = add1(dst)
  1285  					bits >>= 8
  1286  					nbits -= 8
  1287  				}
  1288  			}
  1289  
  1290  			// Add final fragment to bit buffer.
  1291  			if c > 0 {
  1292  				pattern &= 1<<c - 1
  1293  				bits |= pattern << nbits
  1294  				nbits += c
  1295  			}
  1296  			continue Run
  1297  		}
  1298  
  1299  		// Repeat; n too large to fit in a register.
  1300  		// Since nbits <= 7, we know the first few bytes of repeated data
  1301  		// are already written to memory.
  1302  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1303  		// Leading src fragment.
  1304  		src = subtractb(src, (off+7)/8)
  1305  		if frag := off & 7; frag != 0 {
  1306  			bits |= uintptr(*src) >> (8 - frag) << nbits
  1307  			src = add1(src)
  1308  			nbits += frag
  1309  			c -= frag
  1310  		}
  1311  		// Main loop: load one byte, write another.
  1312  		// The bits are rotating through the bit buffer.
  1313  		for i := c / 8; i > 0; i-- {
  1314  			bits |= uintptr(*src) << nbits
  1315  			src = add1(src)
  1316  			*dst = uint8(bits)
  1317  			dst = add1(dst)
  1318  			bits >>= 8
  1319  		}
  1320  		// Final src fragment.
  1321  		if c %= 8; c > 0 {
  1322  			bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1323  			nbits += c
  1324  		}
  1325  	}
  1326  
  1327  	// Write any final bits out, using full-byte writes, even for the final byte.
  1328  	totalBits := (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1329  	nbits += -nbits & 7
  1330  	for ; nbits > 0; nbits -= 8 {
  1331  		*dst = uint8(bits)
  1332  		dst = add1(dst)
  1333  		bits >>= 8
  1334  	}
  1335  	return totalBits
  1336  }
  1337  
  1338  // materializeGCProg allocates space for the (1-bit) pointer bitmask
  1339  // for an object of size ptrdata.  Then it fills that space with the
  1340  // pointer bitmask specified by the program prog.
  1341  // The bitmask starts at s.startAddr.
  1342  // The result must be deallocated with dematerializeGCProg.
  1343  func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
  1344  	// Each word of ptrdata needs one bit in the bitmap.
  1345  	bitmapBytes := divRoundUp(ptrdata, 8*goarch.PtrSize)
  1346  	// Compute the number of pages needed for bitmapBytes.
  1347  	pages := divRoundUp(bitmapBytes, pageSize)
  1348  	s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
  1349  	runGCProg(addb(prog, 4), (*byte)(unsafe.Pointer(s.startAddr)))
  1350  	return s
  1351  }
  1352  func dematerializeGCProg(s *mspan) {
  1353  	mheap_.freeManual(s, spanAllocPtrScalarBits)
  1354  }
  1355  
  1356  func dumpGCProg(p *byte) {
  1357  	nptr := 0
  1358  	for {
  1359  		x := *p
  1360  		p = add1(p)
  1361  		if x == 0 {
  1362  			print("\t", nptr, " end\n")
  1363  			break
  1364  		}
  1365  		if x&0x80 == 0 {
  1366  			print("\t", nptr, " lit ", x, ":")
  1367  			n := int(x+7) / 8
  1368  			for i := 0; i < n; i++ {
  1369  				print(" ", hex(*p))
  1370  				p = add1(p)
  1371  			}
  1372  			print("\n")
  1373  			nptr += int(x)
  1374  		} else {
  1375  			nbit := int(x &^ 0x80)
  1376  			if nbit == 0 {
  1377  				for nb := uint(0); ; nb += 7 {
  1378  					x := *p
  1379  					p = add1(p)
  1380  					nbit |= int(x&0x7f) << nb
  1381  					if x&0x80 == 0 {
  1382  						break
  1383  					}
  1384  				}
  1385  			}
  1386  			count := 0
  1387  			for nb := uint(0); ; nb += 7 {
  1388  				x := *p
  1389  				p = add1(p)
  1390  				count |= int(x&0x7f) << nb
  1391  				if x&0x80 == 0 {
  1392  					break
  1393  				}
  1394  			}
  1395  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1396  			nptr += nbit * count
  1397  		}
  1398  	}
  1399  }
  1400  
  1401  // Testing.
  1402  
  1403  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  1404  	target := (*stkframe)(ctxt)
  1405  	if frame.sp <= target.sp && target.sp < frame.varp {
  1406  		*target = *frame
  1407  		return false
  1408  	}
  1409  	return true
  1410  }
  1411  
  1412  // gcbits returns the GC type info for x, for testing.
  1413  // The result is the bitmap entries (0 or 1), one entry per byte.
  1414  //
  1415  //go:linkname reflect_gcbits reflect.gcbits
  1416  func reflect_gcbits(x any) []byte {
  1417  	return getgcmask(x)
  1418  }
  1419  
  1420  // getgcmask returns the GC type info for the pointer stored in ep, for testing.
  1421  // If ep points to the stack, only static live information will be returned
  1422  // (i.e. not for objects which are only dynamically live stack objects).
  1423  func getgcmask(ep any) (mask []byte) {
  1424  	e := *efaceOf(&ep)
  1425  	p := e.data
  1426  	t := e._type
  1427  	// data or bss
  1428  	for _, datap := range activeModules() {
  1429  		// data
  1430  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  1431  			bitmap := datap.gcdatamask.bytedata
  1432  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1433  			mask = make([]byte, n/goarch.PtrSize)
  1434  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1435  				off := (uintptr(p) + i - datap.data) / goarch.PtrSize
  1436  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1437  			}
  1438  			return
  1439  		}
  1440  
  1441  		// bss
  1442  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  1443  			bitmap := datap.gcbssmask.bytedata
  1444  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1445  			mask = make([]byte, n/goarch.PtrSize)
  1446  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1447  				off := (uintptr(p) + i - datap.bss) / goarch.PtrSize
  1448  				mask[i/goarch.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1449  			}
  1450  			return
  1451  		}
  1452  	}
  1453  
  1454  	// heap
  1455  	if base, s, _ := findObject(uintptr(p), 0, 0); base != 0 {
  1456  		if s.spanclass.noscan() {
  1457  			return nil
  1458  		}
  1459  		n := s.elemsize
  1460  		hbits := heapBitsForAddr(base, n)
  1461  		mask = make([]byte, n/goarch.PtrSize)
  1462  		for {
  1463  			var addr uintptr
  1464  			if hbits, addr = hbits.next(); addr == 0 {
  1465  				break
  1466  			}
  1467  			mask[(addr-base)/goarch.PtrSize] = 1
  1468  		}
  1469  		// Callers expect this mask to end at the last pointer.
  1470  		for len(mask) > 0 && mask[len(mask)-1] == 0 {
  1471  			mask = mask[:len(mask)-1]
  1472  		}
  1473  		return
  1474  	}
  1475  
  1476  	// stack
  1477  	if gp := getg(); gp.m.curg.stack.lo <= uintptr(p) && uintptr(p) < gp.m.curg.stack.hi {
  1478  		var frame stkframe
  1479  		frame.sp = uintptr(p)
  1480  		gentraceback(gp.m.curg.sched.pc, gp.m.curg.sched.sp, 0, gp.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  1481  		if frame.fn.valid() {
  1482  			locals, _, _ := frame.getStackMap(nil, false)
  1483  			if locals.n == 0 {
  1484  				return
  1485  			}
  1486  			size := uintptr(locals.n) * goarch.PtrSize
  1487  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1488  			mask = make([]byte, n/goarch.PtrSize)
  1489  			for i := uintptr(0); i < n; i += goarch.PtrSize {
  1490  				off := (uintptr(p) + i - frame.varp + size) / goarch.PtrSize
  1491  				mask[i/goarch.PtrSize] = locals.ptrbit(off)
  1492  			}
  1493  		}
  1494  		return
  1495  	}
  1496  
  1497  	// otherwise, not something the GC knows about.
  1498  	// possibly read-only data, like malloc(0).
  1499  	// must not have pointers
  1500  	return
  1501  }