github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/runtime/mbitmap.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: type and heap bitmaps.
     6  //
     7  // Stack, data, and bss bitmaps
     8  //
     9  // Stack frames and global variables in the data and bss sections are described
    10  // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
    11  // to be visited during GC. The bits in each byte are consumed starting with
    12  // the low bit: 1<<0, 1<<1, and so on.
    13  //
    14  // Heap bitmap
    15  //
    16  // The allocated heap comes from a subset of the memory in the range [start, used),
    17  // where start == mheap_.arena_start and used == mheap_.arena_used.
    18  // The heap bitmap comprises 2 bits for each pointer-sized word in that range,
    19  // stored in bytes indexed backward in memory from start.
    20  // That is, the byte at address start-1 holds the 2-bit entries for the four words
    21  // start through start+3*ptrSize, the byte at start-2 holds the entries for
    22  // start+4*ptrSize through start+7*ptrSize, and so on.
    23  //
    24  // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
    25  // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
    26  // The meaning of the high bit depends on the position of the word being described
    27  // in its allocated object. In all words *except* the second word, the
    28  // high bit indicates that the object is still being described. In
    29  // these words, if a bit pair with a high bit 0 is encountered, the
    30  // low bit can also be assumed to be 0, and the object description is
    31  // over. This 00 is called the ``dead'' encoding: it signals that the
    32  // rest of the words in the object are uninteresting to the garbage
    33  // collector.
    34  //
    35  // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
    36  //
    37  // The 2-bit entries are split when written into the byte, so that the top half
    38  // of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
    39  // bits.
    40  // This form allows a copy from the 1-bit to the 4-bit form to keep the
    41  // pointer bits contiguous, instead of having to space them out.
    42  //
    43  // The code makes use of the fact that the zero value for a heap bitmap
     44  // has no live pointer bit set and is (depending on position) not used,
     45  // not checkmarked, and the dead encoding.
    46  // These properties must be preserved when modifying the encoding.
    47  //
    48  // The bitmap for noscan spans is not maintained. Code must ensure
    49  // that an object is scannable before consulting its bitmap by
     50  // either checking the noscan bit in the span or consulting its
     51  // type's information.
    52  //
    53  // Checkmarks
    54  //
    55  // In a concurrent garbage collector, one worries about failing to mark
    56  // a live object due to mutations without write barriers or bugs in the
    57  // collector implementation. As a sanity check, the GC has a 'checkmark'
    58  // mode that retraverses the object graph with the world stopped, to make
    59  // sure that everything that should be marked is marked.
    60  // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
    61  // for the second word of the object holds the checkmark bit.
    62  // When not in checkmark mode, this bit is set to 1.
    63  //
    64  // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
    65  // means every allocated object has two words, so there is room for the
    66  // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
    67  // just one word, so the second bit pair is not available for encoding the
    68  // checkmark. However, because non-pointer allocations are combined
    69  // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
    70  // must be a pointer, so the type bit in the first word is not actually needed.
     71  // It is still used in general, except that in checkmark mode the type bit is
     72  // repurposed as the checkmark bit and then reinitialized (to 1) as the type
     73  // bit when finished.
    74  //
    75  
    76  package runtime
    77  
    78  import (
    79  	"runtime/internal/atomic"
    80  	"runtime/internal/sys"
    81  	"unsafe"
    82  )
    83  
    84  const (
    85  	bitPointer = 1 << 0
    86  	bitScan    = 1 << 4
    87  
    88  	heapBitsShift   = 1                     // shift offset between successive bitPointer or bitScan entries
    89  	heapBitmapScale = sys.PtrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
    90  
    91  	// all scan/pointer bits in a byte
    92  	bitScanAll    = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
    93  	bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
    94  )
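
         // Illustrative sketch (not part of the runtime): decoding one heap bitmap
         // byte with the constants above. Within a byte, bit i (i = 0..3) is the
         // pointer bit for the i'th word the byte covers and bit 4+i is that word's
         // scan bit; this is the split form that keeps the pointer bits contiguous.
         func exampleDecodeBitmapByte(b uint8, i uint) (isPointer, doScan bool) {
         	isPointer = b&(bitPointer<<(i*heapBitsShift)) != 0
         	doScan = b&(bitScan<<(i*heapBitsShift)) != 0
         	return
         }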
    95  
    96  // addb returns the byte pointer p+n.
    97  //go:nowritebarrier
    98  //go:nosplit
    99  func addb(p *byte, n uintptr) *byte {
   100  	// Note: wrote out full expression instead of calling add(p, n)
   101  	// to reduce the number of temporaries generated by the
   102  	// compiler for this trivial expression during inlining.
   103  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
   104  }
   105  
   106  // subtractb returns the byte pointer p-n.
   107  // subtractb is typically used when traversing the pointer tables referred to by hbits
   108  // which are arranged in reverse order.
   109  //go:nowritebarrier
   110  //go:nosplit
   111  func subtractb(p *byte, n uintptr) *byte {
   112  	// Note: wrote out full expression instead of calling add(p, -n)
   113  	// to reduce the number of temporaries generated by the
   114  	// compiler for this trivial expression during inlining.
   115  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
   116  }
   117  
   118  // add1 returns the byte pointer p+1.
   119  //go:nowritebarrier
   120  //go:nosplit
   121  func add1(p *byte) *byte {
   122  	// Note: wrote out full expression instead of calling addb(p, 1)
   123  	// to reduce the number of temporaries generated by the
   124  	// compiler for this trivial expression during inlining.
   125  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
   126  }
   127  
   128  // subtract1 returns the byte pointer p-1.
   129  // subtract1 is typically used when traversing the pointer tables referred to by hbits
   130  // which are arranged in reverse order.
   131  //go:nowritebarrier
   132  //
   133  // nosplit because it is used during write barriers and must not be preempted.
   134  //go:nosplit
   135  func subtract1(p *byte) *byte {
   136  	// Note: wrote out full expression instead of calling subtractb(p, 1)
   137  	// to reduce the number of temporaries generated by the
   138  	// compiler for this trivial expression during inlining.
   139  	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
   140  }
   141  
   142  // mapBits maps any additional bitmap memory needed for the new arena memory.
   143  //
   144  // Don't call this directly. Call mheap.setArenaUsed.
   145  //
   146  //go:nowritebarrier
   147  func (h *mheap) mapBits(arena_used uintptr) {
   148  	// Caller has added extra mappings to the arena.
   149  	// Add extra mappings of bitmap words as needed.
   150  	// We allocate extra bitmap pieces in chunks of bitmapChunk.
   151  	const bitmapChunk = 8192
   152  
   153  	n := (arena_used - mheap_.arena_start) / heapBitmapScale
   154  	n = round(n, bitmapChunk)
   155  	n = round(n, physPageSize)
   156  	if h.bitmap_mapped >= n {
   157  		return
   158  	}
   159  
   160  	sysMap(unsafe.Pointer(h.bitmap-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
   161  	h.bitmap_mapped = n
   162  }
   163  
   164  // heapBits provides access to the bitmap bits for a single heap word.
   165  // The methods on heapBits take value receivers so that the compiler
   166  // can more easily inline calls to those methods and registerize the
   167  // struct fields independently.
   168  type heapBits struct {
   169  	bitp  *uint8
   170  	shift uint32
   171  }
   172  
   173  // markBits provides access to the mark bit for an object in the heap.
   174  // bytep points to the byte holding the mark bit.
   175  // mask is a byte with a single bit set that can be &ed with *bytep
   176  // to see if the bit has been set.
    177  // *m.bytep&m.mask != 0 indicates the mark bit is set.
   178  // index can be used along with span information to generate
   179  // the address of the object in the heap.
   180  // We maintain one set of mark bits for allocation and one for
   181  // marking purposes.
   182  type markBits struct {
   183  	bytep *uint8
   184  	mask  uint8
   185  	index uintptr
   186  }
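
         // Illustrative sketch (not part of the runtime) of the layout behind bytep
         // and mask: in a 1-bit-per-object bitmap such as allocBits, object index n
         // lives in byte n/8 at bit n%8 (low bit first), which is what the bitp
         // helper called in allocBitsForIndex below returns.
         func exampleMarkBitsForIndex(base *uint8, index uintptr) (bytep *uint8, mask uint8) {
         	bytep = addb(base, index/8)
         	mask = uint8(1) << (index % 8)
         	return
         }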
   187  
   188  //go:nosplit
   189  func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
   190  	bytep, mask := s.allocBits.bitp(allocBitIndex)
   191  	return markBits{bytep, mask, allocBitIndex}
   192  }
   193  
    194  // refillAllocCache takes 8 bytes from s.allocBits starting at whichByte
   195  // and negates them so that ctz (count trailing zeros) instructions
   196  // can be used. It then places these 8 bytes into the cached 64 bit
   197  // s.allocCache.
   198  func (s *mspan) refillAllocCache(whichByte uintptr) {
   199  	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
   200  	aCache := uint64(0)
   201  	aCache |= uint64(bytes[0])
   202  	aCache |= uint64(bytes[1]) << (1 * 8)
   203  	aCache |= uint64(bytes[2]) << (2 * 8)
   204  	aCache |= uint64(bytes[3]) << (3 * 8)
   205  	aCache |= uint64(bytes[4]) << (4 * 8)
   206  	aCache |= uint64(bytes[5]) << (5 * 8)
   207  	aCache |= uint64(bytes[6]) << (6 * 8)
   208  	aCache |= uint64(bytes[7]) << (7 * 8)
   209  	s.allocCache = ^aCache
   210  }
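
         // Illustrative sketch (not part of the runtime) of why the cache is negated:
         // in allocBits a 1 bit means allocated, so after negation the free slots are
         // the 1 bits and a single count-trailing-zeros finds the lowest free index.
         // For example, for alloc bits 0b10110111 the negation ends in 0b01001000 and
         // Ctz64 returns 3: object 3 is the first unallocated object in those 8 bits.
         func exampleFirstFree(allocBits uint64) uintptr {
         	return uintptr(sys.Ctz64(^allocBits))
         }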
   211  
   212  // nextFreeIndex returns the index of the next free object in s at
   213  // or after s.freeindex.
   214  // There are hardware instructions that can be used to make this
   215  // faster if profiling warrants it.
   216  func (s *mspan) nextFreeIndex() uintptr {
   217  	sfreeindex := s.freeindex
   218  	snelems := s.nelems
   219  	if sfreeindex == snelems {
   220  		return sfreeindex
   221  	}
   222  	if sfreeindex > snelems {
   223  		throw("s.freeindex > s.nelems")
   224  	}
   225  
   226  	aCache := s.allocCache
   227  
   228  	bitIndex := sys.Ctz64(aCache)
   229  	for bitIndex == 64 {
   230  		// Move index to start of next cached bits.
   231  		sfreeindex = (sfreeindex + 64) &^ (64 - 1)
   232  		if sfreeindex >= snelems {
   233  			s.freeindex = snelems
   234  			return snelems
   235  		}
   236  		whichByte := sfreeindex / 8
   237  		// Refill s.allocCache with the next 64 alloc bits.
   238  		s.refillAllocCache(whichByte)
   239  		aCache = s.allocCache
   240  		bitIndex = sys.Ctz64(aCache)
   241  		// nothing available in cached bits
   242  		// grab the next 8 bytes and try again.
   243  	}
   244  	result := sfreeindex + uintptr(bitIndex)
   245  	if result >= snelems {
   246  		s.freeindex = snelems
   247  		return snelems
   248  	}
   249  
   250  	s.allocCache >>= uint(bitIndex + 1)
   251  	sfreeindex = result + 1
   252  
   253  	if sfreeindex%64 == 0 && sfreeindex != snelems {
   254  		// We just incremented s.freeindex so it isn't 0.
   255  		// As each 1 in s.allocCache was encountered and used for allocation
   256  		// it was shifted away. At this point s.allocCache contains all 0s.
   257  		// Refill s.allocCache so that it corresponds
   258  		// to the bits at s.allocBits starting at s.freeindex.
   259  		whichByte := sfreeindex / 8
   260  		s.refillAllocCache(whichByte)
   261  	}
   262  	s.freeindex = sfreeindex
   263  	return result
   264  }
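
         // Illustrative sketch of how a caller turns the returned index into an
         // object address (a simplified version of the runtime's nextFree helper in
         // malloc.go): an index equal to s.nelems means the span has no free object.
         func exampleNextFreeObject(s *mspan) (obj uintptr, ok bool) {
         	idx := s.nextFreeIndex()
         	if idx == s.nelems {
         		return 0, false // span is full
         	}
         	return s.base() + idx*s.elemsize, true
         }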
   265  
   266  // isFree returns whether the index'th object in s is unallocated.
   267  func (s *mspan) isFree(index uintptr) bool {
   268  	if index < s.freeindex {
   269  		return false
   270  	}
   271  	bytep, mask := s.allocBits.bitp(index)
   272  	return *bytep&mask == 0
   273  }
   274  
   275  func (s *mspan) objIndex(p uintptr) uintptr {
   276  	byteOffset := p - s.base()
   277  	if byteOffset == 0 {
   278  		return 0
   279  	}
   280  	if s.baseMask != 0 {
    281  		// s.baseMask is not 0, elemsize is a power of two, so shift by s.divShift
   282  		return byteOffset >> s.divShift
   283  	}
   284  	return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
   285  }
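
         // Illustrative sketch of the divide-by-multiply trick used above. The
         // constants here are picked for this example only and are not the runtime's
         // size-class tables: for a 48-byte element, shifting right by 4 removes the
         // factor of 16 and multiplying by 43691 then shifting right by 17 divides by
         // 3, so the result equals byteOffset/48 for any offset inside a small span.
         func exampleDivBy48(byteOffset uintptr) uintptr {
         	return uintptr(((uint64(byteOffset) >> 4) * 43691) >> 17)
         }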
   286  
   287  func markBitsForAddr(p uintptr) markBits {
   288  	s := spanOf(p)
   289  	objIndex := s.objIndex(p)
   290  	return s.markBitsForIndex(objIndex)
   291  }
   292  
   293  func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
   294  	bytep, mask := s.gcmarkBits.bitp(objIndex)
   295  	return markBits{bytep, mask, objIndex}
   296  }
   297  
   298  func (s *mspan) markBitsForBase() markBits {
   299  	return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
   300  }
   301  
   302  // isMarked reports whether mark bit m is set.
   303  func (m markBits) isMarked() bool {
   304  	return *m.bytep&m.mask != 0
   305  }
   306  
    307  // setMarked sets the marked bit in the markbits, atomically. Some compilers
    308  // are not able to inline the atomic.Or8 function, so if it appears as a hot spot
    309  // consider inlining it manually.
   310  func (m markBits) setMarked() {
   311  	// Might be racing with other updates, so use atomic update always.
   312  	// We used to be clever here and use a non-atomic update in certain
   313  	// cases, but it's not worth the risk.
   314  	atomic.Or8(m.bytep, m.mask)
   315  }
   316  
   317  // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
   318  func (m markBits) setMarkedNonAtomic() {
   319  	*m.bytep |= m.mask
   320  }
   321  
   322  // clearMarked clears the marked bit in the markbits, atomically.
   323  func (m markBits) clearMarked() {
   324  	// Might be racing with other updates, so use atomic update always.
   325  	// We used to be clever here and use a non-atomic update in certain
   326  	// cases, but it's not worth the risk.
   327  	atomic.And8(m.bytep, ^m.mask)
   328  }
   329  
   330  // markBitsForSpan returns the markBits for the span base address base.
   331  func markBitsForSpan(base uintptr) (mbits markBits) {
   332  	if base < mheap_.arena_start || base >= mheap_.arena_used {
   333  		throw("markBitsForSpan: base out of range")
   334  	}
   335  	mbits = markBitsForAddr(base)
   336  	if mbits.mask != 1 {
   337  		throw("markBitsForSpan: unaligned start")
   338  	}
   339  	return mbits
   340  }
   341  
   342  // advance advances the markBits to the next object in the span.
   343  func (m *markBits) advance() {
   344  	if m.mask == 1<<7 {
   345  		m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
   346  		m.mask = 1
   347  	} else {
   348  		m.mask = m.mask << 1
   349  	}
   350  	m.index++
   351  }
   352  
   353  // heapBitsForAddr returns the heapBits for the address addr.
   354  // The caller must have already checked that addr is in the range [mheap_.arena_start, mheap_.arena_used).
   355  //
   356  // nosplit because it is used during write barriers and must not be preempted.
   357  //go:nosplit
   358  func heapBitsForAddr(addr uintptr) heapBits {
    359  	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
   360  	off := (addr - mheap_.arena_start) / sys.PtrSize
   361  	return heapBits{(*uint8)(unsafe.Pointer(mheap_.bitmap - off/4 - 1)), uint32(off & 3)}
   362  }
   363  
   364  // heapBitsForSpan returns the heapBits for the span base address base.
   365  func heapBitsForSpan(base uintptr) (hbits heapBits) {
   366  	if base < mheap_.arena_start || base >= mheap_.arena_used {
   367  		print("runtime: base ", hex(base), " not in range [", hex(mheap_.arena_start), ",", hex(mheap_.arena_used), ")\n")
   368  		throw("heapBitsForSpan: base out of range")
   369  	}
   370  	return heapBitsForAddr(base)
   371  }
   372  
   373  // heapBitsForObject returns the base address for the heap object
   374  // containing the address p, the heapBits for base,
    375  // the object's span, and the index of the object in s.
    376  // If p does not point into a heap object,
    377  // heapBitsForObject returns base == 0;
    378  // otherwise it returns the base of the object.
   379  //
   380  // refBase and refOff optionally give the base address of the object
   381  // in which the pointer p was found and the byte offset at which it
   382  // was found. These are used for error reporting.
   383  func heapBitsForObject(p, refBase, refOff uintptr) (base uintptr, hbits heapBits, s *mspan, objIndex uintptr) {
   384  	arenaStart := mheap_.arena_start
   385  	if p < arenaStart || p >= mheap_.arena_used {
   386  		return
   387  	}
   388  	off := p - arenaStart
   389  	idx := off >> _PageShift
   390  	// p points into the heap, but possibly to the middle of an object.
   391  	// Consult the span table to find the block beginning.
   392  	s = mheap_.spans[idx]
   393  	if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
   394  		if s == nil || s.state == _MSpanManual {
   395  			// If s is nil, the virtual address has never been part of the heap.
   396  			// This pointer may be to some mmap'd region, so we allow it.
   397  			// Pointers into stacks are also ok, the runtime manages these explicitly.
   398  			return
   399  		}
   400  
   401  		// The following ensures that we are rigorous about what data
   402  		// structures hold valid pointers.
   403  		if debug.invalidptr != 0 {
   404  			// Typically this indicates an incorrect use
   405  			// of unsafe or cgo to store a bad pointer in
   406  			// the Go heap. It may also indicate a runtime
   407  			// bug.
   408  			//
   409  			// TODO(austin): We could be more aggressive
   410  			// and detect pointers to unallocated objects
   411  			// in allocated spans.
   412  			printlock()
   413  			print("runtime: pointer ", hex(p))
   414  			if s.state != mSpanInUse {
   415  				print(" to unallocated span")
   416  			} else {
   417  				print(" to unused region of span")
   418  			}
   419  			print(" idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
   420  			if refBase != 0 {
   421  				print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
   422  				gcDumpObject("object", refBase, refOff)
   423  			}
   424  			getg().m.traceback = 2
   425  			throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
   426  		}
   427  		return
   428  	}
    429  	// If this span holds objects of a power of 2 size, just mask off the bits to
    430  	// the interior of the object. Otherwise use the size to get the base.
   431  	if s.baseMask != 0 {
   432  		// optimize for power of 2 sized objects.
   433  		base = s.base()
   434  		base = base + (p-base)&uintptr(s.baseMask)
   435  		objIndex = (base - s.base()) >> s.divShift
   436  		// base = p & s.baseMask is faster for small spans,
   437  		// but doesn't work for large spans.
   438  		// Overall, it's faster to use the more general computation above.
   439  	} else {
   440  		base = s.base()
   441  		if p-base >= s.elemsize {
   442  			// n := (p - base) / s.elemsize, using division by multiplication
   443  			objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
   444  			base += objIndex * s.elemsize
   445  		}
   446  	}
   447  	// Now that we know the actual base, compute heapBits to return to caller.
   448  	hbits = heapBitsForAddr(base)
   449  	return
   450  }
   451  
   452  // next returns the heapBits describing the next pointer-sized word in memory.
   453  // That is, if h describes address p, h.next() describes p+ptrSize.
   454  // Note that next does not modify h. The caller must record the result.
   455  //
   456  // nosplit because it is used during write barriers and must not be preempted.
   457  //go:nosplit
   458  func (h heapBits) next() heapBits {
   459  	if h.shift < 3*heapBitsShift {
   460  		return heapBits{h.bitp, h.shift + heapBitsShift}
   461  	}
   462  	return heapBits{subtract1(h.bitp), 0}
   463  }
   464  
   465  // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
   466  // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
   467  // h.forward(1) is equivalent to h.next(), just slower.
   468  // Note that forward does not modify h. The caller must record the result.
   470  func (h heapBits) forward(n uintptr) heapBits {
   471  	n += uintptr(h.shift) / heapBitsShift
   472  	return heapBits{subtractb(h.bitp, n/4), uint32(n%4) * heapBitsShift}
   473  }
   474  
    475  // bits returns the heap bits for the current word.
    476  // The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
    477  // The result includes in its higher bits the bits for subsequent words described by the same bitmap byte.
   478  func (h heapBits) bits() uint32 {
   479  	// The (shift & 31) eliminates a test and conditional branch
   480  	// from the generated code.
   481  	return uint32(*h.bitp) >> (h.shift & 31)
   482  }
   483  
    484  // morePointers reports whether the bitmap continues to describe this object at
    485  // this word; if it returns false, this word and all remaining words are scalars.
   486  // h must not describe the second word of the object.
   487  func (h heapBits) morePointers() bool {
   488  	return h.bits()&bitScan != 0
   489  }
   490  
   491  // isPointer reports whether the heap bits describe a pointer word.
   492  //
   493  // nosplit because it is used during write barriers and must not be preempted.
   494  //go:nosplit
   495  func (h heapBits) isPointer() bool {
   496  	return h.bits()&bitPointer != 0
   497  }
   498  
   499  // isCheckmarked reports whether the heap bits have the checkmarked bit set.
   500  // It must be told how large the object at h is, because the encoding of the
   501  // checkmark bit varies by size.
   502  // h must describe the initial word of the object.
   503  func (h heapBits) isCheckmarked(size uintptr) bool {
   504  	if size == sys.PtrSize {
   505  		return (*h.bitp>>h.shift)&bitPointer != 0
   506  	}
   507  	// All multiword objects are 2-word aligned,
   508  	// so we know that the initial word's 2-bit pair
   509  	// and the second word's 2-bit pair are in the
   510  	// same heap bitmap byte, *h.bitp.
   511  	return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
   512  }
   513  
   514  // setCheckmarked sets the checkmarked bit.
   515  // It must be told how large the object at h is, because the encoding of the
   516  // checkmark bit varies by size.
   517  // h must describe the initial word of the object.
   518  func (h heapBits) setCheckmarked(size uintptr) {
   519  	if size == sys.PtrSize {
   520  		atomic.Or8(h.bitp, bitPointer<<h.shift)
   521  		return
   522  	}
   523  	atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
   524  }
   525  
   526  // bulkBarrierPreWrite executes a write barrier
   527  // for every pointer slot in the memory range [src, src+size),
   528  // using pointer/scalar information from [dst, dst+size).
   529  // This executes the write barriers necessary before a memmove.
   530  // src, dst, and size must be pointer-aligned.
   531  // The range [dst, dst+size) must lie within a single object.
   532  // It does not perform the actual writes.
   533  //
   534  // As a special case, src == 0 indicates that this is being used for a
   535  // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
   536  // barrier.
   537  //
   538  // Callers should call bulkBarrierPreWrite immediately before
   539  // calling memmove(dst, src, size). This function is marked nosplit
   540  // to avoid being preempted; the GC must not stop the goroutine
   541  // between the memmove and the execution of the barriers.
   542  // The caller is also responsible for cgo pointer checks if this
   543  // may be writing Go pointers into non-Go memory.
   544  //
   545  // The pointer bitmap is not maintained for allocations containing
   546  // no pointers at all; any caller of bulkBarrierPreWrite must first
   547  // make sure the underlying allocation contains pointers, usually
   548  // by checking typ.kind&kindNoPointers.
   549  //
   550  //go:nosplit
   551  func bulkBarrierPreWrite(dst, src, size uintptr) {
   552  	if (dst|src|size)&(sys.PtrSize-1) != 0 {
   553  		throw("bulkBarrierPreWrite: unaligned arguments")
   554  	}
   555  	if !writeBarrier.needed {
   556  		return
   557  	}
   558  	if !inheap(dst) {
   559  		gp := getg().m.curg
   560  		if gp != nil && gp.stack.lo <= dst && dst < gp.stack.hi {
   561  			// Destination is our own stack. No need for barriers.
   562  			return
   563  		}
   564  
   565  		// If dst is a global, use the data or BSS bitmaps to
   566  		// execute write barriers.
   567  		for _, datap := range activeModules() {
   568  			if datap.data <= dst && dst < datap.edata {
   569  				bulkBarrierBitmap(dst, src, size, dst-datap.data, datap.gcdatamask.bytedata)
   570  				return
   571  			}
   572  		}
   573  		for _, datap := range activeModules() {
   574  			if datap.bss <= dst && dst < datap.ebss {
   575  				bulkBarrierBitmap(dst, src, size, dst-datap.bss, datap.gcbssmask.bytedata)
   576  				return
   577  			}
   578  		}
   579  		return
   580  	}
   581  
   582  	buf := &getg().m.p.ptr().wbBuf
   583  	h := heapBitsForAddr(dst)
   584  	if src == 0 {
   585  		for i := uintptr(0); i < size; i += sys.PtrSize {
   586  			if h.isPointer() {
   587  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   588  				if !buf.putFast(*dstx, 0) {
   589  					wbBufFlush(nil, 0)
   590  				}
   591  			}
   592  			h = h.next()
   593  		}
   594  	} else {
   595  		for i := uintptr(0); i < size; i += sys.PtrSize {
   596  			if h.isPointer() {
   597  				dstx := (*uintptr)(unsafe.Pointer(dst + i))
   598  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   599  				if !buf.putFast(*dstx, *srcx) {
   600  					wbBufFlush(nil, 0)
   601  				}
   602  			}
   603  			h = h.next()
   604  		}
   605  	}
   606  }
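
         // Illustrative sketch of the calling pattern described above: a simplified
         // version of typedmemmove (mbarrier.go) with the cgo and race hooks omitted.
         // The real function is also marked nosplit so that the barriers and the
         // memmove cannot be separated by a preemption.
         func exampleTypedBulkCopy(typ *_type, dst, src unsafe.Pointer) {
         	if typ.kind&kindNoPointers == 0 {
         		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
         	}
         	memmove(dst, src, typ.size)
         }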
   607  
   608  // bulkBarrierBitmap executes write barriers for copying from [src,
   609  // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
   610  // assumed to start maskOffset bytes into the data covered by the
   611  // bitmap in bits (which may not be a multiple of 8).
   612  //
   613  // This is used by bulkBarrierPreWrite for writes to data and BSS.
   614  //
   615  //go:nosplit
   616  func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
   617  	word := maskOffset / sys.PtrSize
   618  	bits = addb(bits, word/8)
   619  	mask := uint8(1) << (word % 8)
   620  
   621  	buf := &getg().m.p.ptr().wbBuf
   622  	for i := uintptr(0); i < size; i += sys.PtrSize {
   623  		if mask == 0 {
   624  			bits = addb(bits, 1)
   625  			if *bits == 0 {
   626  				// Skip 8 words.
   627  				i += 7 * sys.PtrSize
   628  				continue
   629  			}
   630  			mask = 1
   631  		}
   632  		if *bits&mask != 0 {
   633  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   634  			if src == 0 {
   635  				if !buf.putFast(*dstx, 0) {
   636  					wbBufFlush(nil, 0)
   637  				}
   638  			} else {
   639  				srcx := (*uintptr)(unsafe.Pointer(src + i))
   640  				if !buf.putFast(*dstx, *srcx) {
   641  					wbBufFlush(nil, 0)
   642  				}
   643  			}
   644  		}
   645  		mask <<= 1
   646  	}
   647  }
   648  
   649  // typeBitsBulkBarrier executes writebarrierptr_prewrite for every
   650  // pointer that would be copied from [src, src+size) to [dst,
   651  // dst+size) by a memmove using the type bitmap to locate those
   652  // pointer slots.
   653  //
   654  // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
   655  // dst, src, and size must be pointer-aligned.
   656  // The type typ must have a plain bitmap, not a GC program.
   657  // The only use of this function is in channel sends, and the
   658  // 64 kB channel element limit takes care of this for us.
   659  //
    660  // Must not be preempted because it typically runs right before memmove,
    661  // and the GC must observe the barriers and the memmove as an atomic action.
   662  //
   663  //go:nosplit
   664  func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
   665  	if typ == nil {
   666  		throw("runtime: typeBitsBulkBarrier without type")
   667  	}
   668  	if typ.size != size {
   669  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " of size ", typ.size, " but memory size", size)
   670  		throw("runtime: invalid typeBitsBulkBarrier")
   671  	}
   672  	if typ.kind&kindGCProg != 0 {
   673  		println("runtime: typeBitsBulkBarrier with type ", typ.string(), " with GC prog")
   674  		throw("runtime: invalid typeBitsBulkBarrier")
   675  	}
   676  	if !writeBarrier.needed {
   677  		return
   678  	}
   679  	ptrmask := typ.gcdata
   680  	var bits uint32
   681  	for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
   682  		if i&(sys.PtrSize*8-1) == 0 {
   683  			bits = uint32(*ptrmask)
   684  			ptrmask = addb(ptrmask, 1)
   685  		} else {
   686  			bits = bits >> 1
   687  		}
   688  		if bits&1 != 0 {
   689  			dstx := (*uintptr)(unsafe.Pointer(dst + i))
   690  			srcx := (*uintptr)(unsafe.Pointer(src + i))
   691  			writebarrierptr_prewrite(dstx, *srcx)
   692  		}
   693  	}
   694  }
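
         // Illustrative sketch (not part of the runtime) of the 1-bit mask layout
         // consumed above: for a type with a plain bitmap, typ.gcdata holds one bit
         // per pointer-sized word of the typ.ptrdata prefix, low bit first, so word i
         // is recorded in bit i%8 of byte i/8. This helper counts the pointer words.
         func examplePtrmaskPopcount(ptrmask *uint8, ptrdata uintptr) int {
         	n := 0
         	for i := uintptr(0); i < ptrdata/sys.PtrSize; i++ {
         		if (*addb(ptrmask, i/8)>>(i%8))&1 != 0 {
         			n++
         		}
         	}
         	return n
         }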
   695  
   696  // The methods operating on spans all require that h has been returned
   697  // by heapBitsForSpan and that size, n, total are the span layout description
   698  // returned by the mspan's layout method.
   699  // If total > size*n, it means that there is extra leftover memory in the span,
   700  // usually due to rounding.
   701  //
   702  // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
   703  
   704  // initSpan initializes the heap bitmap for a span.
   705  // It clears all checkmark bits.
   706  // If this is a span of pointer-sized objects, it initializes all
   707  // words to pointer/scan.
   708  // Otherwise, it initializes all words to scalar/dead.
   709  func (h heapBits) initSpan(s *mspan) {
   710  	size, n, total := s.layout()
   711  
   712  	// Init the markbit structures
   713  	s.freeindex = 0
   714  	s.allocCache = ^uint64(0) // all 1s indicating all free.
   715  	s.nelems = n
   716  	s.allocBits = nil
   717  	s.gcmarkBits = nil
   718  	s.gcmarkBits = newMarkBits(s.nelems)
   719  	s.allocBits = newAllocBits(s.nelems)
   720  
   721  	// Clear bits corresponding to objects.
   722  	if total%heapBitmapScale != 0 {
   723  		throw("initSpan: unaligned length")
   724  	}
   725  	nbyte := total / heapBitmapScale
   726  	if sys.PtrSize == 8 && size == sys.PtrSize {
   727  		end := h.bitp
   728  		bitp := subtractb(end, nbyte-1)
   729  		for {
   730  			*bitp = bitPointerAll | bitScanAll
   731  			if bitp == end {
   732  				break
   733  			}
   734  			bitp = add1(bitp)
   735  		}
   736  		return
   737  	}
   738  	memclrNoHeapPointers(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte)
   739  }
   740  
   741  // initCheckmarkSpan initializes a span for being checkmarked.
   742  // It clears the checkmark bits, which are set to 1 in normal operation.
   743  func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
    744  	// On 32-bit, sys.PtrSize == 8 is a compile-time constant false, which eliminates this code entirely.
   745  	if sys.PtrSize == 8 && size == sys.PtrSize {
   746  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   747  		// Only possible on 64-bit system, since minimum size is 8.
   748  		// Must clear type bit (checkmark bit) of every word.
   749  		// The type bit is the lower of every two-bit pair.
   750  		bitp := h.bitp
   751  		for i := uintptr(0); i < n; i += 4 {
   752  			*bitp &^= bitPointerAll
   753  			bitp = subtract1(bitp)
   754  		}
   755  		return
   756  	}
   757  	for i := uintptr(0); i < n; i++ {
   758  		*h.bitp &^= bitScan << (heapBitsShift + h.shift)
   759  		h = h.forward(size / sys.PtrSize)
   760  	}
   761  }
   762  
   763  // clearCheckmarkSpan undoes all the checkmarking in a span.
   764  // The actual checkmark bits are ignored, so the only work to do
   765  // is to fix the pointer bits. (Pointer bits are ignored by scanobject
   766  // but consulted by typedmemmove.)
   767  func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
    768  	// On 32-bit, sys.PtrSize == 8 is a compile-time constant false, which eliminates this code entirely.
   769  	if sys.PtrSize == 8 && size == sys.PtrSize {
   770  		// Checkmark bit is type bit, bottom bit of every 2-bit entry.
   771  		// Only possible on 64-bit system, since minimum size is 8.
   772  		// Must clear type bit (checkmark bit) of every word.
   773  		// The type bit is the lower of every two-bit pair.
   774  		bitp := h.bitp
   775  		for i := uintptr(0); i < n; i += 4 {
   776  			*bitp |= bitPointerAll
   777  			bitp = subtract1(bitp)
   778  		}
   779  	}
   780  }
   781  
   782  // oneBitCount is indexed by byte and produces the
   783  // number of 1 bits in that byte. For example 128 has 1 bit set
    784  // and oneBitCount[128] holds 1.
   785  var oneBitCount = [256]uint8{
   786  	0, 1, 1, 2, 1, 2, 2, 3,
   787  	1, 2, 2, 3, 2, 3, 3, 4,
   788  	1, 2, 2, 3, 2, 3, 3, 4,
   789  	2, 3, 3, 4, 3, 4, 4, 5,
   790  	1, 2, 2, 3, 2, 3, 3, 4,
   791  	2, 3, 3, 4, 3, 4, 4, 5,
   792  	2, 3, 3, 4, 3, 4, 4, 5,
   793  	3, 4, 4, 5, 4, 5, 5, 6,
   794  	1, 2, 2, 3, 2, 3, 3, 4,
   795  	2, 3, 3, 4, 3, 4, 4, 5,
   796  	2, 3, 3, 4, 3, 4, 4, 5,
   797  	3, 4, 4, 5, 4, 5, 5, 6,
   798  	2, 3, 3, 4, 3, 4, 4, 5,
   799  	3, 4, 4, 5, 4, 5, 5, 6,
   800  	3, 4, 4, 5, 4, 5, 5, 6,
   801  	4, 5, 5, 6, 5, 6, 6, 7,
   802  	1, 2, 2, 3, 2, 3, 3, 4,
   803  	2, 3, 3, 4, 3, 4, 4, 5,
   804  	2, 3, 3, 4, 3, 4, 4, 5,
   805  	3, 4, 4, 5, 4, 5, 5, 6,
   806  	2, 3, 3, 4, 3, 4, 4, 5,
   807  	3, 4, 4, 5, 4, 5, 5, 6,
   808  	3, 4, 4, 5, 4, 5, 5, 6,
   809  	4, 5, 5, 6, 5, 6, 6, 7,
   810  	2, 3, 3, 4, 3, 4, 4, 5,
   811  	3, 4, 4, 5, 4, 5, 5, 6,
   812  	3, 4, 4, 5, 4, 5, 5, 6,
   813  	4, 5, 5, 6, 5, 6, 6, 7,
   814  	3, 4, 4, 5, 4, 5, 5, 6,
   815  	4, 5, 5, 6, 5, 6, 6, 7,
   816  	4, 5, 5, 6, 5, 6, 6, 7,
   817  	5, 6, 6, 7, 6, 7, 7, 8}
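
         // Illustrative sketch (not part of the runtime): the table above is a
         // precomputed per-byte popcount. An equivalent computation for one byte,
         // using the clear-lowest-set-bit loop, is:
         func exampleOneBitCount(b uint8) uint8 {
         	var n uint8
         	for ; b != 0; b &= b - 1 { // each iteration clears the lowest set bit
         		n++
         	}
         	return n
         }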
   818  
   819  // countAlloc returns the number of objects allocated in span s by
   820  // scanning the allocation bitmap.
   821  // TODO:(rlh) Use popcount intrinsic.
   822  func (s *mspan) countAlloc() int {
   823  	count := 0
   824  	maxIndex := s.nelems / 8
   825  	for i := uintptr(0); i < maxIndex; i++ {
   826  		mrkBits := *s.gcmarkBits.bytep(i)
   827  		count += int(oneBitCount[mrkBits])
   828  	}
   829  	if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
   830  		mrkBits := *s.gcmarkBits.bytep(maxIndex)
   831  		mask := uint8((1 << bitsInLastByte) - 1)
   832  		bits := mrkBits & mask
   833  		count += int(oneBitCount[bits])
   834  	}
   835  	return count
   836  }
   837  
   838  // heapBitsSetType records that the new allocation [x, x+size)
   839  // holds in [x, x+dataSize) one or more values of type typ.
   840  // (The number of values is given by dataSize / typ.size.)
   841  // If dataSize < size, the fragment [x+dataSize, x+size) is
   842  // recorded as non-pointer data.
   843  // It is known that the type has pointers somewhere;
   844  // malloc does not call heapBitsSetType when there are no pointers,
   845  // because all free objects are marked as noscan during
   846  // heapBitsSweepSpan.
   847  //
   848  // There can only be one allocation from a given span active at a time,
   849  // and the bitmap for a span always falls on byte boundaries,
   850  // so there are no write-write races for access to the heap bitmap.
   851  // Hence, heapBitsSetType can access the bitmap without atomics.
   852  //
   853  // There can be read-write races between heapBitsSetType and things
   854  // that read the heap bitmap like scanobject. However, since
   855  // heapBitsSetType is only used for objects that have not yet been
   856  // made reachable, readers will ignore bits being modified by this
   857  // function. This does mean this function cannot transiently modify
   858  // bits that belong to neighboring objects. Also, on weakly-ordered
   859  // machines, callers must execute a store/store (publication) barrier
   860  // between calling this function and making the object reachable.
   861  func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
   862  	const doubleCheck = false // slow but helpful; enable to test modifications to this code
   863  
   864  	// dataSize is always size rounded up to the next malloc size class,
   865  	// except in the case of allocating a defer block, in which case
   866  	// size is sizeof(_defer{}) (at least 6 words) and dataSize may be
   867  	// arbitrarily larger.
   868  	//
   869  	// The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
   870  	// assume that dataSize == size without checking it explicitly.
   871  
   872  	if sys.PtrSize == 8 && size == sys.PtrSize {
   873  		// It's one word and it has pointers, it must be a pointer.
   874  		// Since all allocated one-word objects are pointers
   875  		// (non-pointers are aggregated into tinySize allocations),
   876  		// initSpan sets the pointer bits for us. Nothing to do here.
   877  		if doubleCheck {
   878  			h := heapBitsForAddr(x)
   879  			if !h.isPointer() {
   880  				throw("heapBitsSetType: pointer bit missing")
   881  			}
   882  			if !h.morePointers() {
   883  				throw("heapBitsSetType: scan bit missing")
   884  			}
   885  		}
   886  		return
   887  	}
   888  
   889  	h := heapBitsForAddr(x)
   890  	ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
   891  
    892  	// Heap bitmap bits for a 2-word object are only 4 bits,
    893  	// so they are shared with the objects next to it.
   894  	// This is called out as a special case primarily for 32-bit systems,
   895  	// so that on 32-bit systems the code below can assume all objects
   896  	// are 4-word aligned (because they're all 16-byte aligned).
   897  	if size == 2*sys.PtrSize {
   898  		if typ.size == sys.PtrSize {
   899  			// We're allocating a block big enough to hold two pointers.
   900  			// On 64-bit, that means the actual object must be two pointers,
   901  			// or else we'd have used the one-pointer-sized block.
   902  			// On 32-bit, however, this is the 8-byte block, the smallest one.
   903  			// So it could be that we're allocating one pointer and this was
   904  			// just the smallest block available. Distinguish by checking dataSize.
   905  			// (In general the number of instances of typ being allocated is
   906  			// dataSize/typ.size.)
   907  			if sys.PtrSize == 4 && dataSize == sys.PtrSize {
   908  				// 1 pointer object. On 32-bit machines clear the bit for the
   909  				// unused second word.
   910  				*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   911  				*h.bitp |= (bitPointer | bitScan) << h.shift
   912  			} else {
   913  				// 2-element slice of pointer.
   914  				*h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
   915  			}
   916  			return
   917  		}
   918  		// Otherwise typ.size must be 2*sys.PtrSize,
   919  		// and typ.kind&kindGCProg == 0.
   920  		if doubleCheck {
   921  			if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
   922  				print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
   923  				throw("heapBitsSetType")
   924  			}
   925  		}
   926  		b := uint32(*ptrmask)
   927  		hb := (b & 3) | bitScan
   928  		// bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
   929  		// 110011 is shifted h.shift and complemented.
   930  		// This clears out the bits that are about to be
    931  		// ored into *h.bitp in the next instructions.
   932  		*h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
   933  		*h.bitp |= uint8(hb << h.shift)
   934  		return
   935  	}
   936  
   937  	// Copy from 1-bit ptrmask into 2-bit bitmap.
   938  	// The basic approach is to use a single uintptr as a bit buffer,
   939  	// alternating between reloading the buffer and writing bitmap bytes.
   940  	// In general, one load can supply two bitmap byte writes.
   941  	// This is a lot of lines of code, but it compiles into relatively few
   942  	// machine instructions.
   943  
   944  	var (
   945  		// Ptrmask input.
   946  		p     *byte   // last ptrmask byte read
   947  		b     uintptr // ptrmask bits already loaded
   948  		nb    uintptr // number of bits in b at next read
   949  		endp  *byte   // final ptrmask byte to read (then repeat)
   950  		endnb uintptr // number of valid bits in *endp
   951  		pbits uintptr // alternate source of bits
   952  
   953  		// Heap bitmap output.
   954  		w     uintptr // words processed
   955  		nw    uintptr // number of words to process
   956  		hbitp *byte   // next heap bitmap byte to write
   957  		hb    uintptr // bits being prepared for *hbitp
   958  	)
   959  
   960  	hbitp = h.bitp
   961  
   962  	// Handle GC program. Delayed until this part of the code
   963  	// so that we can use the same double-checking mechanism
   964  	// as the 1-bit case. Nothing above could have encountered
   965  	// GC programs: the cases were all too small.
   966  	if typ.kind&kindGCProg != 0 {
   967  		heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
   968  		if doubleCheck {
   969  			// Double-check the heap bits written by GC program
   970  			// by running the GC program to create a 1-bit pointer mask
   971  			// and then jumping to the double-check code below.
   972  			// This doesn't catch bugs shared between the 1-bit and 4-bit
   973  			// GC program execution, but it does catch mistakes specific
   974  			// to just one of those and bugs in heapBitsSetTypeGCProg's
   975  			// implementation of arrays.
   976  			lock(&debugPtrmask.lock)
   977  			if debugPtrmask.data == nil {
   978  				debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
   979  			}
   980  			ptrmask = debugPtrmask.data
   981  			runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
   982  			goto Phase4
   983  		}
   984  		return
   985  	}
   986  
   987  	// Note about sizes:
   988  	//
    989  	// typ.size is the number of bytes in the object,
    990  	// and typ.ptrdata is the number of bytes in the prefix
    991  	// of the object that contains pointers. That is, the final
    992  	// typ.size - typ.ptrdata bytes contain no pointers.
   993  	// This allows optimization of a common pattern where
   994  	// an object has a small header followed by a large scalar
   995  	// buffer. If we know the pointers are over, we don't have
   996  	// to scan the buffer's heap bitmap at all.
   997  	// The 1-bit ptrmasks are sized to contain only bits for
   998  	// the typ.ptrdata prefix, zero padded out to a full byte
   999  	// of bitmap. This code sets nw (below) so that heap bitmap
  1000  	// bits are only written for the typ.ptrdata prefix; if there is
  1001  	// more room in the allocated object, the next heap bitmap
  1002  	// entry is a 00, indicating that there are no more pointers
  1003  	// to scan. So only the ptrmask for the ptrdata bytes is needed.
  1004  	//
  1005  	// Replicated copies are not as nice: if there is an array of
  1006  	// objects with scalar tails, all but the last tail does have to
  1007  	// be initialized, because there is no way to say "skip forward".
  1008  	// However, because of the possibility of a repeated type with
  1009  	// size not a multiple of 4 pointers (one heap bitmap byte),
  1010  	// the code already must handle the last ptrmask byte specially
  1011  	// by treating it as containing only the bits for endnb pointers,
  1012  	// where endnb <= 4. We represent large scalar tails that must
  1013  	// be expanded in the replication by setting endnb larger than 4.
  1014  	// This will have the effect of reading many bits out of b,
  1015  	// but once the real bits are shifted out, b will supply as many
  1016  	// zero bits as we try to read, which is exactly what we need.
  1017  
  1018  	p = ptrmask
  1019  	if typ.size < dataSize {
  1020  		// Filling in bits for an array of typ.
  1021  		// Set up for repetition of ptrmask during main loop.
   1022  		// Note that ptrmask describes only a prefix of the type: the typ.ptrdata bytes that can contain pointers.
  1023  		const maxBits = sys.PtrSize*8 - 7
  1024  		if typ.ptrdata/sys.PtrSize <= maxBits {
  1025  			// Entire ptrmask fits in uintptr with room for a byte fragment.
  1026  			// Load into pbits and never read from ptrmask again.
  1027  			// This is especially important when the ptrmask has
  1028  			// fewer than 8 bits in it; otherwise the reload in the middle
  1029  			// of the Phase 2 loop would itself need to loop to gather
  1030  			// at least 8 bits.
  1031  
  1032  			// Accumulate ptrmask into b.
  1033  			// ptrmask is sized to describe only typ.ptrdata, but we record
  1034  			// it as describing typ.size bytes, since all the high bits are zero.
  1035  			nb = typ.ptrdata / sys.PtrSize
  1036  			for i := uintptr(0); i < nb; i += 8 {
  1037  				b |= uintptr(*p) << i
  1038  				p = add1(p)
  1039  			}
  1040  			nb = typ.size / sys.PtrSize
  1041  
  1042  			// Replicate ptrmask to fill entire pbits uintptr.
  1043  			// Doubling and truncating is fewer steps than
  1044  			// iterating by nb each time. (nb could be 1.)
  1045  			// Since we loaded typ.ptrdata/sys.PtrSize bits
  1046  			// but are pretending to have typ.size/sys.PtrSize,
  1047  			// there might be no replication necessary/possible.
  1048  			pbits = b
  1049  			endnb = nb
  1050  			if nb+nb <= maxBits {
  1051  				for endnb <= sys.PtrSize*8 {
  1052  					pbits |= pbits << endnb
  1053  					endnb += endnb
  1054  				}
  1055  				// Truncate to a multiple of original ptrmask.
  1056  				// Because nb+nb <= maxBits, nb fits in a byte.
  1057  				// Byte division is cheaper than uintptr division.
  1058  				endnb = uintptr(maxBits/byte(nb)) * nb
  1059  				pbits &= 1<<endnb - 1
  1060  				b = pbits
  1061  				nb = endnb
  1062  			}
  1063  
  1064  			// Clear p and endp as sentinel for using pbits.
  1065  			// Checked during Phase 2 loop.
  1066  			p = nil
  1067  			endp = nil
  1068  		} else {
  1069  			// Ptrmask is larger. Read it multiple times.
  1070  			n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
  1071  			endp = addb(ptrmask, n)
  1072  			endnb = typ.size/sys.PtrSize - n*8
  1073  		}
  1074  	}
  1075  	if p != nil {
  1076  		b = uintptr(*p)
  1077  		p = add1(p)
  1078  		nb = 8
  1079  	}
  1080  
  1081  	if typ.size == dataSize {
  1082  		// Single entry: can stop once we reach the non-pointer data.
  1083  		nw = typ.ptrdata / sys.PtrSize
  1084  	} else {
  1085  		// Repeated instances of typ in an array.
  1086  		// Have to process first N-1 entries in full, but can stop
  1087  		// once we reach the non-pointer data in the final entry.
  1088  		nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1089  	}
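         	// Worked example for the array case above (illustrative numbers, 64-bit):
         	// an array of three elements of a 64-byte type whose pointers all sit in
         	// the first 24 bytes (typ.ptrdata = 24) can skip only the last element's
         	// scalar tail, so nw = ((3-1)*64 + 24) / 8 = 19 possibly-pointer words.
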
  1090  	if nw == 0 {
  1091  		// No pointers! Caller was supposed to check.
  1092  		println("runtime: invalid type ", typ.string())
  1093  		throw("heapBitsSetType: called with non-pointer type")
  1094  		return
  1095  	}
  1096  	if nw < 2 {
  1097  		// Must write at least 2 words, because the "no scan"
  1098  		// encoding doesn't take effect until the third word.
  1099  		nw = 2
  1100  	}
  1101  
  1102  	// Phase 1: Special case for leading byte (shift==0) or half-byte (shift==4).
  1103  	// The leading byte is special because it contains the bits for word 1,
  1104  	// which does not have the scan bit set.
   1105  	// The leading half-byte is special because it's half a byte,
  1106  	// so we have to be careful with the bits already there.
  1107  	switch {
  1108  	default:
  1109  		throw("heapBitsSetType: unexpected shift")
  1110  
  1111  	case h.shift == 0:
  1112  		// Ptrmask and heap bitmap are aligned.
  1113  		// Handle first byte of bitmap specially.
  1114  		//
  1115  		// The first byte we write out covers the first four
  1116  		// words of the object. The scan/dead bit on the first
  1117  		// word must be set to scan since there are pointers
  1118  		// somewhere in the object. The scan/dead bit on the
  1119  		// second word is the checkmark, so we don't set it.
  1120  		// In all following words, we set the scan/dead
   1121  		// appropriately to indicate that the object continues
  1122  		// to the next 2-bit entry in the bitmap.
  1123  		//
  1124  		// TODO: It doesn't matter if we set the checkmark, so
  1125  		// maybe this case isn't needed any more.
  1126  		hb = b & bitPointerAll
  1127  		hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
  1128  		if w += 4; w >= nw {
  1129  			goto Phase3
  1130  		}
  1131  		*hbitp = uint8(hb)
  1132  		hbitp = subtract1(hbitp)
  1133  		b >>= 4
  1134  		nb -= 4
  1135  
  1136  	case sys.PtrSize == 8 && h.shift == 2:
  1137  		// Ptrmask and heap bitmap are misaligned.
  1138  		// The bits for the first two words are in a byte shared
  1139  		// with another object, so we must be careful with the bits
  1140  		// already there.
  1141  		// We took care of 1-word and 2-word objects above,
  1142  		// so this is at least a 6-word object.
  1143  		hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
  1144  		// This is not noscan, so set the scan bit in the
  1145  		// first word.
  1146  		hb |= bitScan << (2 * heapBitsShift)
  1147  		b >>= 2
  1148  		nb -= 2
  1149  		// Note: no bitScan for second word because that's
  1150  		// the checkmark.
  1151  		*hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
  1152  		*hbitp |= uint8(hb)
  1153  		hbitp = subtract1(hbitp)
  1154  		if w += 2; w >= nw {
  1155  			// We know that there is more data, because we handled 2-word objects above.
  1156  			// This must be at least a 6-word object. If we're out of pointer words,
  1157  			// mark no scan in next bitmap byte and finish.
  1158  			hb = 0
  1159  			w += 4
  1160  			goto Phase3
  1161  		}
  1162  	}
  1163  
  1164  	// Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
  1165  	// The loop computes the bits for that last write but does not execute the write;
  1166  	// it leaves the bits in hb for processing by phase 3.
  1167  	// To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
  1168  	// use in the first half of the loop right now, and then we only adjust nb explicitly
  1169  	// if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
  1170  	nb -= 4
  1171  	for {
  1172  		// Emit bitmap byte.
  1173  		// b has at least nb+4 bits, with one exception:
  1174  		// if w+4 >= nw, then b has only nw-w bits,
  1175  		// but we'll stop at the break and then truncate
  1176  		// appropriately in Phase 3.
  1177  		hb = b & bitPointerAll
  1178  		hb |= bitScanAll
  1179  		if w += 4; w >= nw {
  1180  			break
  1181  		}
  1182  		*hbitp = uint8(hb)
  1183  		hbitp = subtract1(hbitp)
  1184  		b >>= 4
  1185  
  1186  		// Load more bits. b has nb right now.
  1187  		if p != endp {
  1188  			// Fast path: keep reading from ptrmask.
  1189  			// nb unmodified: we just loaded 8 bits,
  1190  			// and the next iteration will consume 8 bits,
  1191  			// leaving us with the same nb the next time we're here.
  1192  			if nb < 8 {
  1193  				b |= uintptr(*p) << nb
  1194  				p = add1(p)
  1195  			} else {
  1196  				// Reduce the number of bits in b.
  1197  				// This is important if we skipped
  1198  				// over a scalar tail, since nb could
  1199  				// be larger than the bit width of b.
  1200  				nb -= 8
  1201  			}
  1202  		} else if p == nil {
  1203  			// Almost as fast path: track bit count and refill from pbits.
  1204  			// For short repetitions.
  1205  			if nb < 8 {
  1206  				b |= pbits << nb
  1207  				nb += endnb
  1208  			}
  1209  			nb -= 8 // for next iteration
  1210  		} else {
  1211  			// Slow path: reached end of ptrmask.
  1212  			// Process final partial byte and rewind to start.
  1213  			b |= uintptr(*p) << nb
  1214  			nb += endnb
  1215  			if nb < 8 {
  1216  				b |= uintptr(*ptrmask) << nb
  1217  				p = add1(ptrmask)
  1218  			} else {
  1219  				nb -= 8
  1220  				p = ptrmask
  1221  			}
  1222  		}
  1223  
  1224  		// Emit bitmap byte.
  1225  		hb = b & bitPointerAll
  1226  		hb |= bitScanAll
  1227  		if w += 4; w >= nw {
  1228  			break
  1229  		}
  1230  		*hbitp = uint8(hb)
  1231  		hbitp = subtract1(hbitp)
  1232  		b >>= 4
  1233  	}
  1234  
  1235  Phase3:
  1236  	// Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
  1237  	if w > nw {
  1238  		// Counting the 4 entries in hb not yet written to memory,
  1239  		// there are more entries than possible pointer slots.
  1240  		// Discard the excess entries (can't be more than 3).
  1241  		mask := uintptr(1)<<(4-(w-nw)) - 1
  1242  		hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
  1243  	}
  1244  
  1245  	// Change nw from counting possibly-pointer words to total words in allocation.
  1246  	nw = size / sys.PtrSize
  1247  
  1248  	// Write whole bitmap bytes.
  1249  	// The first is hb, the rest are zero.
  1250  	if w <= nw {
  1251  		*hbitp = uint8(hb)
  1252  		hbitp = subtract1(hbitp)
  1253  		hb = 0 // for possible final half-byte below
  1254  		for w += 4; w <= nw; w += 4 {
  1255  			*hbitp = 0
  1256  			hbitp = subtract1(hbitp)
  1257  		}
  1258  	}
  1259  
  1260  	// Write final partial bitmap byte if any.
  1261  	// We know w > nw, or else we'd still be in the loop above.
  1262  	// It can be bigger only due to the 4 entries in hb that it counts.
  1263  	// If w == nw+4 then there's nothing left to do: we wrote all nw entries
  1264  	// and can discard the 4 sitting in hb.
   1265  	// But if w == nw+2, we need to write the first two entries in hb.
  1266  	// The byte is shared with the next object, so be careful with
  1267  	// existing bits.
  1268  	if w == nw+2 {
  1269  		*hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
  1270  	}
  1271  
  1272  Phase4:
  1273  	// Phase 4: all done, but perhaps double check.
  1274  	if doubleCheck {
  1275  		end := heapBitsForAddr(x + size)
  1276  		if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
  1277  			println("ended at wrong bitmap byte for", typ.string(), "x", dataSize/typ.size)
  1278  			print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1279  			print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1280  			h0 := heapBitsForAddr(x)
  1281  			print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1282  			print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
  1283  			throw("bad heapBitsSetType")
  1284  		}
  1285  
  1286  		// Double-check that bits to be written were written correctly.
  1287  		// Does not check that other bits were not written, unfortunately.
  1288  		h := heapBitsForAddr(x)
  1289  		nptr := typ.ptrdata / sys.PtrSize
  1290  		ndata := typ.size / sys.PtrSize
  1291  		count := dataSize / typ.size
  1292  		totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
  1293  		for i := uintptr(0); i < size/sys.PtrSize; i++ {
  1294  			j := i % ndata
  1295  			var have, want uint8
  1296  			have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
  1297  			if i >= totalptr {
  1298  				want = 0 // dead marker
  1299  				if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
  1300  					want = bitScan
  1301  				}
  1302  			} else {
  1303  				if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
  1304  					want |= bitPointer
  1305  				}
  1306  				if i != 1 {
  1307  					want |= bitScan
  1308  				} else {
  1309  					have &^= bitScan
  1310  				}
  1311  			}
  1312  			if have != want {
  1313  				println("mismatch writing bits for", typ.string(), "x", dataSize/typ.size)
  1314  				print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
  1315  				print("kindGCProg=", typ.kind&kindGCProg != 0, "\n")
  1316  				print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
  1317  				h0 := heapBitsForAddr(x)
  1318  				print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
  1319  				print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
  1320  				print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
  1321  				println("at word", i, "offset", i*sys.PtrSize, "have", have, "want", want)
  1322  				if typ.kind&kindGCProg != 0 {
  1323  					println("GC program:")
  1324  					dumpGCProg(addb(typ.gcdata, 4))
  1325  				}
  1326  				throw("bad heapBitsSetType")
  1327  			}
  1328  			h = h.next()
  1329  		}
  1330  		if ptrmask == debugPtrmask.data {
  1331  			unlock(&debugPtrmask.lock)
  1332  		}
  1333  	}
  1334  }
  1335  
  1336  var debugPtrmask struct {
  1337  	lock mutex
  1338  	data *byte
  1339  }
  1340  
  1341  // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
  1342  // progSize is the size of the memory described by the program.
  1343  // elemSize is the size of the element that the GC program describes (possibly only a prefix of it).
  1344  // dataSize is the total size of the intended data, a multiple of elemSize.
  1345  // allocSize is the total size of the allocated memory.
  1346  //
  1347  // GC programs are only used for large allocations.
  1348  // heapBitsSetType requires that allocSize is a multiple of 4 words,
  1349  // so that the relevant bitmap bytes are not shared with surrounding
  1350  // objects.
  1351  func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
  1352  	if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
  1353  		// Alignment will be wrong.
  1354  		throw("heapBitsSetTypeGCProg: small allocation")
  1355  	}
  1356  	var totalBits uintptr
  1357  	if elemSize == dataSize {
  1358  		totalBits = runGCProg(prog, nil, h.bitp, 2)
  1359  		if totalBits*sys.PtrSize != progSize {
  1360  			println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
  1361  			throw("heapBitsSetTypeGCProg: unexpected bit count")
  1362  		}
  1363  	} else {
  1364  		count := dataSize / elemSize
  1365  
  1366  		// Piece together program trailer to run after prog that does:
  1367  		//	literal(0)
  1368  		//	repeat(1, elemSize-progSize-1) // zeros to fill element size
  1369  		//	repeat(elemSize, count-1) // repeat that element for count
  1370  		// This zero-pads the data remaining in the first element and then
  1371  		// repeats that first element to fill the array.
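        		// For instance (illustrative sizes, not taken from any caller):
        		// with elemSize of 4 words, progSize of 2 words, and count == 3,
        		// the trailer built below is the byte sequence
        		//	01 00 81 01 80 04 02 00
        		// i.e. literal(0), repeat(1, 1), repeat(4, 2), stop.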
  1372  		var trailer [40]byte // 3 varints (max 10 each) + some bytes
  1373  		i := 0
  1374  		if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
  1375  			// literal(0)
  1376  			trailer[i] = 0x01
  1377  			i++
  1378  			trailer[i] = 0
  1379  			i++
  1380  			if n > 1 {
  1381  				// repeat(1, n-1)
  1382  				trailer[i] = 0x81
  1383  				i++
  1384  				n--
  1385  				for ; n >= 0x80; n >>= 7 {
  1386  					trailer[i] = byte(n | 0x80)
  1387  					i++
  1388  				}
  1389  				trailer[i] = byte(n)
  1390  				i++
  1391  			}
  1392  		}
  1393  		// repeat(elemSize/ptrSize, count-1)
  1394  		trailer[i] = 0x80
  1395  		i++
  1396  		n := elemSize / sys.PtrSize
  1397  		for ; n >= 0x80; n >>= 7 {
  1398  			trailer[i] = byte(n | 0x80)
  1399  			i++
  1400  		}
  1401  		trailer[i] = byte(n)
  1402  		i++
  1403  		n = count - 1
  1404  		for ; n >= 0x80; n >>= 7 {
  1405  			trailer[i] = byte(n | 0x80)
  1406  			i++
  1407  		}
  1408  		trailer[i] = byte(n)
  1409  		i++
  1410  		trailer[i] = 0
  1411  		i++
  1412  
  1413  		runGCProg(prog, &trailer[0], h.bitp, 2)
  1414  
  1415  		// Even though we filled in the full array just now,
  1416  		// record that we only filled in up to the ptrdata of the
  1417  		// last element. This will cause the code below to
  1418  		// memclr the dead section of the final array element,
  1419  		// so that scanobject can stop early in the final element.
  1420  		totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
  1421  	}
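        	// For instance (illustrative numbers): on a 64-bit system
        	// heapBitmapScale is 32, so with allocSize == 640 bytes and
        	// totalBits == 64 the memclr below clears the four bitmap bytes
        	// describing words 64-79, the dead tail of the allocation.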
  1422  	endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4))
  1423  	endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale))
  1424  	memclrNoHeapPointers(add(endAlloc, 1), uintptr(endProg)-uintptr(endAlloc))
  1425  }
  1426  
  1427  // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
  1428  // size is the size of the region described by prog, in bytes.
  1429  // The resulting bitvector will have no more than size/sys.PtrSize bits.
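        // For example (illustrative), a 64-byte region on a 64-bit system needs
        // (64/8 + 7) / 8 == 1 mask byte; one extra byte is allocated, set to the
        // sentinel 0xa1, and checked after runGCProg to detect overflow.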
  1430  func progToPointerMask(prog *byte, size uintptr) bitvector {
  1431  	n := (size/sys.PtrSize + 7) / 8
  1432  	x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
  1433  	x[len(x)-1] = 0xa1 // overflow check sentinel
  1434  	n = runGCProg(prog, nil, &x[0], 1)
  1435  	if x[len(x)-1] != 0xa1 {
  1436  		throw("progToPointerMask: overflow")
  1437  	}
  1438  	return bitvector{int32(n), &x[0]}
  1439  }
  1440  
  1441  // Packed GC pointer bitmaps, aka GC programs.
  1442  //
  1443  // For large types containing arrays, the type information has a
  1444  // natural repetition that can be encoded to save space in the
  1445  // binary and in the memory representation of the type information.
  1446  //
  1447  // The encoding is a simple Lempel-Ziv style bytecode machine
  1448  // with the following instructions:
  1449  //
  1450  //	00000000: stop
  1451  //	0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
  1452  //	10000000 n c: repeat the previous n bits c times; n, c are varints
  1453  //	1nnnnnnn c: repeat the previous n bits c times; c is a varint
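        //
        // For example (an illustrative program, not one taken from a binary),
        // a pointer mask of "pointer, scalar" repeated 100 times could be encoded as
        //
        //	02 01   emit 2 bits copied from the next byte: 1 (pointer), 0 (scalar)
        //	82 63   repeat the previous 2 bits 99 (0x63) times
        //	00      stop
        //
        // for a total of 2 + 2*99 = 200 one-bit entries.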
  1454  
  1455  // runGCProg executes the GC program prog, and then trailer if non-nil,
  1456  // writing to dst with entries of the given size.
  1457  // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
  1458  // If size == 2, dst is the 2-bit heap bitmap, and writes move backward
  1459  // starting at dst, because the heap bitmap itself is laid out backward in memory.
  1460  // In this case, the caller guarantees that only whole bytes in dst need to be written.
  1461  //
  1462  // runGCProg returns the number of 1- or 2-bit entries written to memory.
  1463  func runGCProg(prog, trailer, dst *byte, size int) uintptr {
  1464  	dstStart := dst
  1465  
  1466  	// Bits waiting to be written to memory.
  1467  	var bits uintptr
  1468  	var nbits uintptr
  1469  
  1470  	p := prog
  1471  Run:
  1472  	for {
  1473  		// Flush accumulated full bytes.
  1474  		// The rest of the loop assumes that nbits <= 7.
  1475  		for ; nbits >= 8; nbits -= 8 {
  1476  			if size == 1 {
  1477  				*dst = uint8(bits)
  1478  				dst = add1(dst)
  1479  				bits >>= 8
  1480  			} else {
  1481  				v := bits&bitPointerAll | bitScanAll
  1482  				*dst = uint8(v)
  1483  				dst = subtract1(dst)
  1484  				bits >>= 4
  1485  				v = bits&bitPointerAll | bitScanAll
  1486  				*dst = uint8(v)
  1487  				dst = subtract1(dst)
  1488  				bits >>= 4
  1489  			}
  1490  		}
  1491  
  1492  		// Process one instruction.
  1493  		inst := uintptr(*p)
  1494  		p = add1(p)
  1495  		n := inst & 0x7F
  1496  		if inst&0x80 == 0 {
  1497  			// Literal bits; n == 0 means end of program.
  1498  			if n == 0 {
  1499  				// Program is over; continue in trailer if present.
  1500  				if trailer != nil {
  1501  					//println("trailer")
  1502  					p = trailer
  1503  					trailer = nil
  1504  					continue
  1505  				}
  1506  				//println("done")
  1507  				break Run
  1508  			}
  1509  			//println("lit", n, dst)
  1510  			nbyte := n / 8
  1511  			for i := uintptr(0); i < nbyte; i++ {
  1512  				bits |= uintptr(*p) << nbits
  1513  				p = add1(p)
  1514  				if size == 1 {
  1515  					*dst = uint8(bits)
  1516  					dst = add1(dst)
  1517  					bits >>= 8
  1518  				} else {
  1519  					v := bits&0xf | bitScanAll
  1520  					*dst = uint8(v)
  1521  					dst = subtract1(dst)
  1522  					bits >>= 4
  1523  					v = bits&0xf | bitScanAll
  1524  					*dst = uint8(v)
  1525  					dst = subtract1(dst)
  1526  					bits >>= 4
  1527  				}
  1528  			}
  1529  			if n %= 8; n > 0 {
  1530  				bits |= uintptr(*p) << nbits
  1531  				p = add1(p)
  1532  				nbits += n
  1533  			}
  1534  			continue Run
  1535  		}
  1536  
  1537  		// Repeat. If n == 0, it is encoded in a varint in the next bytes.
  1538  		if n == 0 {
  1539  			for off := uint(0); ; off += 7 {
  1540  				x := uintptr(*p)
  1541  				p = add1(p)
  1542  				n |= (x & 0x7F) << off
  1543  				if x&0x80 == 0 {
  1544  					break
  1545  				}
  1546  			}
  1547  		}
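        		// (The varints use the usual base-128 little-endian encoding with a
        		// continuation bit: for example, the bytes e5 8e 26 decode to
        		// 0x65 + 0x0e<<7 + 0x26<<14 = 624485.)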
  1548  
  1549  		// Count is encoded in a varint in the next bytes.
  1550  		c := uintptr(0)
  1551  		for off := uint(0); ; off += 7 {
  1552  			x := uintptr(*p)
  1553  			p = add1(p)
  1554  			c |= (x & 0x7F) << off
  1555  			if x&0x80 == 0 {
  1556  				break
  1557  			}
  1558  		}
  1559  		c *= n // now total number of bits to copy
  1560  
  1561  		// If the number of bits being repeated is small, load them
  1562  		// into a register and use that register for the entire loop
  1563  		// instead of repeatedly reading from memory.
  1564  		// Handling fewer than 8 bits here makes the general loop simpler.
  1565  		// The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
  1566  		// the pattern to a bit buffer holding at most 7 bits (a partial byte)
  1567  		// it will not overflow.
  1568  		src := dst
  1569  		const maxBits = sys.PtrSize*8 - 7
  1570  		if n <= maxBits {
  1571  			// Start with bits in output buffer.
  1572  			pattern := bits
  1573  			npattern := nbits
  1574  
  1575  			// If we need more bits, fetch them from memory.
  1576  			if size == 1 {
  1577  				src = subtract1(src)
  1578  				for npattern < n {
  1579  					pattern <<= 8
  1580  					pattern |= uintptr(*src)
  1581  					src = subtract1(src)
  1582  					npattern += 8
  1583  				}
  1584  			} else {
  1585  				src = add1(src)
  1586  				for npattern < n {
  1587  					pattern <<= 4
  1588  					pattern |= uintptr(*src) & 0xf
  1589  					src = add1(src)
  1590  					npattern += 4
  1591  				}
  1592  			}
  1593  
  1594  			// We started with the whole bit output buffer,
  1595  			// and then we loaded bits from whole bytes.
  1596  			// Either way, we might now have too many instead of too few.
  1597  			// Discard the extra.
  1598  			if npattern > n {
  1599  				pattern >>= npattern - n
  1600  				npattern = n
  1601  			}
  1602  
  1603  			// Replicate pattern to at most maxBits.
  1604  			if npattern == 1 {
  1605  				// One bit being repeated.
  1606  				// If the bit is 1, make the pattern all 1s.
  1607  				// If the bit is 0, the pattern is already all 0s,
  1608  				// but we can claim that the number of bits
  1609  				// in the word is equal to the number we need (c),
  1610  				// because right shift of bits will zero fill.
  1611  				if pattern == 1 {
  1612  					pattern = 1<<maxBits - 1
  1613  					npattern = maxBits
  1614  				} else {
  1615  					npattern = c
  1616  				}
  1617  			} else {
  1618  				b := pattern
  1619  				nb := npattern
  1620  				if nb+nb <= maxBits {
  1621  					// Double pattern until the whole uintptr is filled.
  1622  					for nb <= sys.PtrSize*8 {
  1623  						b |= b << nb
  1624  						nb += nb
  1625  					}
  1626  					// Trim away incomplete copy of original pattern in high bits.
  1627  					// TODO(rsc): Replace with table lookup or loop on systems without divide?
  1628  					nb = maxBits / npattern * npattern
  1629  					b &= 1<<nb - 1
  1630  					pattern = b
  1631  					npattern = nb
  1632  				}
  1633  			}
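        			// For example, with pattern == 0b101 and npattern == 3 on a
        			// 64-bit system (maxBits == 57), the doubling above leaves
        			// nb == 57/3*3 == 57 bits: 19 whole copies of the pattern.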
  1634  
  1635  			// Add pattern to bit buffer and flush bit buffer, c/npattern times.
  1636  			// Since pattern contains >8 bits, there will be full bytes to flush
  1637  			// on each iteration.
  1638  			for ; c >= npattern; c -= npattern {
  1639  				bits |= pattern << nbits
  1640  				nbits += npattern
  1641  				if size == 1 {
  1642  					for nbits >= 8 {
  1643  						*dst = uint8(bits)
  1644  						dst = add1(dst)
  1645  						bits >>= 8
  1646  						nbits -= 8
  1647  					}
  1648  				} else {
  1649  					for nbits >= 4 {
  1650  						*dst = uint8(bits&0xf | bitScanAll)
  1651  						dst = subtract1(dst)
  1652  						bits >>= 4
  1653  						nbits -= 4
  1654  					}
  1655  				}
  1656  			}
  1657  
  1658  			// Add final fragment to bit buffer.
  1659  			if c > 0 {
  1660  				pattern &= 1<<c - 1
  1661  				bits |= pattern << nbits
  1662  				nbits += c
  1663  			}
  1664  			continue Run
  1665  		}
  1666  
  1667  		// Repeat; n too large to fit in a register.
  1668  		// Since nbits <= 7, we know the first few bytes of repeated data
  1669  		// are already written to memory.
  1670  		off := n - nbits // n > nbits because n > maxBits and nbits <= 7
  1671  		if size == 1 {
  1672  			// Leading src fragment.
  1673  			src = subtractb(src, (off+7)/8)
  1674  			if frag := off & 7; frag != 0 {
  1675  				bits |= uintptr(*src) >> (8 - frag) << nbits
  1676  				src = add1(src)
  1677  				nbits += frag
  1678  				c -= frag
  1679  			}
  1680  			// Main loop: load one byte, write another.
  1681  			// The bits are rotating through the bit buffer.
  1682  			for i := c / 8; i > 0; i-- {
  1683  				bits |= uintptr(*src) << nbits
  1684  				src = add1(src)
  1685  				*dst = uint8(bits)
  1686  				dst = add1(dst)
  1687  				bits >>= 8
  1688  			}
  1689  			// Final src fragment.
  1690  			if c %= 8; c > 0 {
  1691  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1692  				nbits += c
  1693  			}
  1694  		} else {
  1695  			// Leading src fragment.
  1696  			src = addb(src, (off+3)/4)
  1697  			if frag := off & 3; frag != 0 {
  1698  				bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
  1699  				src = subtract1(src)
  1700  				nbits += frag
  1701  				c -= frag
  1702  			}
  1703  			// Main loop: load one byte, write another.
  1704  			// The bits are rotating through the bit buffer.
  1705  			for i := c / 4; i > 0; i-- {
  1706  				bits |= (uintptr(*src) & 0xf) << nbits
  1707  				src = subtract1(src)
  1708  				*dst = uint8(bits&0xf | bitScanAll)
  1709  				dst = subtract1(dst)
  1710  				bits >>= 4
  1711  			}
  1712  			// Final src fragment.
  1713  			if c %= 4; c > 0 {
  1714  				bits |= (uintptr(*src) & (1<<c - 1)) << nbits
  1715  				nbits += c
  1716  			}
  1717  		}
  1718  	}
  1719  
  1720  	// Write any final bits out, using full-byte writes, even for the final byte.
  1721  	var totalBits uintptr
  1722  	if size == 1 {
  1723  		totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
  1724  		nbits += -nbits & 7
  1725  		for ; nbits > 0; nbits -= 8 {
  1726  			*dst = uint8(bits)
  1727  			dst = add1(dst)
  1728  			bits >>= 8
  1729  		}
  1730  	} else {
  1731  		totalBits = (uintptr(unsafe.Pointer(dstStart))-uintptr(unsafe.Pointer(dst)))*4 + nbits
  1732  		nbits += -nbits & 3
  1733  		for ; nbits > 0; nbits -= 4 {
  1734  			v := bits&0xf | bitScanAll
  1735  			*dst = uint8(v)
  1736  			dst = subtract1(dst)
  1737  			bits >>= 4
  1738  		}
  1739  	}
  1740  	return totalBits
  1741  }
  1742  
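        // dumpGCProg prints a debug listing of the GC program p.
        // For example, the (hypothetical) program 02 01 82 63 00 prints:
        //
        //	0 lit 2: 0x1
        //	2 repeat 2 × 99
        //	200 end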
  1743  func dumpGCProg(p *byte) {
  1744  	nptr := 0
  1745  	for {
  1746  		x := *p
  1747  		p = add1(p)
  1748  		if x == 0 {
  1749  			print("\t", nptr, " end\n")
  1750  			break
  1751  		}
  1752  		if x&0x80 == 0 {
  1753  			print("\t", nptr, " lit ", x, ":")
  1754  			n := int(x+7) / 8
  1755  			for i := 0; i < n; i++ {
  1756  				print(" ", hex(*p))
  1757  				p = add1(p)
  1758  			}
  1759  			print("\n")
  1760  			nptr += int(x)
  1761  		} else {
  1762  			nbit := int(x &^ 0x80)
  1763  			if nbit == 0 {
  1764  				for nb := uint(0); ; nb += 7 {
  1765  					x := *p
  1766  					p = add1(p)
  1767  					nbit |= int(x&0x7f) << nb
  1768  					if x&0x80 == 0 {
  1769  						break
  1770  					}
  1771  				}
  1772  			}
  1773  			count := 0
  1774  			for nb := uint(0); ; nb += 7 {
  1775  				x := *p
  1776  				p = add1(p)
  1777  				count |= int(x&0x7f) << nb
  1778  				if x&0x80 == 0 {
  1779  					break
  1780  				}
  1781  			}
  1782  			print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
  1783  			nptr += nbit * count
  1784  		}
  1785  	}
  1786  }
  1787  
  1788  // Testing.
  1789  
  1790  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  1791  	target := (*stkframe)(ctxt)
  1792  	if frame.sp <= target.sp && target.sp < frame.varp {
  1793  		*target = *frame
  1794  		return false
  1795  	}
  1796  	return true
  1797  }
  1798  
  1799  // gcbits returns the GC type info for x, for testing.
  1800  // The result is the bitmap entries (0 or 1), one entry per byte.
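        // For example (illustrative), for x = new(struct{ x uintptr; y *int })
        // on a 64-bit system the result is []byte{0, 1}: only the second word
        // of the struct holds a pointer.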
  1801  //go:linkname reflect_gcbits reflect.gcbits
  1802  func reflect_gcbits(x interface{}) []byte {
  1803  	ret := getgcmask(x)
  1804  	typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
  1805  	nptr := typ.ptrdata / sys.PtrSize
  1806  	for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
  1807  		ret = ret[:len(ret)-1]
  1808  	}
  1809  	return ret
  1810  }
  1811  
  1812  // getgcmask returns the GC type info for object ep, for testing.
  1813  func getgcmask(ep interface{}) (mask []byte) {
  1814  	e := *efaceOf(&ep)
  1815  	p := e.data
  1816  	t := e._type
  1817  	// data or bss
  1818  	for _, datap := range activeModules() {
  1819  		// data
  1820  		if datap.data <= uintptr(p) && uintptr(p) < datap.edata {
  1821  			bitmap := datap.gcdatamask.bytedata
  1822  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1823  			mask = make([]byte, n/sys.PtrSize)
  1824  			for i := uintptr(0); i < n; i += sys.PtrSize {
  1825  				off := (uintptr(p) + i - datap.data) / sys.PtrSize
  1826  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1827  			}
  1828  			return
  1829  		}
  1830  
  1831  		// bss
  1832  		if datap.bss <= uintptr(p) && uintptr(p) < datap.ebss {
  1833  			bitmap := datap.gcbssmask.bytedata
  1834  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1835  			mask = make([]byte, n/sys.PtrSize)
  1836  			for i := uintptr(0); i < n; i += sys.PtrSize {
  1837  				off := (uintptr(p) + i - datap.bss) / sys.PtrSize
  1838  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1839  			}
  1840  			return
  1841  		}
  1842  	}
  1843  
  1844  	// heap
  1845  	var n uintptr
  1846  	var base uintptr
  1847  	if mlookup(uintptr(p), &base, &n, nil) != 0 {
  1848  		mask = make([]byte, n/sys.PtrSize)
  1849  		for i := uintptr(0); i < n; i += sys.PtrSize {
  1850  			hbits := heapBitsForAddr(base + i)
  1851  			if hbits.isPointer() {
  1852  				mask[i/sys.PtrSize] = 1
  1853  			}
  1854  			if i != 1*sys.PtrSize && !hbits.morePointers() {
  1855  				mask = mask[:i/sys.PtrSize]
  1856  				break
  1857  			}
  1858  		}
  1859  		return
  1860  	}
  1861  
  1862  	// stack
  1863  	if _g_ := getg(); _g_.m.curg.stack.lo <= uintptr(p) && uintptr(p) < _g_.m.curg.stack.hi {
  1864  		var frame stkframe
  1865  		frame.sp = uintptr(p)
  1866  		_g_ := getg()
  1867  		gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  1868  		if frame.fn.valid() {
  1869  			f := frame.fn
  1870  			targetpc := frame.continpc
  1871  			if targetpc == 0 {
  1872  				return
  1873  			}
  1874  			if targetpc != f.entry {
  1875  				targetpc--
  1876  			}
  1877  			pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, nil)
  1878  			if pcdata == -1 {
  1879  				return
  1880  			}
  1881  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  1882  			if stkmap == nil || stkmap.n <= 0 {
  1883  				return
  1884  			}
  1885  			bv := stackmapdata(stkmap, pcdata)
  1886  			size := uintptr(bv.n) * sys.PtrSize
  1887  			n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  1888  			mask = make([]byte, n/sys.PtrSize)
  1889  			for i := uintptr(0); i < n; i += sys.PtrSize {
  1890  				bitmap := bv.bytedata
  1891  				off := (uintptr(p) + i - frame.varp + size) / sys.PtrSize
  1892  				mask[i/sys.PtrSize] = (*addb(bitmap, off/8) >> (off % 8)) & 1
  1893  			}
  1894  		}
  1895  		return
  1896  	}
  1897  
  1898  	// otherwise, not something the GC knows about.
  1899  	// possibly read-only data, like malloc(0).
  1900  	// must not have pointers
  1901  	return
  1902  }