github.com/zach-klippenstein/go@v0.0.0-20150108044943-fcfbeb3adf58/src/runtime/mgc.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
     6  // It has gotten completely out of control.
     7  
     8  // Garbage collector (GC).
     9  //
    10  // The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
    11  // GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
    12  // non-generational and non-compacting. Allocation is done using size segregated per P allocation
    13  // areas to minimize fragmentation while eliminating locks in the common case.
    14  //
    15  // The algorithm decomposes into several steps.
    16  // This is a high level description of the algorithm being used. For an overview of GC a good
    17  // place to start is Richard Jones' gchandbook.org.
    18  //
    19  // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
    20  // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
    21  // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
    22  // 966-975.
    23  // For journal quality proofs that these steps are complete, correct, and terminate see
    24  // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
    25  // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
    26  //
    27  //  0. Set phase = GCscan from GCoff.
    28  //  1. Wait for all P's to acknowledge phase change.
    29  //         At this point all goroutines have passed through a GC safepoint and
    30  //         know we are in the GCscan phase.
    31  //  2. GC scans all goroutine stacks, marking and enqueuing all encountered pointers
    32  //       (marking avoids most duplicate enqueuing but races may produce benign duplication).
    33  //       Preempted goroutines are scanned before P schedules next goroutine.
    34  //  3. Set phase = GCmark.
    35  //  4. Wait for all P's to acknowledge phase change.
    36  //  5. Now the write barrier marks and enqueues any white object pointed to from a black, grey, or white slot.
    37  //       Malloc still allocates white (non-marked) objects.
    38  //  6. Meanwhile GC transitively walks the heap marking reachable objects.
    39  //  7. When GC finishes marking heap, it preempts P's one-by-one and
    40  //       retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
    41  //       currently scheduled on the P).
    42  //  8. Once the GC has exhausted all available marking work it sets phase = marktermination.
    43  //  9. Wait for all P's to acknowledge phase change.
    44  // 10. Malloc now allocates black objects, so the number of unmarked reachable objects
    45  //        monotonically decreases.
    46  // 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
    47  //        reachable objects.
    48  // 12. When GC completes a full cycle over P's and discovers no new grey
    49  //         objects (which means all reachable objects are marked), set phase = GCsweep.
    50  // 13. Wait for all P's to acknowledge phase change.
    51  // 14. Now malloc allocates white (but sweeps spans before use).
    52  //         Write barrier becomes nop.
    53  // 15. GC does background sweeping, see description below.
    54  // 16. When sweeping is complete set phase to GCoff.
    55  // 17. When sufficient allocation has taken place replay the sequence starting at 0 above,
    56  //         see discussion of GC rate below.
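        // In short (summarizing steps 0-17 above), one GC cycle moves through the phases
        //
        //	GCoff -> GCscan -> GCmark -> GCmarktermination -> GCsweep -> GCoff
        //
        // with every P acknowledging a phase change before work that depends on the
        // new phase begins.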
    57  
    58  // Changing phases.
    59  // Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
    60  // All phase action must be benign in the presence of a change.
    61  // Starting with GCoff
    62  // GCoff to GCscan
    63  //     GCscan scans stacks and globals, greying them, and never marks an object black.
    64  //     Once all the P's are aware of the new phase they will scan gs on preemption.
    65  //     This means that the scanning of preempted gs can't start until all the Ps
    66  //     have acknowledged.
    67  // GCscan to GCmark
    68  //     GCMark turns on the write barrier which also only greys objects. No scanning
    69  //     of objects (making them black) can happen until all the Ps have acknowledged
    70  //     the phase change.
    71  // GCmark to GCmarktermination
    72  //     The only change here is that we start allocating black so the Ps must acknowledge
    73  //     the change before we begin the termination algorithm
    74  // GCmarktermination to GCsweep
    75  //     Objects currently on the freelist must be marked black for this to work.
    76  //     Are things on the free lists black or white? How does the sweep phase work?
    77  
    78  // Concurrent sweep.
    79  // The sweep phase proceeds concurrently with normal program execution.
    80  // The heap is swept span-by-span both lazily (when a goroutine needs another span)
    81  // and concurrently in a background goroutine (this helps programs that are not CPU bound).
    82  // However, at the end of the stop-the-world GC phase we don't know the size of the live heap,
    83  // and so next_gc calculation is tricky and happens as follows.
    84  // At the end of the stop-the-world phase next_gc is conservatively set based on total
    85  // heap size; all spans are marked as "needs sweeping".
    86  // Whenever a span is swept, next_gc is decremented by GOGC*newly_freed_memory.
    87  // The background sweeper goroutine simply sweeps spans one-by-one bringing next_gc
    88  // closer to the target value. However, this is not enough to avoid over-allocating memory.
    89  // Consider that a goroutine wants to allocate a new span for a large object and
    90  // there are no free swept spans, but there are small-object unswept spans.
    91  // If the goroutine naively allocates a new span, it can surpass the yet-unknown
    92  // target next_gc value. In order to prevent such cases (1) when a goroutine needs
    93  // to allocate a new small-object span, it sweeps small-object spans for the same
    94  // object size until it frees at least one object; (2) when a goroutine needs to
    95  // allocate large-object span from heap, it sweeps spans until it frees at least
    96  // that many pages into heap. Together these two measures ensure that we don't surpass
    97  // target next_gc value by a large margin. There is an exception: if a goroutine sweeps
    98  // and frees two nonadjacent one-page spans to the heap, it will allocate a new two-page span,
    99  // but there can still be other one-page unswept spans which could be combined into a
   100  // two-page span.
   101  // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
   102  // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
   103  // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
   104  // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
   105  // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
   106  // The finalizer goroutine is kicked off only when all spans are swept.
   107  // When the next GC starts, it sweeps all not-yet-swept spans (if any).
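        // A rough sketch of the accounting described above, matching the xadd64
        // calls in mSpan_Sweep below: sweeping a span that frees F bytes adjusts
        // the target roughly as
        //
        //	next_gc -= F * (GOGC + 100) / 100
        //
        // so with GOGC=100 each freed byte lowers next_gc by about two bytes.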
   108  
   109  // GC rate.
   110  // Next GC is after we've allocated an extra amount of memory proportional to
   111  // the amount already in use. The proportion is controlled by the GOGC environment variable
   112  // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
   113  // (this mark is tracked in next_gc variable). This keeps the GC cost in linear
   114  // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
   115  // (and also the amount of extra memory used).
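        // As a small worked example of the rule above (live_heap is an illustrative
        // name, not an actual runtime variable):
        //
        //	next_gc = live_heap + live_heap*GOGC/100
        //
        //	GOGC=100, live_heap=4M  =>  next_gc = 8M
        //	GOGC=200, live_heap=4M  =>  next_gc = 12M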
   116  
   117  package runtime
   118  
   119  import "unsafe"
   120  
   121  const (
   122  	_DebugGC         = 0
   123  	_DebugGCPtrs     = false // if true, print trace of every pointer load during GC
   124  	_ConcurrentSweep = true
   125  
   126  	_WorkbufSize     = 4 * 1024
   127  	_FinBlockSize    = 4 * 1024
   128  	_RootData        = 0
   129  	_RootBss         = 1
   130  	_RootFinalizers  = 2
   131  	_RootSpans       = 3
   132  	_RootFlushCaches = 4
   133  	_RootCount       = 5
   134  )
   135  
   136  // ptrmask for an allocation containing a single pointer.
   137  var oneptr = [...]uint8{bitsPointer}
   138  
   139  // Initialized from $GOGC.  GOGC=off means no GC.
   140  var gcpercent int32
   141  
   142  // Holding worldsema grants an M the right to try to stop the world.
   143  // The procedure is:
   144  //
   145  //	semacquire(&worldsema);
   146  //	m.gcing = 1;
   147  //	stoptheworld();
   148  //
   149  //	... do stuff ...
   150  //
   151  //	m.gcing = 0;
   152  //	semrelease(&worldsema);
   153  //	starttheworld();
   154  //
   155  var worldsema uint32 = 1
   156  
   157  // It is a bug if bits does not have bitBoundary set, but
   158  // there are still some cases related to stack spans where
   159  // this happens.
   160  type markbits struct {
   161  	bitp  *byte   // pointer to the byte holding xbits
   162  	shift uintptr // number of bits xbits must be shifted right to get bits
   163  	xbits byte    // byte holding all the bits from *bitp
   164  	bits  byte    // mark and boundary bits relevant to corresponding slot.
   165  	tbits byte    // pointer||scalar bits relevant to corresponding slot.
   166  }
   167  
   168  type workbuf struct {
   169  	node lfnode // must be first
   170  	nobj uintptr
   171  	obj  [(_WorkbufSize - unsafe.Sizeof(lfnode{}) - ptrSize) / ptrSize]uintptr
   172  }
   173  
   174  var data, edata, bss, ebss, gcdata, gcbss struct{}
   175  
   176  var finlock mutex  // protects the following variables
   177  var fing *g        // goroutine that runs finalizers
   178  var finq *finblock // list of finalizers that are to be executed
   179  var finc *finblock // cache of free blocks
   180  var finptrmask [_FinBlockSize / ptrSize / pointersPerByte]byte
   181  var fingwait bool
   182  var fingwake bool
   183  var allfin *finblock // list of all blocks
   184  
   185  var gcdatamask bitvector
   186  var gcbssmask bitvector
   187  
   188  var gclock mutex
   189  
   190  var badblock [1024]uintptr
   191  var nbadblock int32
   192  
   193  type workdata struct {
   194  	full    uint64                // lock-free list of full blocks
   195  	empty   uint64                // lock-free list of empty blocks
   196  	partial uint64                // lock-free list of partially filled blocks
   197  	pad0    [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
   198  	nproc   uint32
   199  	tstart  int64
   200  	nwait   uint32
   201  	ndone   uint32
   202  	alldone note
   203  	markfor *parfor
   204  
   205  	// Copy of mheap.allspans for marker or sweeper.
   206  	spans []*mspan
   207  }
   208  
   209  var work workdata
   210  
   211  //go:linkname weak_cgo_allocate go.weak.runtime._cgo_allocate_internal
   212  var weak_cgo_allocate byte
   213  
   214  // Is _cgo_allocate linked into the binary?
   215  //go:nowritebarrier
   216  func have_cgo_allocate() bool {
   217  	return &weak_cgo_allocate != nil
   218  }
   219  
   220  // To help debug the concurrent GC we remark with the world
   221  // stopped, ensuring that any object encountered has its normal
   222  // mark bit set. To do this we use an orthogonal bit
   223  // pattern to indicate the object is marked. The following pattern
   224  // uses the upper two bits in the object's boundary nibble.
   225  // 01: scalar  not marked
   226  // 10: pointer not marked
   227  // 11: pointer     marked
   228  // 00: scalar      marked
   229  // Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
   230  // The higher bit is 1 for pointers and 0 for scalars, whether the object
   231  // is marked or not.
   232  // The first nibble no longer holds the bitsDead pattern indicating that
   233  // there are no more pointers in the object. This information is held
   234  // in the second nibble.
   235  
   236  // When marking an object, if the bool checkmark is true one uses the above
   237  // encoding; otherwise one uses the bitMarked bit in the lower two bits
   238  // of the nibble.
   239  var (
   240  	checkmark         = false
   241  	gccheckmarkenable = true
   242  )
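        // For example (a sketch of the encoding above): a pointer slot whose upper two
        // boundary-nibble bits read 10 (pointer, not checkmarked) becomes 11 after
        // checkmarking, and a scalar slot reading 01 becomes 00; xoring those two bits
        // with 01 performs the flip in either direction (see docheckmark below).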
   243  
   244  // inheap reports whether b is a pointer into a (potentially dead) heap object.
   245  // It returns false for pointers into stack spans.
   246  //go:nowritebarrier
   247  func inheap(b uintptr) bool {
   248  	if b == 0 || b < mheap_.arena_start || b >= mheap_.arena_used {
   249  		return false
   250  	}
   251  	// Consult the span table to check that b points into an in-use span.
   252  	k := b >> _PageShift
   253  	x := k
   254  	x -= mheap_.arena_start >> _PageShift
   255  	s := h_spans[x]
   256  	if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
   257  		return false
   258  	}
   259  	return true
   260  }
   261  
   262  // Given an address in the heap, return the relevant byte from the GC bitmap. This routine
   263  // can be used on addresses to the start of an object or to the interior of an object.
   264  //go:nowritebarrier
   265  func slottombits(obj uintptr, mbits *markbits) {
   266  	off := (obj&^(ptrSize-1) - mheap_.arena_start) / ptrSize
   267  	*(*uintptr)(unsafe.Pointer(&mbits.bitp)) = mheap_.arena_start - off/wordsPerBitmapByte - 1
   268  	mbits.shift = off % wordsPerBitmapByte * gcBits
   269  	mbits.xbits = *mbits.bitp
   270  	mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
   271  	mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
   272  }
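        // Illustrative example of the mapping computed by slottombits, assuming a
        // 64-bit system (ptrSize = 8) with wordsPerBitmapByte = 2 and gcBits = 4,
        // i.e. one nibble of bitmap per heap word as described above:
        //
        //	obj   = arena_start + 24         =>  off = 3
        //	bitp  = arena_start - off/2 - 1   =  arena_start - 2
        //	shift = (off % 2) * gcBits        =  4   (the upper nibble of *bitp)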
   273  
   274  // b is a pointer into the heap.
   275  // Find the start of the object referred to by b.
   276  // Set mbits to the associated bits from the bit map.
   277  // If b is not a valid heap object return nil and
   278  // undefined values in mbits.
   279  //go:nowritebarrier
   280  func objectstart(b uintptr, mbits *markbits) uintptr {
   281  	obj := b &^ (ptrSize - 1)
   282  	for {
   283  		slottombits(obj, mbits)
   284  		if mbits.bits&bitBoundary == bitBoundary {
   285  			break
   286  		}
   287  
   288  		// Not a beginning of a block, consult span table to find the block beginning.
   289  		k := b >> _PageShift
   290  		x := k
   291  		x -= mheap_.arena_start >> _PageShift
   292  		s := h_spans[x]
   293  		if s == nil || pageID(k) < s.start || b >= s.limit || s.state != mSpanInUse {
   294  			if s != nil && s.state == _MSpanStack {
   295  				return 0 // This is legit.
   296  			}
   297  
   298  			// The following ensures that we are rigorous about what data
   299  			// structures hold valid pointers
   300  			if false {
   301  				// Still happens sometimes. We don't know why.
   302  				printlock()
   303  				print("runtime:objectstart Span weird: obj=", hex(obj), " k=", hex(k))
   304  				if s == nil {
   305  					print(" s=nil\n")
   306  				} else {
   307  					print(" s.start=", hex(s.start<<_PageShift), " s.limit=", hex(s.limit), " s.state=", s.state, "\n")
   308  				}
   309  				printunlock()
   310  				throw("objectstart: bad pointer in unexpected span")
   311  			}
   312  			return 0
   313  		}
   314  
   315  		p := uintptr(s.start) << _PageShift
   316  		if s.sizeclass != 0 {
   317  			size := s.elemsize
   318  			idx := (obj - p) / size
   319  			p = p + idx*size
   320  		}
   321  		if p == obj {
   322  			print("runtime: failed to find block beginning for ", hex(p), " s=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), "\n")
   323  			throw("failed to find block beginning")
   324  		}
   325  		obj = p
   326  	}
   327  
   328  	// if size(obj.firstfield) < PtrSize, the &obj.secondfield could map to the boundary bit
   329  	// Clear any low bits to get to the start of the object.
   330  	// greyobject depends on this.
   331  	return obj
   332  }
   333  
   334  // Slow for now as we serialize this, but since this is on a debug path
   335  // speed is not critical at this point.
   336  var andlock mutex
   337  
   338  //go:nowritebarrier
   339  func atomicand8(src *byte, val byte) {
   340  	lock(&andlock)
   341  	*src &= val
   342  	unlock(&andlock)
   343  }
   344  
   345  // Mark using the checkmark scheme.
   346  //go:nowritebarrier
   347  func docheckmark(mbits *markbits) {
   348  	// xor 01 moves 01(scalar unmarked) to 00(scalar marked)
   349  	// and 10(pointer unmarked) to 11(pointer marked)
   350  	if mbits.tbits == _BitsScalar {
   351  		atomicand8(mbits.bitp, ^byte(_BitsCheckMarkXor<<mbits.shift<<2))
   352  	} else if mbits.tbits == _BitsPointer {
   353  		atomicor8(mbits.bitp, byte(_BitsCheckMarkXor<<mbits.shift<<2))
   354  	}
   355  
   356  	// reload bits for ischeckmarked
   357  	mbits.xbits = *mbits.bitp
   358  	mbits.bits = (mbits.xbits >> mbits.shift) & bitMask
   359  	mbits.tbits = ((mbits.xbits >> mbits.shift) & bitPtrMask) >> 2
   360  }
   361  
   362  // In the default scheme, reports whether mbits refers to a marked object.
   363  //go:nowritebarrier
   364  func ismarked(mbits *markbits) bool {
   365  	if mbits.bits&bitBoundary != bitBoundary {
   366  		throw("ismarked: bits should have boundary bit set")
   367  	}
   368  	return mbits.bits&bitMarked == bitMarked
   369  }
   370  
   371  // In the checkmark scheme, reports whether mbits refers to a marked object.
   372  //go:nowritebarrier
   373  func ischeckmarked(mbits *markbits) bool {
   374  	if mbits.bits&bitBoundary != bitBoundary {
   375  		throw("ischeckmarked: bits should have boundary bit set")
   376  	}
   377  	return mbits.tbits == _BitsScalarMarked || mbits.tbits == _BitsPointerMarked
   378  }
   379  
   380  // When in the GCmarktermination phase we allocate black.
   381  //go:nowritebarrier
   382  func gcmarknewobject_m(obj uintptr) {
   383  	if gcphase != _GCmarktermination {
   384  		throw("marking new object while not in mark termination phase")
   385  	}
   386  	if checkmark { // The world should be stopped so this should not happen.
   387  		throw("gcmarknewobject called while doing checkmark")
   388  	}
   389  
   390  	var mbits markbits
   391  	slottombits(obj, &mbits)
   392  	if mbits.bits&bitMarked != 0 {
   393  		return
   394  	}
   395  
   396  	// Each byte of GC bitmap holds info for two words.
   397  	// If the current object is larger than two words, or if the object is one word
   398  	// but the object it shares the byte with is already marked,
   399  	// then all the possible concurrent updates are trying to set the same bit,
   400  	// so we can use a non-atomic update.
   401  	if mbits.xbits&(bitMask|(bitMask<<gcBits)) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
   402  		*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
   403  	} else {
   404  		atomicor8(mbits.bitp, bitMarked<<mbits.shift)
   405  	}
   406  }
   407  
   408  // obj is the start of an object with mark mbits.
   409  // If it isn't already marked, mark it and enqueue into workbuf.
   410  // Return possibly new workbuf to use.
   411  // base and off are for debugging only and could be removed.
   412  //go:nowritebarrier
   413  func greyobject(obj uintptr, base, off uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
   414  	// obj should be start of allocation, and so must be at least pointer-aligned.
   415  	if obj&(ptrSize-1) != 0 {
   416  		throw("greyobject: obj not pointer-aligned")
   417  	}
   418  
   419  	if checkmark {
   420  		if !ismarked(mbits) {
   421  			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), ", mbits->bits=", hex(mbits.bits), " *mbits->bitp=", hex(*mbits.bitp), "\n")
   422  			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
   423  
   424  			k := obj >> _PageShift
   425  			x := k
   426  			x -= mheap_.arena_start >> _PageShift
   427  			s := h_spans[x]
   428  			printlock()
   429  			print("runtime:greyobject Span: obj=", hex(obj), " k=", hex(k))
   430  			if s == nil {
   431  				print(" s=nil\n")
   432  			} else {
   433  				print(" s.start=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, "\n")
   434  				// NOTE(rsc): This code is using s.sizeclass as an approximation of the
   435  				// number of pointer-sized words in an object. Perhaps not what was intended.
   436  				for i := 0; i < int(s.sizeclass); i++ {
   437  					print(" *(obj+", i*ptrSize, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)*ptrSize))), "\n")
   438  				}
   439  			}
   440  			throw("checkmark found unmarked object")
   441  		}
   442  		if ischeckmarked(mbits) {
   443  			return wbuf
   444  		}
   445  		docheckmark(mbits)
   446  		if !ischeckmarked(mbits) {
   447  			print("mbits xbits=", hex(mbits.xbits), " bits=", hex(mbits.bits), " tbits=", hex(mbits.tbits), " shift=", mbits.shift, "\n")
   448  			throw("docheckmark and ischeckmarked disagree")
   449  		}
   450  	} else {
   451  		// If marked we have nothing to do.
   452  		if mbits.bits&bitMarked != 0 {
   453  			return wbuf
   454  		}
   455  
   456  		// Each byte of GC bitmap holds info for two words.
   457  		// If the current object is larger than two words, or if the object is one word
   458  		// but the object it shares the byte with is already marked,
   459  		// then all the possible concurrent updates are trying to set the same bit,
   460  		// so we can use a non-atomic update.
   461  		if mbits.xbits&(bitMask|bitMask<<gcBits) != bitBoundary|bitBoundary<<gcBits || work.nproc == 1 {
   462  			*mbits.bitp = mbits.xbits | bitMarked<<mbits.shift
   463  		} else {
   464  			atomicor8(mbits.bitp, bitMarked<<mbits.shift)
   465  		}
   466  	}
   467  
   468  	if !checkmark && (mbits.xbits>>(mbits.shift+2))&_BitsMask == _BitsDead {
   469  		return wbuf // noscan object
   470  	}
   471  
   472  	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
   473  	// seems like a nice optimization that can be added back in.
   474  	// There needs to be time between the PREFETCH and the use.
   475  	// Previously we put the obj in an 8 element buffer that is drained at a rate
   476  	// to give the PREFETCH time to do its work.
   477  	// Use of PREFETCHNTA might be more appropriate than PREFETCH
   478  
   479  	// If workbuf is full, obtain an empty one.
   480  	if wbuf.nobj >= uintptr(len(wbuf.obj)) {
   481  		wbuf = getempty(wbuf)
   482  	}
   483  
   484  	wbuf.obj[wbuf.nobj] = obj
   485  	wbuf.nobj++
   486  	return wbuf
   487  }
   488  
   489  // Scan the object b of size n, adding pointers to wbuf.
   490  // Return possibly new wbuf to use.
   491  // If ptrmask != nil, it specifies where pointers are in b.
   492  // If ptrmask == nil, the GC bitmap should be consulted.
   493  // In this case, n may be an overestimate of the size; the GC bitmap
   494  // must also be used to make sure the scan stops at the end of b.
   495  //go:nowritebarrier
   496  func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
   497  	arena_start := mheap_.arena_start
   498  	arena_used := mheap_.arena_used
   499  
   500  	// Find bits of the beginning of the object.
   501  	var ptrbitp unsafe.Pointer
   502  	var mbits markbits
   503  	if ptrmask == nil {
   504  		b = objectstart(b, &mbits)
   505  		if b == 0 {
   506  			return wbuf
   507  		}
   508  		ptrbitp = unsafe.Pointer(mbits.bitp)
   509  	}
   510  	for i := uintptr(0); i < n; i += ptrSize {
   511  		// Find bits for this word.
   512  		var bits uintptr
   513  		if ptrmask != nil {
   514  			// dense mask (stack or data)
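        			// Each mask byte describes 4 words: word i/ptrSize uses the
        			// bitsPerPointer (2) type bits found at bit offset
        			// ((i/ptrSize)%4)*bitsPerPointer within ptrmask[(i/ptrSize)/4].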
   515  			bits = (uintptr(*(*byte)(add(unsafe.Pointer(ptrmask), (i/ptrSize)/4))) >> (((i / ptrSize) % 4) * bitsPerPointer)) & bitsMask
   516  		} else {
   517  			// Check if we have reached end of span.
   518  			// n is an overestimate of the size of the object.
   519  			if (b+i)%_PageSize == 0 && h_spans[(b-arena_start)>>_PageShift] != h_spans[(b+i-arena_start)>>_PageShift] {
   520  				break
   521  			}
   522  
   523  			// Consult GC bitmap.
   524  			bits = uintptr(*(*byte)(ptrbitp))
   525  			if wordsPerBitmapByte != 2 {
   526  				throw("alg doesn't work for wordsPerBitmapByte != 2")
   527  			}
   528  			j := (uintptr(b) + i) / ptrSize & 1 // j indicates upper nibble or lower nibble
   529  			bits >>= gcBits * j
   530  			if i == 0 {
   531  				bits &^= bitBoundary
   532  			}
   533  			ptrbitp = add(ptrbitp, -j)
   534  
   535  			if bits&bitBoundary != 0 && i != 0 {
   536  				break // reached beginning of the next object
   537  			}
   538  			bits = (bits & bitPtrMask) >> 2 // bits refer to the type bits.
   539  
   540  			if i != 0 && bits == bitsDead { // BitsDead in first nibble not valid during checkmark
   541  				break // reached no-scan part of the object
   542  			}
   543  		}
   544  
   545  		if bits <= _BitsScalar { // _BitsScalar, _BitsDead, _BitsScalarMarked
   546  			continue
   547  		}
   548  
   549  		if bits&_BitsPointer != _BitsPointer {
   550  			print("gc checkmark=", checkmark, " b=", hex(b), " ptrmask=", ptrmask, " mbits.bitp=", mbits.bitp, " mbits.xbits=", hex(mbits.xbits), " bits=", hex(bits), "\n")
   551  			throw("unexpected garbage collection bits")
   552  		}
   553  
   554  		obj := *(*uintptr)(unsafe.Pointer(b + i))
   555  
   556  		// At this point we have extracted the next potential pointer.
   557  		// Check if it points into heap.
   558  		if obj == 0 || obj < arena_start || obj >= arena_used {
   559  			continue
   560  		}
   561  
   562  		if mheap_.shadow_enabled && debug.wbshadow >= 2 && gccheckmarkenable && checkmark {
   563  			checkwbshadow((*uintptr)(unsafe.Pointer(b + i)))
   564  		}
   565  
   566  		// Mark the object, returning some important bits.
   567  		// If we combine the following two routines we don't have to pass mbits or obj around.
   568  		var mbits markbits
   569  		obj = objectstart(obj, &mbits)
   570  		if obj == 0 {
   571  			continue
   572  		}
   573  		wbuf = greyobject(obj, b, i, &mbits, wbuf)
   574  	}
   575  	return wbuf
   576  }
   577  
   578  // scanblock starts by scanning b as scanobject would.
   579  // If the gcphase is GCscan, that's all scanblock does.
   580  // Otherwise it traverses some fraction of the pointers it found in b, recursively.
   581  // As a special case, scanblock(nil, 0, nil) means to scan previously queued work,
   582  // stopping only when no work is left in the system.
   583  //go:nowritebarrier
   584  func scanblock(b0, n0 uintptr, ptrmask *uint8) {
   585  	// Use local copies of original parameters, so that a stack trace
   586  	// due to one of the throws below shows the original block
   587  	// base and extent.
   588  	b := b0
   589  	n := n0
   590  	wbuf := getpartialorempty()
   591  	if b != 0 {
   592  		wbuf = scanobject(b, n, ptrmask, wbuf)
   593  		if gcphase == _GCscan {
   594  			if inheap(b) && ptrmask == nil {
   595  				// b is in heap, we are in GCscan so there should be a ptrmask.
   596  				throw("scanblock: In GCscan phase and inheap is true.")
   597  			}
   598  			// GCscan only goes one level deep since mark wb not turned on.
   599  			putpartial(wbuf)
   600  			return
   601  		}
   602  	}
   603  	if gcphase == _GCscan {
   604  		throw("scanblock: In GCscan phase but no b passed in.")
   605  	}
   606  
   607  	keepworking := b == 0
   608  
   609  	if gcphase != _GCmark && gcphase != _GCmarktermination {
   610  		println("gcphase", gcphase)
   611  		throw("scanblock phase")
   612  	}
   613  
   614  	// ptrmask can have 2 possible values:
   615  	// 1. nil - obtain pointer mask from GC bitmap.
   616  	// 2. pointer to a compact mask (for stacks and data).
   617  	for {
   618  		if wbuf.nobj == 0 {
   619  			if !keepworking {
   620  				putempty(wbuf)
   621  				return
   622  			}
   623  			// Refill workbuf from global queue.
   624  			wbuf = getfull(wbuf)
   625  		if wbuf == nil { // nil means no work left; the out-of-work barrier was reached
   626  				return
   627  			}
   628  
   629  			if wbuf.nobj <= 0 {
   630  				throw("runtime:scanblock getfull returns empty buffer")
   631  			}
   632  		}
   633  
   634  		// If another proc wants a pointer, give it some.
   635  		if work.nwait > 0 && wbuf.nobj > 4 && work.full == 0 {
   636  			wbuf = handoff(wbuf)
   637  		}
   638  
   639  		// This might be a good place to add prefetch code...
   640  		// if(wbuf->nobj > 4) {
   641  		//         PREFETCH(wbuf->obj[wbuf->nobj - 3]);
   642  		//  }
   643  		wbuf.nobj--
   644  		b = wbuf.obj[wbuf.nobj]
   645  		wbuf = scanobject(b, mheap_.arena_used-b, nil, wbuf)
   646  	}
   647  }
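        // Typical uses (sketch only): markroot below scans roots with an explicit mask,
        // roughly scanblock(start_of_data, size_of_data, gcdatamask.bytedata), while
        // scanblock(0, 0, nil) drains previously queued work until the out-of-work
        // barrier in getfull is reached. start_of_data and size_of_data are
        // illustrative names, not real symbols.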
   648  
   649  //go:nowritebarrier
   650  func markroot(desc *parfor, i uint32) {
   651  	// Note: if you add a case here, please also update heapdump.c:dumproots.
   652  	switch i {
   653  	case _RootData:
   654  		scanblock(uintptr(unsafe.Pointer(&data)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)), gcdatamask.bytedata)
   655  
   656  	case _RootBss:
   657  		scanblock(uintptr(unsafe.Pointer(&bss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)), gcbssmask.bytedata)
   658  
   659  	case _RootFinalizers:
   660  		for fb := allfin; fb != nil; fb = fb.alllink {
   661  			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0])
   662  		}
   663  
   664  	case _RootSpans:
   665  		// mark MSpan.specials
   666  		sg := mheap_.sweepgen
   667  		for spanidx := uint32(0); spanidx < uint32(len(work.spans)); spanidx++ {
   668  			s := work.spans[spanidx]
   669  			if s.state != mSpanInUse {
   670  				continue
   671  			}
   672  			if !checkmark && s.sweepgen != sg {
   673  				// sweepgen was updated (+2) during non-checkmark GC pass
   674  				print("sweep ", s.sweepgen, " ", sg, "\n")
   675  				throw("gc: unswept span")
   676  			}
   677  			for sp := s.specials; sp != nil; sp = sp.next {
   678  				if sp.kind != _KindSpecialFinalizer {
   679  					continue
   680  				}
   681  				// don't mark finalized object, but scan it so we
   682  				// retain everything it points to.
   683  				spf := (*specialfinalizer)(unsafe.Pointer(sp))
   684  				// A finalizer can be set for an inner byte of an object, find object beginning.
   685  				p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   686  				if gcphase != _GCscan {
   687  					scanblock(p, s.elemsize, nil) // scanned during mark phase
   688  				}
   689  				scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptr[0])
   690  			}
   691  		}
   692  
   693  	case _RootFlushCaches:
   694  		if gcphase != _GCscan { // Do not flush mcaches during GCscan phase.
   695  			flushallmcaches()
   696  		}
   697  
   698  	default:
   699  		// the rest is scanning goroutine stacks
   700  		if uintptr(i-_RootCount) >= allglen {
   701  			throw("markroot: bad index")
   702  		}
   703  		gp := allgs[i-_RootCount]
   704  
   705  		// Remember when we first observed the G blocked;
   706  		// needed only for output in tracebacks.
   707  		status := readgstatus(gp) // We are not in a scan state
   708  		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
   709  			gp.waitsince = work.tstart
   710  		}
   711  
   712  		// Shrink the stack if not much of it is being used, but not during the scan phase.
   713  		if gcphase != _GCscan { // Do not shrink during GCscan phase.
   714  			shrinkstack(gp)
   715  		}
   716  		if readgstatus(gp) == _Gdead {
   717  			gp.gcworkdone = true
   718  		} else {
   719  			gp.gcworkdone = false
   720  		}
   721  		restart := stopg(gp)
   722  
   723  		// goroutine will scan its own stack when it stops running.
   724  		// Wait until it has.
   725  		for readgstatus(gp) == _Grunning && !gp.gcworkdone {
   726  		}
   727  
   728  		// scanstack(gp) is done as part of gcphasework.
   729  		// But to make sure we have finished we need to make sure that
   730  		// the stack traps have all responded, so drop into
   731  		// this loop until they respond.
   732  		for !gp.gcworkdone {
   733  			status = readgstatus(gp)
   734  			if status == _Gdead {
   735  				gp.gcworkdone = true // scan is a noop
   736  				break
   737  			}
   738  			if status == _Gwaiting || status == _Grunnable {
   739  				restart = stopg(gp)
   740  			}
   741  		}
   742  		if restart {
   743  			restartg(gp)
   744  		}
   745  	}
   746  }
   747  
   748  // Get an empty work buffer off the work.empty list,
   749  // allocating new buffers as needed.
   750  //go:nowritebarrier
   751  func getempty(b *workbuf) *workbuf {
   752  	if b != nil {
   753  		putfull(b)
   754  		b = nil
   755  	}
   756  	if work.empty != 0 {
   757  		b = (*workbuf)(lfstackpop(&work.empty))
   758  	}
   759  	if b != nil && b.nobj != 0 {
   760  		_g_ := getg()
   761  		print("m", _g_.m.id, ": getempty: popped b=", b, " with non-zero b.nobj=", b.nobj, "\n")
   762  		throw("getempty: workbuffer not empty, b->nobj not 0")
   763  	}
   764  	if b == nil {
   765  		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
   766  		b.nobj = 0
   767  	}
   768  	return b
   769  }
   770  
   771  //go:nowritebarrier
   772  func putempty(b *workbuf) {
   773  	if b.nobj != 0 {
   774  		throw("putempty: b->nobj not 0")
   775  	}
   776  	lfstackpush(&work.empty, &b.node)
   777  }
   778  
   779  //go:nowritebarrier
   780  func putfull(b *workbuf) {
   781  	if b.nobj <= 0 {
   782  		throw("putfull: b->nobj <= 0")
   783  	}
   784  	lfstackpush(&work.full, &b.node)
   785  }
   786  
   787  // Get a partially empty work buffer;
   788  // if none are available, get an empty one.
   789  //go:nowritebarrier
   790  func getpartialorempty() *workbuf {
   791  	b := (*workbuf)(lfstackpop(&work.partial))
   792  	if b == nil {
   793  		b = getempty(nil)
   794  	}
   795  	return b
   796  }
   797  
   798  //go:nowritebarrier
   799  func putpartial(b *workbuf) {
   800  	if b.nobj == 0 {
   801  		lfstackpush(&work.empty, &b.node)
   802  	} else if b.nobj < uintptr(len(b.obj)) {
   803  		lfstackpush(&work.partial, &b.node)
   804  	} else if b.nobj == uintptr(len(b.obj)) {
   805  		lfstackpush(&work.full, &b.node)
   806  	} else {
   807  		print("b=", b, " b.nobj=", b.nobj, " len(b.obj)=", len(b.obj), "\n")
   808  		throw("putpartial: bad Workbuf b.nobj")
   809  	}
   810  }
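        // A typical work buffer lifecycle, mirroring what shade and scanblock below do
        // (sketch only):
        //
        //	wbuf := getpartialorempty()
        //	wbuf = greyobject(obj, 0, 0, &mbits, wbuf) // may hand back a fresh buffer
        //	putpartial(wbuf)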
   811  
   812  // Get a full work buffer off the work.full list or a partially
   813  // filled one off the work.partial list. If nothing is available,
   814  // wait until all the other gc helpers have finished and then
   815  // return nil.
   816  // getfull acts as a barrier for work.nproc helpers. As long as one
   817  // gchelper is actively marking objects it
   818  // may create a workbuffer that the other helpers can work on.
   819  // The for loop either exits when a work buffer is found
   820  // or when _all_ of the work.nproc GC helpers are in the loop
   821  // looking for work and thus not capable of creating new work.
   822  // This is in fact the termination condition for the STW mark
   823  // phase.
   824  //go:nowritebarrier
   825  func getfull(b *workbuf) *workbuf {
   826  	if b != nil {
   827  		putempty(b)
   828  	}
   829  
   830  	b = (*workbuf)(lfstackpop(&work.full))
   831  	if b == nil {
   832  		b = (*workbuf)(lfstackpop(&work.partial))
   833  	}
   834  	if b != nil || work.nproc == 1 {
   835  		return b
   836  	}
   837  
   838  	xadd(&work.nwait, +1)
   839  	for i := 0; ; i++ {
   840  		if work.full != 0 {
   841  			xadd(&work.nwait, -1)
   842  			b = (*workbuf)(lfstackpop(&work.full))
   843  			if b == nil {
   844  				b = (*workbuf)(lfstackpop(&work.partial))
   845  			}
   846  			if b != nil {
   847  				return b
   848  			}
   849  			xadd(&work.nwait, +1)
   850  		}
   851  		if work.nwait == work.nproc {
   852  			return nil
   853  		}
   854  		_g_ := getg()
   855  		if i < 10 {
   856  			_g_.m.gcstats.nprocyield++
   857  			procyield(20)
   858  		} else if i < 20 {
   859  			_g_.m.gcstats.nosyield++
   860  			osyield()
   861  		} else {
   862  			_g_.m.gcstats.nsleep++
   863  			usleep(100)
   864  		}
   865  	}
   866  }
   867  
   868  //go:nowritebarrier
   869  func handoff(b *workbuf) *workbuf {
   870  	// Make new buffer with half of b's pointers.
   871  	b1 := getempty(nil)
   872  	n := b.nobj / 2
   873  	b.nobj -= n
   874  	b1.nobj = n
   875  	memmove(unsafe.Pointer(&b1.obj[0]), unsafe.Pointer(&b.obj[b.nobj]), n*unsafe.Sizeof(b1.obj[0]))
   876  	_g_ := getg()
   877  	_g_.m.gcstats.nhandoff++
   878  	_g_.m.gcstats.nhandoffcnt += uint64(n)
   879  
   880  	// Put b on full list - let first half of b get stolen.
   881  	lfstackpush(&work.full, &b.node)
   882  	return b1
   883  }
   884  
   885  //go:nowritebarrier
   886  func stackmapdata(stkmap *stackmap, n int32) bitvector {
   887  	if n < 0 || n >= stkmap.n {
   888  		throw("stackmapdata: index out of range")
   889  	}
   890  	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
   891  }
   892  
   893  // Scan a stack frame: local variables and function arguments/results.
   894  //go:nowritebarrier
   895  func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
   896  
   897  	f := frame.fn
   898  	targetpc := frame.continpc
   899  	if targetpc == 0 {
   900  		// Frame is dead.
   901  		return true
   902  	}
   903  	if _DebugGC > 1 {
   904  		print("scanframe ", funcname(f), "\n")
   905  	}
   906  	if targetpc != f.entry {
   907  		targetpc--
   908  	}
   909  	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
   910  	if pcdata == -1 {
   911  		// We do not have a valid pcdata value but there might be a
   912  		// stackmap for this function.  It is likely that we are looking
   913  		// at the function prologue, assume so and hope for the best.
   914  		pcdata = 0
   915  	}
   916  
   917  	// Scan local variables if stack frame has been allocated.
   918  	size := frame.varp - frame.sp
   919  	var minsize uintptr
   920  	if thechar != '6' && thechar != '8' {
   921  		minsize = ptrSize
   922  	} else {
   923  		minsize = 0
   924  	}
   925  	if size > minsize {
   926  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
   927  		if stkmap == nil || stkmap.n <= 0 {
   928  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
   929  			throw("missing stackmap")
   930  		}
   931  
   932  		// Locals bitmap information, scan just the pointers in locals.
   933  		if pcdata < 0 || pcdata >= stkmap.n {
   934  			// don't know where we are
   935  			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   936  			throw("scanframe: bad symbol table")
   937  		}
   938  		bv := stackmapdata(stkmap, pcdata)
   939  		size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
   940  		scanblock(frame.varp-size, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
   941  	}
   942  
   943  	// Scan arguments.
   944  	if frame.arglen > 0 {
   945  		var bv bitvector
   946  		if frame.argmap != nil {
   947  			bv = *frame.argmap
   948  		} else {
   949  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   950  			if stkmap == nil || stkmap.n <= 0 {
   951  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
   952  				throw("missing stackmap")
   953  			}
   954  			if pcdata < 0 || pcdata >= stkmap.n {
   955  				// don't know where we are
   956  				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   957  				throw("scanframe: bad symbol table")
   958  			}
   959  			bv = stackmapdata(stkmap, pcdata)
   960  		}
   961  		scanblock(frame.argp, uintptr(bv.n)/bitsPerPointer*ptrSize, bv.bytedata)
   962  	}
   963  	return true
   964  }
   965  
   966  //go:nowritebarrier
   967  func scanstack(gp *g) {
   968  
   969  	if readgstatus(gp)&_Gscan == 0 {
   970  		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
   971  		throw("scanstack - bad status")
   972  	}
   973  
   974  	switch readgstatus(gp) &^ _Gscan {
   975  	default:
   976  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   977  		throw("mark - bad status")
   978  	case _Gdead:
   979  		return
   980  	case _Grunning:
   981  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   982  		throw("scanstack: goroutine not stopped")
   983  	case _Grunnable, _Gsyscall, _Gwaiting:
   984  		// ok
   985  	}
   986  
   987  	if gp == getg() {
   988  		throw("can't scan our own stack")
   989  	}
   990  	mp := gp.m
   991  	if mp != nil && mp.helpgc != 0 {
   992  		throw("can't scan gchelper stack")
   993  	}
   994  
   995  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
   996  	tracebackdefers(gp, scanframe, nil)
   997  }
   998  
   999  // If the slot is grey or black return true; if white return false.
  1000  // If the slot is not in the known heap and thus does not have a valid GC bitmap then
  1001  // it is considered grey. Globals and stacks can hold such slots.
  1002  // The slot is grey if its mark bit is set and it is enqueued to be scanned.
  1003  // The slot is black if it has already been scanned.
  1004  // It is white if it has a valid mark bit and the bit is not set.
  1005  //go:nowritebarrier
  1006  func shaded(slot uintptr) bool {
  1007  	if !inheap(slot) { // non-heap slots considered grey
  1008  		return true
  1009  	}
  1010  
  1011  	var mbits markbits
  1012  	valid := objectstart(slot, &mbits)
  1013  	if valid == 0 {
  1014  		return true
  1015  	}
  1016  
  1017  	if checkmark {
  1018  		return ischeckmarked(&mbits)
  1019  	}
  1020  
  1021  	return mbits.bits&bitMarked != 0
  1022  }
  1023  
  1024  // Shade the object if it isn't already.
  1025  // The object is not nil and known to be in the heap.
  1026  //go:nowritebarrier
  1027  func shade(b uintptr) {
  1028  	if !inheap(b) {
  1029  		throw("shade: passed an address not in the heap")
  1030  	}
  1031  
  1032  	wbuf := getpartialorempty()
  1033  	// Mark the object, return some important bits.
  1034  	// If we combine the following two routines we don't have to pass mbits or obj around.
  1035  	var mbits markbits
  1036  	obj := objectstart(b, &mbits)
  1037  	if obj != 0 {
  1038  		wbuf = greyobject(obj, 0, 0, &mbits, wbuf) // augments the wbuf
  1039  	}
  1040  	putpartial(wbuf)
  1041  }
  1042  
  1043  // This is the Dijkstra barrier coarsened to always shade the ptr (dst) object.
  1044  // The original Dijkstra barrier only shaded ptrs being placed in black slots.
  1045  //
  1046  // Shade indicates that it has seen a white pointer by adding the referent
  1047  // to wbuf as well as marking it.
  1048  //
  1049  // slot is the destination (dst) in go code
  1050  // ptr is the value that goes into the slot (src) in the go code
  1051  //
  1052  // Dijkstra pointed out that maintaining the no-black-to-white-pointers
  1053  // invariant means that white-to-white pointers need not
  1054  // be noted by the write barrier. Furthermore, if either
  1055  // white object dies before it is reached by the
  1056  // GC then the object can be collected during this GC cycle
  1057  // instead of waiting for the next cycle. Unfortunately the cost of
  1058  // ensuring that the object holding the slot doesn't concurrently
  1059  // change to black without the mutator noticing seems prohibitive.
  1060  //
  1061  // Consider the following example where the mutator writes into
  1062  // a slot and then loads the slot's mark bit while the GC thread
  1063  // writes to the slot's mark bit and then as part of scanning reads
  1064  // the slot.
  1065  //
  1066  // Initially both [slot] and [slotmark] are 0 (nil)
  1067  // Mutator thread          GC thread
  1068  // st [slot], ptr          st [slotmark], 1
  1069  //
  1070  // ld r1, [slotmark]       ld r2, [slot]
  1071  //
  1072  // This is a classic example of independent reads of independent writes,
  1073  // aka IRIW. The question is whether r1==r2==0 is allowed, and for most HW the
  1074  // answer is yes without inserting memory barriers between the st and the ld.
  1075  // These barriers are expensive so we have decided that we will
  1076  // always grey the ptr object regardless of the slot's color.
  1077  //go:nowritebarrier
  1078  func gcmarkwb_m(slot *uintptr, ptr uintptr) {
  1079  	switch gcphase {
  1080  	default:
  1081  		throw("gcphasework in bad gcphase")
  1082  
  1083  	case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
  1084  		// ok
  1085  
  1086  	case _GCmark, _GCmarktermination:
  1087  		if ptr != 0 && inheap(ptr) {
  1088  			shade(ptr)
  1089  		}
  1090  	}
  1091  }
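        // Illustrative use of the barrier above: during _GCmark and _GCmarktermination
        // a mutator pointer store is accompanied by a call that greys the pointed-to
        // object. The exact call sequence is generated by the compiler/runtime and is
        // only sketched here:
        //
        //	gcmarkwb_m(slot, ptr) // greys ptr if it points into the heap
        //	*slot = ptr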
  1092  
  1093  // The gp has been moved to a GC safepoint. GC phase specific
  1094  // work is done here.
  1095  //go:nowritebarrier
  1096  func gcphasework(gp *g) {
  1097  	switch gcphase {
  1098  	default:
  1099  		throw("gcphasework in bad gcphase")
  1100  	case _GCoff, _GCquiesce, _GCstw, _GCsweep:
  1101  		// No work.
  1102  	case _GCscan:
  1103  		// scan the stack, mark the objects, put pointers in work buffers
  1104  		// hanging off the P where this is being run.
  1105  		scanstack(gp)
  1106  	case _GCmark:
  1107  		// No work.
  1108  	case _GCmarktermination:
  1109  		scanstack(gp)
  1110  		// All available mark work will be emptied before returning.
  1111  	}
  1112  	gp.gcworkdone = true
  1113  }
  1114  
  1115  var finalizer1 = [...]byte{
  1116  	// Each Finalizer is 5 words, ptr ptr uintptr ptr ptr.
  1117  	// Each byte describes 4 words.
  1118  	// Need 4 Finalizers described by 5 bytes before pattern repeats:
  1119  	//	ptr ptr uintptr ptr ptr
  1120  	//	ptr ptr uintptr ptr ptr
  1121  	//	ptr ptr uintptr ptr ptr
  1122  	//	ptr ptr uintptr ptr ptr
  1123  	// aka
  1124  	//	ptr ptr uintptr ptr
  1125  	//	ptr ptr ptr uintptr
  1126  	//	ptr ptr ptr ptr
  1127  	//	uintptr ptr ptr ptr
  1128  	//	ptr uintptr ptr ptr
  1129  	// Assumptions about Finalizer layout checked below.
  1130  	bitsPointer | bitsPointer<<2 | bitsScalar<<4 | bitsPointer<<6,
  1131  	bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsScalar<<6,
  1132  	bitsPointer | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
  1133  	bitsScalar | bitsPointer<<2 | bitsPointer<<4 | bitsPointer<<6,
  1134  	bitsPointer | bitsScalar<<2 | bitsPointer<<4 | bitsPointer<<6,
  1135  }
  1136  
  1137  func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
  1138  	lock(&finlock)
  1139  	if finq == nil || finq.cnt == int32(len(finq.fin)) {
  1140  		if finc == nil {
  1141  			// Note: write barrier here, assigning to finc, but should be okay.
  1142  			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
  1143  			finc.alllink = allfin
  1144  			allfin = finc
  1145  			if finptrmask[0] == 0 {
  1146  				// Build pointer mask for Finalizer array in block.
  1147  				// Check assumptions made in finalizer1 array above.
  1148  				if (unsafe.Sizeof(finalizer{}) != 5*ptrSize ||
  1149  					unsafe.Offsetof(finalizer{}.fn) != 0 ||
  1150  					unsafe.Offsetof(finalizer{}.arg) != ptrSize ||
  1151  					unsafe.Offsetof(finalizer{}.nret) != 2*ptrSize ||
  1152  					unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
  1153  					unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
  1154  					bitsPerPointer != 2) {
  1155  					throw("finalizer out of sync")
  1156  				}
  1157  				for i := range finptrmask {
  1158  					finptrmask[i] = finalizer1[i%len(finalizer1)]
  1159  				}
  1160  			}
  1161  		}
  1162  		block := finc
  1163  		finc = block.next
  1164  		block.next = finq
  1165  		finq = block
  1166  	}
  1167  	f := &finq.fin[finq.cnt]
  1168  	finq.cnt++
  1169  	f.fn = fn
  1170  	f.nret = nret
  1171  	f.fint = fint
  1172  	f.ot = ot
  1173  	f.arg = p
  1174  	fingwake = true
  1175  	unlock(&finlock)
  1176  }
  1177  
  1178  //go:nowritebarrier
  1179  func iterate_finq(callback func(*funcval, unsafe.Pointer, uintptr, *_type, *ptrtype)) {
  1180  	for fb := allfin; fb != nil; fb = fb.alllink {
  1181  		for i := int32(0); i < fb.cnt; i++ {
  1182  			f := &fb.fin[i]
  1183  			callback(f.fn, f.arg, f.nret, f.fint, f.ot)
  1184  		}
  1185  	}
  1186  }
  1187  
  1188  // Returns only when span s has been swept.
  1189  //go:nowritebarrier
  1190  func mSpan_EnsureSwept(s *mspan) {
  1191  	// Caller must disable preemption.
  1192  	// Otherwise when this function returns the span can become unswept again
  1193  	// (if GC is triggered on another goroutine).
  1194  	_g_ := getg()
  1195  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
  1196  		throw("MSpan_EnsureSwept: m is not locked")
  1197  	}
  1198  
  1199  	sg := mheap_.sweepgen
  1200  	if atomicload(&s.sweepgen) == sg {
  1201  		return
  1202  	}
  1203  	// The caller must be sure that the span is a MSpanInUse span.
  1204  	if cas(&s.sweepgen, sg-2, sg-1) {
  1205  		mSpan_Sweep(s, false)
  1206  		return
  1207  	}
  1208  	// unfortunate condition, and we don't have efficient means to wait
  1209  	for atomicload(&s.sweepgen) != sg {
  1210  		osyield()
  1211  	}
  1212  }
  1213  
  1214  // Sweep frees or collects finalizers for blocks not marked in the mark phase.
  1215  // It clears the mark bits in preparation for the next GC round.
  1216  // Returns true if the span was returned to heap.
  1217  // If preserve=true, don't return it to heap nor relink in MCentral lists;
  1218  // caller takes care of it.
  1219  //TODO go:nowritebarrier
  1220  func mSpan_Sweep(s *mspan, preserve bool) bool {
  1221  	if checkmark {
  1222  		throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
  1223  	}
  1224  
  1225  	// It's critical that we enter this function with preemption disabled,
  1226  	// GC must not start while we are in the middle of this function.
  1227  	_g_ := getg()
  1228  	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
  1229  		throw("MSpan_Sweep: m is not locked")
  1230  	}
  1231  	sweepgen := mheap_.sweepgen
  1232  	if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
  1233  		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
  1234  		throw("MSpan_Sweep: bad span state")
  1235  	}
  1236  	arena_start := mheap_.arena_start
  1237  	cl := s.sizeclass
  1238  	size := s.elemsize
  1239  	var n int32
  1240  	var npages int32
  1241  	if cl == 0 {
  1242  		n = 1
  1243  	} else {
  1244  		// Chunk full of small blocks.
  1245  		npages = class_to_allocnpages[cl]
  1246  		n = (npages << _PageShift) / int32(size)
  1247  	}
  1248  	res := false
  1249  	nfree := 0
  1250  
  1251  	var head, end gclinkptr
  1252  
  1253  	c := _g_.m.mcache
  1254  	sweepgenset := false
  1255  
  1256  	// Mark any free objects in this span so we don't collect them.
  1257  	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
  1258  		off := (uintptr(unsafe.Pointer(link)) - arena_start) / ptrSize
  1259  		bitp := arena_start - off/wordsPerBitmapByte - 1
  1260  		shift := (off % wordsPerBitmapByte) * gcBits
  1261  		*(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
  1262  	}
  1263  
  1264  	// Unlink & free special records for any objects we're about to free.
  1265  	specialp := &s.specials
  1266  	special := *specialp
  1267  	for special != nil {
  1268  		// A finalizer can be set for an inner byte of an object, find object beginning.
  1269  		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
  1270  		off := (p - arena_start) / ptrSize
  1271  		bitp := arena_start - off/wordsPerBitmapByte - 1
  1272  		shift := (off % wordsPerBitmapByte) * gcBits
  1273  		bits := (*(*byte)(unsafe.Pointer(bitp)) >> shift) & bitMask
  1274  		if bits&bitMarked == 0 {
  1275  			// Find the exact byte for which the special was set up
  1276  			// (as opposed to object beginning).
  1277  			p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
  1278  			// about to free object: splice out special record
  1279  			y := special
  1280  			special = special.next
  1281  			*specialp = special
  1282  			if !freespecial(y, unsafe.Pointer(p), size, false) {
  1283  				// stop freeing of object if it has a finalizer
  1284  				*(*byte)(unsafe.Pointer(bitp)) |= bitMarked << shift
  1285  			}
  1286  		} else {
  1287  			// object is still live: keep special record
  1288  			specialp = &special.next
  1289  			special = *specialp
  1290  		}
  1291  	}
  1292  
  1293  	// Sweep through n objects of given size starting at p.
  1294  	// This thread owns the span now, so it can manipulate
  1295  	// the block bitmap without atomic operations.
  1296  	p := uintptr(s.start << _PageShift)
  1297  	off := (p - arena_start) / ptrSize
  1298  	bitp := arena_start - off/wordsPerBitmapByte - 1
  1299  	shift := uint(0)
  1300  	step := size / (ptrSize * wordsPerBitmapByte)
  1301  	// Rewind to the previous quadruple, as we move to the next one
  1302  	// at the beginning of the loop.
  1303  	bitp += step
  1304  	if step == 0 {
  1305  		// 8-byte objects.
  1306  		bitp++
  1307  		shift = gcBits
  1308  	}
  1309  	for ; n > 0; n, p = n-1, p+size {
  1310  		bitp -= step
  1311  		if step == 0 {
  1312  			if shift != 0 {
  1313  				bitp--
  1314  			}
  1315  			shift = gcBits - shift
  1316  		}
  1317  
  1318  		xbits := *(*byte)(unsafe.Pointer(bitp))
  1319  		bits := (xbits >> shift) & bitMask
  1320  
  1321  		// Allocated and marked object, reset bits to allocated.
  1322  		if bits&bitMarked != 0 {
  1323  			*(*byte)(unsafe.Pointer(bitp)) &^= bitMarked << shift
  1324  			continue
  1325  		}
  1326  
  1327  		// At this point we know that we are looking at a garbage object
  1328  		// that needs to be collected.
  1329  		if debug.allocfreetrace != 0 {
  1330  			tracefree(unsafe.Pointer(p), size)
  1331  		}
  1332  
  1333  		// Reset to allocated+noscan.
  1334  		*(*byte)(unsafe.Pointer(bitp)) = uint8(uintptr(xbits&^((bitMarked|bitsMask<<2)<<shift)) | uintptr(bitsDead)<<(shift+2))
  1335  		if cl == 0 {
  1336  			// Free large span.
  1337  			if preserve {
  1338  				throw("can't preserve large span")
  1339  			}
  1340  			unmarkspan(p, s.npages<<_PageShift)
  1341  			s.needzero = 1
  1342  
  1343  			// important to set sweepgen before returning it to heap
  1344  			atomicstore(&s.sweepgen, sweepgen)
  1345  			sweepgenset = true
  1346  
  1347  			// NOTE(rsc,dvyukov): The original implementation of efence
  1348  			// in CL 22060046 used SysFree instead of SysFault, so that
  1349  			// the operating system would eventually give the memory
  1350  			// back to us again, so that an efence program could run
  1351  			// longer without running out of memory. Unfortunately,
  1352  			// calling SysFree here without any kind of adjustment of the
  1353  			// heap data structures means that when the memory does
  1354  			// come back to us, we have the wrong metadata for it, either in
  1355  			// the MSpan structures or in the garbage collection bitmap.
  1356  			// Using SysFault here means that the program will run out of
  1357  			// memory fairly quickly in efence mode, but at least it won't
  1358  			// have mysterious crashes due to confused memory reuse.
  1359  			// It should be possible to switch back to SysFree if we also
  1360  			// implement and then call some kind of MHeap_DeleteSpan.
  1361  			if debug.efence > 0 {
  1362  				s.limit = 0 // prevent mlookup from finding this span
  1363  				sysFault(unsafe.Pointer(p), size)
  1364  			} else {
  1365  				mHeap_Free(&mheap_, s, 1)
  1366  			}
  1367  			c.local_nlargefree++
  1368  			c.local_largefree += size
  1369  			xadd64(&memstats.next_gc, -int64(size)*int64(gcpercent+100)/100)
  1370  			res = true
  1371  		} else {
  1372  			// Free small object.
  1373  			if size > 2*ptrSize {
  1374  				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
  1375  			} else if size > ptrSize {
  1376  				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
  1377  			}
  1378  			if head.ptr() == nil {
  1379  				head = gclinkptr(p)
  1380  			} else {
  1381  				end.ptr().next = gclinkptr(p)
  1382  			}
  1383  			end = gclinkptr(p)
  1384  			end.ptr().next = gclinkptr(0x0bade5)
  1385  			nfree++
  1386  		}
  1387  	}
  1388  
  1389  	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
  1390  	// because of the potential for a concurrent free/SetFinalizer.
  1391  	// But we need to set it before we make the span available for allocation
  1392  	// (return it to heap or mcentral), because allocation code assumes that a
  1393  	// span is already swept if available for allocation.
  1394  	if !sweepgenset && nfree == 0 {
  1395  		// The span must be in our exclusive ownership until we update sweepgen;
  1396  		// check for potential races.
  1397  		if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
  1398  			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
  1399  			throw("MSpan_Sweep: bad span state after sweep")
  1400  		}
  1401  		atomicstore(&s.sweepgen, sweepgen)
  1402  	}
  1403  	if nfree > 0 {
  1404  		c.local_nsmallfree[cl] += uintptr(nfree)
  1405  		c.local_cachealloc -= intptr(uintptr(nfree) * size)
  1406  		xadd64(&memstats.next_gc, -int64(nfree)*int64(size)*int64(gcpercent+100)/100)
  1407  		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
  1408  		// MCentral_FreeSpan updates sweepgen
  1409  	}
  1410  	return res
  1411  }
  1412  
  1413  // State of background sweep.
  1414  // Protected by gclock.
  1415  type sweepdata struct {
  1416  	g       *g
  1417  	parked  bool
  1418  	started bool
  1419  
  1420  	spanidx uint32 // background sweeper position
  1421  
  1422  	nbgsweep    uint32
  1423  	npausesweep uint32
  1424  }
  1425  
  1426  var sweep sweepdata
  1427  
  1428  // sweeps one span
  1429  // returns number of pages returned to heap, or ^uintptr(0) if there is nothing to sweep
  1430  //go:nowritebarrier
  1431  func sweepone() uintptr {
  1432  	_g_ := getg()
  1433  
  1434  	// increment locks to ensure that the goroutine is not preempted
  1435  	// in the middle of sweep, which would leave the span in an inconsistent state for the next GC
  1436  	_g_.m.locks++
  1437  	sg := mheap_.sweepgen
  1438  	for {
  1439  		idx := xadd(&sweep.spanidx, 1) - 1
  1440  		if idx >= uint32(len(work.spans)) {
  1441  			mheap_.sweepdone = 1
  1442  			_g_.m.locks--
  1443  			return ^uintptr(0)
  1444  		}
  1445  		s := work.spans[idx]
  1446  		if s.state != mSpanInUse {
  1447  			s.sweepgen = sg
  1448  			continue
  1449  		}
  1450  		if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
  1451  			continue
  1452  		}
  1453  		npages := s.npages
  1454  		if !mSpan_Sweep(s, false) {
  1455  			npages = 0
  1456  		}
  1457  		_g_.m.locks--
  1458  		return npages
  1459  	}
  1460  }
  1461  
  1462  //go:nowritebarrier
  1463  func gosweepone() uintptr {
  1464  	var ret uintptr
  1465  	systemstack(func() {
  1466  		ret = sweepone()
  1467  	})
  1468  	return ret
  1469  }
  1470  
  1471  //go:nowritebarrier
  1472  func gosweepdone() bool {
  1473  	return mheap_.sweepdone != 0
  1474  }
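        // A minimal usage sketch (not part of the original source) of the two
        // helpers above: a background sweeper can call gosweepone until it
        // reports ^uintptr(0), meaning every span in work.spans has been
        // claimed, after which gosweepdone reports true. bgsweep (started from
        // gc below) follows this pattern, with additional parking logic:
        //
        //	for gosweepone() != ^uintptr(0) {
        //		sweep.nbgsweep++
        //	}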
  1475  
  1476  //go:nowritebarrier
  1477  func gchelper() {
  1478  	_g_ := getg()
  1479  	_g_.m.traceback = 2
  1480  	gchelperstart()
  1481  
  1482  	// parallel-for mark over GC roots
  1483  	parfordo(work.markfor)
  1484  	if gcphase != _GCscan {
  1485  		scanblock(0, 0, nil) // blocks in getfull
  1486  	}
  1487  
  1488  	nproc := work.nproc // work.nproc can change right after we increment work.ndone
  1489  	if xadd(&work.ndone, +1) == nproc-1 {
  1490  		notewakeup(&work.alldone)
  1491  	}
  1492  	_g_.m.traceback = 0
  1493  }
  1494  
  1495  //go:nowritebarrier
  1496  func cachestats() {
  1497  	for i := 0; ; i++ {
  1498  		p := allp[i]
  1499  		if p == nil {
  1500  			break
  1501  		}
  1502  		c := p.mcache
  1503  		if c == nil {
  1504  			continue
  1505  		}
  1506  		purgecachedstats(c)
  1507  	}
  1508  }
  1509  
  1510  //go:nowritebarrier
  1511  func flushallmcaches() {
  1512  	for i := 0; ; i++ {
  1513  		p := allp[i]
  1514  		if p == nil {
  1515  			break
  1516  		}
  1517  		c := p.mcache
  1518  		if c == nil {
  1519  			continue
  1520  		}
  1521  		mCache_ReleaseAll(c)
  1522  		stackcache_clear(c)
  1523  	}
  1524  }
  1525  
  1526  //go:nowritebarrier
  1527  func updatememstats(stats *gcstats) {
  1528  	if stats != nil {
  1529  		*stats = gcstats{}
  1530  	}
  1531  	for mp := allm; mp != nil; mp = mp.alllink {
  1532  		if stats != nil {
  1533  			src := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(&mp.gcstats))
  1534  			dst := (*[unsafe.Sizeof(gcstats{}) / 8]uint64)(unsafe.Pointer(stats))
  1535  			for i, v := range src {
  1536  				dst[i] += v
  1537  			}
  1538  			mp.gcstats = gcstats{}
  1539  		}
  1540  	}
  1541  
  1542  	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
  1543  	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
  1544  	memstats.sys = memstats.heap_sys + memstats.stacks_sys + memstats.mspan_sys +
  1545  		memstats.mcache_sys + memstats.buckhash_sys + memstats.gc_sys + memstats.other_sys
  1546  
  1547  	// Calculate memory allocator stats.
  1548  	// During program execution we only count number of frees and amount of freed memory.
  1549  	// Current number of alive objects in the heap and amount of alive heap memory
  1550  	// are calculated by scanning all spans.
  1551  	// Total number of mallocs is calculated as number of frees plus number of alive objects.
  1552  	// Similarly, total amount of allocated memory is calculated as amount of freed memory
  1553  	// plus amount of alive heap memory.
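        	// Worked example of the bookkeeping described above (numbers are
        	// illustrative): if the span scan below finds 1000 live objects
        	// totalling 64KB while the free counters record 250 frees totalling
        	// 16KB, then nmalloc = 1000+250 = 1250, alloc = 64KB, and
        	// total_alloc = 64KB+16KB = 80KB.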
  1554  	memstats.alloc = 0
  1555  	memstats.total_alloc = 0
  1556  	memstats.nmalloc = 0
  1557  	memstats.nfree = 0
  1558  	for i := 0; i < len(memstats.by_size); i++ {
  1559  		memstats.by_size[i].nmalloc = 0
  1560  		memstats.by_size[i].nfree = 0
  1561  	}
  1562  
  1563  	// Flush MCache's to MCentral.
  1564  	systemstack(flushallmcaches)
  1565  
  1566  	// Aggregate local stats.
  1567  	cachestats()
  1568  
  1569  	// Scan all spans and count number of alive objects.
  1570  	lock(&mheap_.lock)
  1571  	for i := uint32(0); i < mheap_.nspan; i++ {
  1572  		s := h_allspans[i]
  1573  		if s.state != mSpanInUse {
  1574  			continue
  1575  		}
  1576  		if s.sizeclass == 0 {
  1577  			memstats.nmalloc++
  1578  			memstats.alloc += uint64(s.elemsize)
  1579  		} else {
  1580  			memstats.nmalloc += uint64(s.ref)
  1581  			memstats.by_size[s.sizeclass].nmalloc += uint64(s.ref)
  1582  			memstats.alloc += uint64(s.ref) * uint64(s.elemsize)
  1583  		}
  1584  	}
  1585  	unlock(&mheap_.lock)
  1586  
  1587  	// Aggregate by size class.
  1588  	smallfree := uint64(0)
  1589  	memstats.nfree = mheap_.nlargefree
  1590  	for i := 0; i < len(memstats.by_size); i++ {
  1591  		memstats.nfree += mheap_.nsmallfree[i]
  1592  		memstats.by_size[i].nfree = mheap_.nsmallfree[i]
  1593  		memstats.by_size[i].nmalloc += mheap_.nsmallfree[i]
  1594  		smallfree += uint64(mheap_.nsmallfree[i]) * uint64(class_to_size[i])
  1595  	}
  1596  	memstats.nfree += memstats.tinyallocs
  1597  	memstats.nmalloc += memstats.nfree
  1598  
  1599  	// Calculate derived stats.
  1600  	memstats.total_alloc = uint64(memstats.alloc) + uint64(mheap_.largefree) + smallfree
  1601  	memstats.heap_alloc = memstats.alloc
  1602  	memstats.heap_objects = memstats.nmalloc - memstats.nfree
  1603  }
  1604  
  1605  func gcinit() {
  1606  	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
  1607  		throw("runtime: size of Workbuf is suboptimal")
  1608  	}
  1609  
  1610  	work.markfor = parforalloc(_MaxGcproc)
  1611  	gcpercent = readgogc()
  1612  	gcdatamask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcdata)), uintptr(unsafe.Pointer(&edata))-uintptr(unsafe.Pointer(&data)))
  1613  	gcbssmask = unrollglobgcprog((*byte)(unsafe.Pointer(&gcbss)), uintptr(unsafe.Pointer(&ebss))-uintptr(unsafe.Pointer(&bss)))
  1614  }
  1615  
  1616  // Called from malloc.go using onM; stopping and starting the world is handled by the caller.
  1617  //go:nowritebarrier
  1618  func gc_m(start_time int64, eagersweep bool) {
  1619  	_g_ := getg()
  1620  	gp := _g_.m.curg
  1621  	casgstatus(gp, _Grunning, _Gwaiting)
  1622  	gp.waitreason = "garbage collection"
  1623  
  1624  	gc(start_time, eagersweep)
  1625  	casgstatus(gp, _Gwaiting, _Grunning)
  1626  }
  1627  
  1628  // Similar to clearcheckmarkbits but works on a single span.
  1629  // It performs two tasks.
  1630  // 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
  1631  //    for nibbles with the boundary bit set.
  1632  // 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer (10) and
  1633  //    BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark encoding.
  1634  // For the second case it is possible to restore the BitsDead pattern, but since
  1635  // clearmark is a debug tool, performance has a lower priority than simplicity.
  1636  // The span is MSpanInUse and the world is stopped.
  1637  //go:nowritebarrier
  1638  func clearcheckmarkbitsspan(s *mspan) {
  1639  	if s.state != _MSpanInUse {
  1640  		print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
  1641  		throw("clearcheckmarkbitsspan: bad span state")
  1642  	}
  1643  
  1644  	arena_start := mheap_.arena_start
  1645  	cl := s.sizeclass
  1646  	size := s.elemsize
  1647  	var n int32
  1648  	if cl == 0 {
  1649  		n = 1
  1650  	} else {
  1651  		// Chunk full of small blocks
  1652  		npages := class_to_allocnpages[cl]
  1653  		n = npages << _PageShift / int32(size)
  1654  	}
  1655  
  1656  	// MSpan_Sweep has similar code but instead of overloading and
  1657  	// complicating that routine we do a simpler walk here.
  1658  	// Sweep through n objects of given size starting at p.
  1659  	// This thread owns the span now, so it can manipulate
  1660  	// the block bitmap without atomic operations.
  1661  	p := uintptr(s.start) << _PageShift
  1662  
  1663  	// Find bits for the beginning of the span.
  1664  	off := (p - arena_start) / ptrSize
  1665  	bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  1666  	step := size / (ptrSize * wordsPerBitmapByte)
  1667  
  1668  	// The type bit values are:
  1669  	//	00 - BitsDead, for us BitsScalarMarked
  1670  	//	01 - BitsScalar
  1671  	//	10 - BitsPointer
  1672  	//	11 - unused, for us BitsPointerMarked
  1673  	//
  1674  	// When called to prepare for the checkmark phase (checkmark==1),
  1675  	// we change BitsDead to BitsScalar, so that there are no BitsScalarMarked
  1676  	// type bits anywhere.
  1677  	//
  1678  	// The checkmark phase marks by changing BitsScalar to BitsScalarMarked
  1679  	// and BitsPointer to BitsPointerMarked.
  1680  	//
  1681  	// When called to clean up after the checkmark phase (checkmark==0),
  1682  	// we unmark by changing BitsScalarMarked back to BitsScalar and
  1683  	// BitsPointerMarked back to BitsPointer.
  1684  	//
  1685  	// There are two problems with the scheme as just described.
  1686  	// First, the setup rewrites BitsDead to BitsScalar, but the type bits
  1687  	// following a BitsDead are uninitialized and must not be used.
  1688  	// Second, objects that are free are expected to have their type
  1689  	// bits zeroed (BitsDead), so in the cleanup we need to restore
  1690  	// any BitsDeads that were there originally.
  1691  	//
  1692  	// In a one-word object (8-byte allocation on 64-bit system),
  1693  	// there is no difference between BitsScalar and BitsDead, because
  1694  	// neither is a pointer and there are no more words in the object,
  1695  	// so using BitsScalar during the checkmark is safe and mapping
  1696  	// both back to BitsDead during cleanup is also safe.
  1697  	//
  1698  	// In a larger object, we need to be more careful. During setup,
  1699  	// if the type of the first word is BitsDead, we change it to BitsScalar
  1700  	// (as we must) but also initialize the type of the second
  1701  	// word to BitsDead, so that a scan during the checkmark phase
  1702  	// will still stop before seeing the uninitialized type bits in the
  1703  	// rest of the object. The sequence 'BitsScalar BitsDead' never
  1704  	// happens in real type bitmaps - BitsDead is always as early
  1705  	// as possible, so immediately after the last BitsPointer.
  1706  	// During cleanup, if we see a BitsScalar, we can check to see if it
  1707  	// is followed by BitsDead. If so, it was originally BitsDead and
  1708  	// we can change it back.
  1709  
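        	// A concrete instance of the conversions described above (the type bits
        	// are bits 2-3 of each nibble, hence the ">> 2" shifts below; the binary
        	// values are the ones listed in the table above):
        	//
        	//	setup, checkmark=true:    BitsDead (00)    -> BitsScalar (01)
        	//	checkmark marking:        BitsScalar (01)  -> BitsScalarMarked (00)
        	//	                          BitsPointer (10) -> BitsPointerMarked (11)
        	//	cleanup, checkmark=false: marked values are XORed with _BitsCheckMarkXor
        	//	                          to restore 01 and 10, and a BitsScalar whose
        	//	                          following word is BitsDead is converted back
        	//	                          to BitsDead.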
  1710  	if step == 0 {
  1711  		// updating top and bottom nibbles, all boundaries
  1712  		for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
  1713  			if *bitp&bitBoundary == 0 {
  1714  				throw("missing bitBoundary")
  1715  			}
  1716  			b := (*bitp & bitPtrMask) >> 2
  1717  			if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
  1718  				*bitp &^= 0x0c // convert to _BitsDead
  1719  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1720  				*bitp &^= _BitsCheckMarkXor << 2
  1721  			}
  1722  
  1723  			if (*bitp>>gcBits)&bitBoundary == 0 {
  1724  				throw("missing bitBoundary")
  1725  			}
  1726  			b = ((*bitp >> gcBits) & bitPtrMask) >> 2
  1727  			if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
  1728  				*bitp &^= 0xc0 // convert to _BitsDead
  1729  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1730  				*bitp &^= _BitsCheckMarkXor << (2 + gcBits)
  1731  			}
  1732  		}
  1733  	} else {
  1734  		// updating bottom nibble for first word of each object
  1735  		for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
  1736  			if *bitp&bitBoundary == 0 {
  1737  				throw("missing bitBoundary")
  1738  			}
  1739  			b := (*bitp & bitPtrMask) >> 2
  1740  
  1741  			if checkmark && b == _BitsDead {
  1742  				// move BitsDead into second word.
  1743  				// set bits to BitsScalar in preparation for checkmark phase.
  1744  				*bitp &^= 0xc0
  1745  				*bitp |= _BitsScalar << 2
  1746  			} else if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) && *bitp&0xc0 == 0 {
  1747  				// Cleaning up after checkmark phase.
  1748  				// First word is scalar or dead (we forgot)
  1749  				// and second word is dead.
  1750  				// First word might as well be dead too.
  1751  				*bitp &^= 0x0c
  1752  			} else if b == _BitsScalarMarked || b == _BitsPointerMarked {
  1753  				*bitp ^= _BitsCheckMarkXor << 2
  1754  			}
  1755  		}
  1756  	}
  1757  }
  1758  
  1759  // clearcheckmarkbits performs two tasks.
  1760  // 1. When used before the checkmark phase it converts BitsDead (00) to BitsScalar (01)
  1761  //    for nibbles with the boundary bit set.
  1762  // 2. When used after the checkmark phase it converts BitsPointerMark (11) to BitsPointer (10) and
  1763  //    BitsScalarMark (00) to BitsScalar (01), thus clearing the checkmark encoding.
  1764  // This is a bit expensive but preserves the BitsDead encoding during the normal marking.
  1765  // BitsDead remains valid for every nibble except the ones with BitsBoundary set.
  1766  //go:nowritebarrier
  1767  func clearcheckmarkbits() {
  1768  	for _, s := range work.spans {
  1769  		if s.state == _MSpanInUse {
  1770  			clearcheckmarkbitsspan(s)
  1771  		}
  1772  	}
  1773  }
  1774  
  1775  // Called from malloc.go using onM.
  1776  // The world is stopped. Rerun the scan and mark phases
  1777  // using the bitMarkedCheck bit instead of the
  1778  // bitMarked bit. If the marking encounters a
  1779  // bitMarked bit that is not set, we throw.
  1780  //go:nowritebarrier
  1781  func gccheckmark_m(startTime int64, eagersweep bool) {
  1782  	if !gccheckmarkenable {
  1783  		return
  1784  	}
  1785  
  1786  	if checkmark {
  1787  		throw("gccheckmark_m, entered with checkmark already true")
  1788  	}
  1789  
  1790  	checkmark = true
  1791  	clearcheckmarkbits()        // Converts BitsDead to BitsScalar.
  1792  	gc_m(startTime, eagersweep) // turns off checkmark + calls clearcheckmarkbits
  1793  }
  1794  
  1795  //go:nowritebarrier
  1796  func gccheckmarkenable_m() {
  1797  	gccheckmarkenable = true
  1798  }
  1799  
  1800  //go:nowritebarrier
  1801  func gccheckmarkdisable_m() {
  1802  	gccheckmarkenable = false
  1803  }
  1804  
  1805  //go:nowritebarrier
  1806  func finishsweep_m() {
  1807  	// The world is stopped so we should be able to complete the sweeps
  1808  	// quickly.
  1809  	for sweepone() != ^uintptr(0) {
  1810  		sweep.npausesweep++
  1811  	}
  1812  
  1813  	// There may be some other spans being swept concurrently that
  1814  	// we need to wait for. If finishsweep_m is done with the world stopped
  1815  	// this code is not required.
  1816  	sg := mheap_.sweepgen
  1817  	for _, s := range work.spans {
  1818  		if s.sweepgen != sg && s.state == _MSpanInUse {
  1819  			mSpan_EnsureSwept(s)
  1820  		}
  1821  	}
  1822  }
  1823  
  1824  // Scan all of the stacks, greying (or graying if in America) the referents
  1825  // but not blackening them since the mark write barrier isn't installed.
  1826  //go:nowritebarrier
  1827  func gcscan_m() {
  1828  	_g_ := getg()
  1829  
  1830  	// Grab the g that called us and potentially allow rescheduling.
  1831  	// This allows it to be scanned like other goroutines.
  1832  	mastergp := _g_.m.curg
  1833  	casgstatus(mastergp, _Grunning, _Gwaiting)
  1834  	mastergp.waitreason = "garbage collection scan"
  1835  
  1836  	// Span sweeping has been done by finishsweep_m.
  1837  	// Long term we will want to make this goroutine runnable
  1838  	// by placing it onto a scanenqueue state and then calling
  1839  	// runtime·restartg(mastergp) to make it Grunnable.
  1840  	// At the bottom we will want to return this p back to the scheduler.
  1841  
  1842  	// Prepare flag indicating that the scan has not been completed.
  1843  	lock(&allglock)
  1844  	local_allglen := allglen
  1845  	for i := uintptr(0); i < local_allglen; i++ {
  1846  		gp := allgs[i]
  1847  		gp.gcworkdone = false // set to true in gcphasework
  1848  	}
  1849  	unlock(&allglock)
  1850  
  1851  	work.nwait = 0
  1852  	work.ndone = 0
  1853  	work.nproc = 1 // For now do not do this in parallel.
  1854  	//	ackgcphase is not needed since we are not scanning running goroutines.
  1855  	parforsetup(work.markfor, work.nproc, uint32(_RootCount+local_allglen), nil, false, markroot)
  1856  	parfordo(work.markfor)
  1857  
  1858  	lock(&allglock)
  1859  	// Check that gc work is done.
  1860  	for i := uintptr(0); i < local_allglen; i++ {
  1861  		gp := allgs[i]
  1862  		if !gp.gcworkdone {
  1863  			throw("scan missed a g")
  1864  		}
  1865  	}
  1866  	unlock(&allglock)
  1867  
  1868  	casgstatus(mastergp, _Gwaiting, _Grunning)
  1869  	// Let the g that called us continue to run.
  1870  }
  1871  
  1872  // Mark all objects that are known about.
  1873  //go:nowritebarrier
  1874  func gcmark_m() {
  1875  	scanblock(0, 0, nil)
  1876  }
  1877  
  1878  // For now this must be bracketed with a stoptheworld and a starttheworld to ensure
  1879  // all goroutines see the new barrier.
  1880  //go:nowritebarrier
  1881  func gcinstallmarkwb_m() {
  1882  	gcphase = _GCmark
  1883  }
  1884  
  1885  // For now this must be bracketed with a stoptheworld and a starttheworld to ensure
  1886  // all goroutines see the new barrier.
  1887  //go:nowritebarrier
  1888  func gcinstalloffwb_m() {
  1889  	gcphase = _GCoff
  1890  }
  1891  
  1892  //TODO go:nowritebarrier
  1893  func gc(start_time int64, eagersweep bool) {
  1894  	if _DebugGCPtrs {
  1895  		print("GC start\n")
  1896  	}
  1897  
  1898  	if debug.allocfreetrace > 0 {
  1899  		tracegc()
  1900  	}
  1901  
  1902  	_g_ := getg()
  1903  	_g_.m.traceback = 2
  1904  	t0 := start_time
  1905  	work.tstart = start_time
  1906  
  1907  	var t1 int64
  1908  	if debug.gctrace > 0 {
  1909  		t1 = nanotime()
  1910  	}
  1911  
  1912  	if !checkmark {
  1913  		finishsweep_m() // skip during checkmark debug phase.
  1914  	}
  1915  
  1916  	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
  1917  	// resizing/freeing allspans.
  1918  	// New spans can be created while GC progresses, but they are not garbage for
  1919  	// this round:
  1920  	//  - new stack spans can be created even while the world is stopped.
  1921  	//  - new malloc spans can be created during the concurrent sweep
  1922  
  1923  	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from the heap.
  1924  	lock(&mheap_.lock)
  1925  	// Free the old cached sweep array if necessary.
  1926  	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
  1927  		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
  1928  	}
  1929  	// Cache the current array for marking.
  1930  	mheap_.gcspans = mheap_.allspans
  1931  	work.spans = h_allspans
  1932  	unlock(&mheap_.lock)
  1933  	oldphase := gcphase
  1934  
  1935  	work.nwait = 0
  1936  	work.ndone = 0
  1937  	work.nproc = uint32(gcprocs())
  1938  	gcphase = _GCmarktermination
  1939  
  1940  	// World is stopped so allglen will not change.
  1941  	for i := uintptr(0); i < allglen; i++ {
  1942  		gp := allgs[i]
  1943  		gp.gcworkdone = false // set to true in gcphasework
  1944  	}
  1945  
  1946  	parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), nil, false, markroot)
  1947  	if work.nproc > 1 {
  1948  		noteclear(&work.alldone)
  1949  		helpgc(int32(work.nproc))
  1950  	}
  1951  
  1952  	var t2 int64
  1953  	if debug.gctrace > 0 {
  1954  		t2 = nanotime()
  1955  	}
  1956  
  1957  	gchelperstart()
  1958  	parfordo(work.markfor)
  1959  	scanblock(0, 0, nil)
  1960  
  1961  	if work.full != 0 {
  1962  		throw("work.full != 0")
  1963  	}
  1964  	if work.partial != 0 {
  1965  		throw("work.partial != 0")
  1966  	}
  1967  
  1968  	gcphase = oldphase
  1969  	var t3 int64
  1970  	if debug.gctrace > 0 {
  1971  		t3 = nanotime()
  1972  	}
  1973  
  1974  	if work.nproc > 1 {
  1975  		notesleep(&work.alldone)
  1976  	}
  1977  
  1978  	shrinkfinish()
  1979  
  1980  	cachestats()
  1981  	// next_gc calculation is tricky with concurrent sweep since we don't know the size of the live heap;
  1982  	// estimate what the live heap size was after the previous GC (for printing only).
  1983  	heap0 := memstats.next_gc * 100 / (uint64(gcpercent) + 100)
  1984  	// conservatively set next_gc to a high value assuming that everything is live;
  1985  	// the concurrent/lazy sweep will reduce this number while discovering new garbage.
  1986  	memstats.next_gc = memstats.heap_alloc + memstats.heap_alloc*uint64(gcpercent)/100
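        	// Worked example of the two formulas above (numbers are illustrative):
        	// with gcpercent = 100 and heap_alloc = 4MB, next_gc is set to 8MB;
        	// inverting the same relation, a next_gc of 8MB gives
        	// heap0 = 8MB*100/(100+100) = 4MB as the estimated live heap after the
        	// previous cycle.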
  1987  
  1988  	t4 := nanotime()
  1989  	atomicstore64(&memstats.last_gc, uint64(unixnanotime())) // must be Unix time to make sense to user
  1990  	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(t4 - t0)
  1991  	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(t4)
  1992  	memstats.pause_total_ns += uint64(t4 - t0)
  1993  	memstats.numgc++
  1994  	if memstats.debuggc {
  1995  		print("pause ", t4-t0, "\n")
  1996  	}
  1997  
  1998  	if debug.gctrace > 0 {
  1999  		heap1 := memstats.heap_alloc
  2000  		var stats gcstats
  2001  		updatememstats(&stats)
  2002  		if heap1 != memstats.heap_alloc {
  2003  			print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
  2004  			throw("mstats skew")
  2005  		}
  2006  		obj := memstats.nmalloc - memstats.nfree
  2007  
  2008  		stats.nprocyield += work.markfor.nprocyield
  2009  		stats.nosyield += work.markfor.nosyield
  2010  		stats.nsleep += work.markfor.nsleep
  2011  
  2012  		print("gc", memstats.numgc, "(", work.nproc, "): ",
  2013  			(t1-t0)/1000, "+", (t2-t1)/1000, "+", (t3-t2)/1000, "+", (t4-t3)/1000, " us, ",
  2014  			heap0>>20, " -> ", heap1>>20, " MB, ",
  2015  			obj, " (", memstats.nmalloc, "-", memstats.nfree, ") objects, ",
  2016  			gcount(), " goroutines, ",
  2017  			len(work.spans), "/", sweep.nbgsweep, "/", sweep.npausesweep, " sweeps, ",
  2018  			stats.nhandoff, "(", stats.nhandoffcnt, ") handoff, ",
  2019  			work.markfor.nsteal, "(", work.markfor.nstealcnt, ") steal, ",
  2020  			stats.nprocyield, "/", stats.nosyield, "/", stats.nsleep, " yields\n")
  2021  		sweep.nbgsweep = 0
  2022  		sweep.npausesweep = 0
  2023  	}
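        	// The gctrace print above emits one line per cycle; an illustrative
        	// example with made-up numbers (the format string is exactly the one in
        	// the print call, the values are not real measurements):
        	//
        	//	gc12(4): 120+340+12+90 us, 5 -> 3 MB, 12345 (20000-7655) objects,
        	//	8 goroutines, 180/3/0 sweeps, 0(0) handoff, 6(52) steal, 12/4/1 yields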
  2024  
  2025  	// See the comment at the beginning of this function as to why we need the following.
  2026  	// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from the heap.
  2027  	lock(&mheap_.lock)
  2028  	// Free the old cached mark array if necessary.
  2029  	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
  2030  		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
  2031  	}
  2032  
  2033  	if gccheckmarkenable {
  2034  		if !checkmark {
  2035  			// first half of two-pass; don't set up sweep
  2036  			unlock(&mheap_.lock)
  2037  			return
  2038  		}
  2039  		checkmark = false // done checking marks
  2040  		clearcheckmarkbits()
  2041  	}
  2042  
  2043  	// Cache the current array for sweeping.
  2044  	mheap_.gcspans = mheap_.allspans
  2045  	mheap_.sweepgen += 2
  2046  	mheap_.sweepdone = 0
  2047  	work.spans = h_allspans
  2048  	sweep.spanidx = 0
  2049  	unlock(&mheap_.lock)
  2050  
  2051  	if _ConcurrentSweep && !eagersweep {
  2052  		lock(&gclock)
  2053  		if !sweep.started {
  2054  			go bgsweep()
  2055  			sweep.started = true
  2056  		} else if sweep.parked {
  2057  			sweep.parked = false
  2058  			ready(sweep.g)
  2059  		}
  2060  		unlock(&gclock)
  2061  	} else {
  2062  		// Sweep all spans eagerly.
  2063  		for sweepone() != ^uintptr(0) {
  2064  			sweep.npausesweep++
  2065  		}
  2066  		// Do an additional mProf_GC, because all 'free' events are now real as well.
  2067  		mProf_GC()
  2068  	}
  2069  
  2070  	mProf_GC()
  2071  	_g_.m.traceback = 0
  2072  
  2073  	if _DebugGCPtrs {
  2074  		print("GC end\n")
  2075  	}
  2076  }
  2077  
  2078  func readmemstats_m(stats *MemStats) {
  2079  	updatememstats(nil)
  2080  
  2081  	// The size of the trailing by_size array differs between Go and C:
  2082  	// NumSizeClasses was changed, but we cannot change the Go struct because of backward compatibility.
  2083  	memmove(unsafe.Pointer(stats), unsafe.Pointer(&memstats), sizeof_C_MStats)
  2084  
  2085  	// Stack numbers are part of the heap numbers; separate those out for user consumption.
  2086  	stats.StackSys = stats.StackInuse
  2087  	stats.HeapInuse -= stats.StackInuse
  2088  	stats.HeapSys -= stats.StackInuse
  2089  }
  2090  
  2091  //go:linkname readGCStats runtime/debug.readGCStats
  2092  func readGCStats(pauses *[]uint64) {
  2093  	systemstack(func() {
  2094  		readGCStats_m(pauses)
  2095  	})
  2096  }
  2097  
  2098  func readGCStats_m(pauses *[]uint64) {
  2099  	p := *pauses
  2100  	// Calling code in runtime/debug should make the slice large enough.
  2101  	if cap(p) < len(memstats.pause_ns)+3 {
  2102  		throw("runtime: short slice passed to readGCStats")
  2103  	}
  2104  
  2105  	// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
  2106  	lock(&mheap_.lock)
  2107  
  2108  	n := memstats.numgc
  2109  	if n > uint32(len(memstats.pause_ns)) {
  2110  		n = uint32(len(memstats.pause_ns))
  2111  	}
  2112  
  2113  	// The pause buffer is circular. The most recent pause is at
  2114  	// pause_ns[(numgc-1)%len(pause_ns)], and then backward
  2115  	// from there to go back farther in time. We deliver the times
  2116  	// most recent first (in p[0]).
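        	// For example, with len(pause_ns) == 256 and numgc == 260, the most
        	// recent pause is at index (260-1)%256 = 3, the one before it at
        	// index 2, and so on, wrapping around to index 255 after index 0.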
  2117  	p = p[:cap(p)]
  2118  	for i := uint32(0); i < n; i++ {
  2119  		j := (memstats.numgc - 1 - i) % uint32(len(memstats.pause_ns))
  2120  		p[i] = memstats.pause_ns[j]
  2121  		p[n+i] = memstats.pause_end[j]
  2122  	}
  2123  
  2124  	p[n+n] = memstats.last_gc
  2125  	p[n+n+1] = uint64(memstats.numgc)
  2126  	p[n+n+2] = memstats.pause_total_ns
  2127  	unlock(&mheap_.lock)
  2128  	*pauses = p[:n+n+3]
  2129  }
  2130  
  2131  func setGCPercent(in int32) (out int32) {
  2132  	lock(&mheap_.lock)
  2133  	out = gcpercent
  2134  	if in < 0 {
  2135  		in = -1
  2136  	}
  2137  	gcpercent = in
  2138  	unlock(&mheap_.lock)
  2139  	return out
  2140  }
  2141  
  2142  func gchelperstart() {
  2143  	_g_ := getg()
  2144  
  2145  	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
  2146  		throw("gchelperstart: bad m->helpgc")
  2147  	}
  2148  	if _g_ != _g_.m.g0 {
  2149  		throw("gchelper not running on g0 stack")
  2150  	}
  2151  }
  2152  
  2153  func wakefing() *g {
  2154  	var res *g
  2155  	lock(&finlock)
  2156  	if fingwait && fingwake {
  2157  		fingwait = false
  2158  		fingwake = false
  2159  		res = fing
  2160  	}
  2161  	unlock(&finlock)
  2162  	return res
  2163  }
  2164  
  2165  //go:nowritebarrier
  2166  func addb(p *byte, n uintptr) *byte {
  2167  	return (*byte)(add(unsafe.Pointer(p), n))
  2168  }
  2169  
  2170  // Recursively unrolls GC program in prog.
  2171  // mask is where to store the result.
  2172  // ppos is a pointer to position in mask, in bits.
  2173  // sparse says to generate 4-bits per word mask for heap (2-bits for data/bss otherwise).
  2174  //go:nowritebarrier
  2175  func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool) *byte {
  2176  	arena_start := mheap_.arena_start
  2177  	pos := *ppos
  2178  	mask := (*[1 << 30]byte)(unsafe.Pointer(maskp))
  2179  	for {
  2180  		switch *prog {
  2181  		default:
  2182  			throw("unrollgcprog: unknown instruction")
  2183  
  2184  		case insData:
  2185  			prog = addb(prog, 1)
  2186  			siz := int(*prog)
  2187  			prog = addb(prog, 1)
  2188  			p := (*[1 << 30]byte)(unsafe.Pointer(prog))
  2189  			for i := 0; i < siz; i++ {
  2190  				v := p[i/_PointersPerByte]
  2191  				v >>= (uint(i) % _PointersPerByte) * _BitsPerPointer
  2192  				v &= _BitsMask
  2193  				if inplace {
  2194  					// Store directly into GC bitmap.
  2195  					off := (uintptr(unsafe.Pointer(&mask[pos])) - arena_start) / ptrSize
  2196  					bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2197  					shift := (off % wordsPerBitmapByte) * gcBits
  2198  					if shift == 0 {
  2199  						*bitp = 0
  2200  					}
  2201  					*bitp |= v << (shift + 2)
  2202  					pos += ptrSize
  2203  				} else if sparse {
  2204  					// 4-bits per word
  2205  					v <<= (pos % 8) + 2
  2206  					mask[pos/8] |= v
  2207  					pos += gcBits
  2208  				} else {
  2209  					// 2-bits per word
  2210  					v <<= pos % 8
  2211  					mask[pos/8] |= v
  2212  					pos += _BitsPerPointer
  2213  				}
  2214  			}
  2215  			prog = addb(prog, round(uintptr(siz)*_BitsPerPointer, 8)/8)
  2216  
  2217  		case insArray:
  2218  			prog = (*byte)(add(unsafe.Pointer(prog), 1))
  2219  			siz := uintptr(0)
  2220  			for i := uintptr(0); i < ptrSize; i++ {
  2221  				siz = (siz << 8) + uintptr(*(*byte)(add(unsafe.Pointer(prog), ptrSize-i-1)))
  2222  			}
  2223  			prog = (*byte)(add(unsafe.Pointer(prog), ptrSize))
  2224  			var prog1 *byte
  2225  			for i := uintptr(0); i < siz; i++ {
  2226  				prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
  2227  			}
  2228  			if *prog1 != insArrayEnd {
  2229  				throw("unrollgcprog: array does not end with insArrayEnd")
  2230  			}
  2231  			prog = (*byte)(add(unsafe.Pointer(prog1), 1))
  2232  
  2233  		case insArrayEnd, insEnd:
  2234  			*ppos = pos
  2235  			return prog
  2236  		}
  2237  	}
  2238  }
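        // Illustrative sketch (not part of the original source) of the byte stream
        // unrollgcprog1 consumes. For a hypothetical two-word object whose first
        // word is a pointer and whose second is a scalar, the program would be:
        //
        //	insData, 2,                    // describe the next 2 words
        //	_BitsPointer | _BitsScalar<<2, // 2-bit entries packed low-to-high
        //	insEnd                         // end of program
        //
        // An array of such elements is wrapped as insArray, a ptrSize-byte
        // little-endian count, the element program, then insArrayEnd.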
  2239  
  2240  // Unrolls GC program prog for data/bss, returns dense GC mask.
  2241  func unrollglobgcprog(prog *byte, size uintptr) bitvector {
  2242  	masksize := round(round(size, ptrSize)/ptrSize*bitsPerPointer, 8) / 8
  2243  	mask := (*[1 << 30]byte)(persistentalloc(masksize+1, 0, &memstats.gc_sys))
  2244  	mask[masksize] = 0xa1
  2245  	pos := uintptr(0)
  2246  	prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
  2247  	if pos != size/ptrSize*bitsPerPointer {
  2248  		print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
  2249  		throw("unrollglobgcprog: bad program size")
  2250  	}
  2251  	if *prog != insEnd {
  2252  		throw("unrollglobgcprog: program does not end with insEnd")
  2253  	}
  2254  	if mask[masksize] != 0xa1 {
  2255  		throw("unrollglobgcprog: overflow")
  2256  	}
  2257  	return bitvector{int32(masksize * 8), &mask[0]}
  2258  }
  2259  
  2260  func unrollgcproginplace_m(v unsafe.Pointer, typ *_type, size, size0 uintptr) {
  2261  	pos := uintptr(0)
  2262  	prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2263  	for pos != size0 {
  2264  		unrollgcprog1((*byte)(v), prog, &pos, true, true)
  2265  	}
  2266  
  2267  	// Mark first word as allocated (bitBoundary).
  2268  	arena_start := mheap_.arena_start
  2269  	off := (uintptr(v) - arena_start) / ptrSize
  2270  	bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2271  	shift := (off % wordsPerBitmapByte) * gcBits
  2272  	*bitp |= bitBoundary << shift
  2273  
  2274  	// Mark word after last as BitsDead.
  2275  	if size0 < size {
  2276  		off := (uintptr(v) + size0 - arena_start) / ptrSize
  2277  		bitp := (*byte)(unsafe.Pointer(arena_start - off/wordsPerBitmapByte - 1))
  2278  		shift := (off % wordsPerBitmapByte) * gcBits
  2279  		*bitp &= uint8(^(bitPtrMask << shift) | uintptr(bitsDead)<<(shift+2))
  2280  	}
  2281  }
  2282  
  2283  var unroll mutex
  2284  
  2285  // Unrolls GC program in typ.gc[1] into typ.gc[0]
  2286  //go:nowritebarrier
  2287  func unrollgcprog_m(typ *_type) {
  2288  	lock(&unroll)
  2289  	mask := (*byte)(unsafe.Pointer(uintptr(typ.gc[0])))
  2290  	if *mask == 0 {
  2291  		pos := uintptr(8) // skip the unroll flag
  2292  		prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2293  		prog = unrollgcprog1(mask, prog, &pos, false, true)
  2294  		if *prog != insEnd {
  2295  			throw("unrollgcprog: program does not end with insEnd")
  2296  		}
  2297  		if typ.size/ptrSize%2 != 0 {
  2298  			// repeat the program
  2299  			prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
  2300  			unrollgcprog1(mask, prog, &pos, false, true)
  2301  		}
  2302  
  2303  		// atomic way to say mask[0] = 1
  2304  		atomicor8(mask, 1)
  2305  	}
  2306  	unlock(&unroll)
  2307  }
  2308  
  2309  // mark the span of memory at v as having n blocks of the given size.
  2310  // if leftover is true, there is leftover space at the end of the span.
  2311  //go:nowritebarrier
  2312  func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
  2313  	if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
  2314  		throw("markspan: bad pointer")
  2315  	}
  2316  
  2317  	// Find bits of the beginning of the span.
  2318  	off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
  2319  	if off%wordsPerBitmapByte != 0 {
  2320  		throw("markspan: unaligned length")
  2321  	}
  2322  	b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2323  
  2324  	// Okay to use non-atomic ops here, because we control
  2325  	// the entire span, and each bitmap byte has bits for only
  2326  	// one span, so no other goroutines are changing these bitmap words.
  2327  
  2328  	if size == ptrSize {
  2329  		// Possible only on 64-bits (minimal size class is 8 bytes).
  2330  		// Set memory to 0x11.
  2331  		if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
  2332  			throw("markspan: bad bits")
  2333  		}
  2334  		if n%(wordsPerBitmapByte*ptrSize) != 0 {
  2335  			throw("markspan: unaligned length")
  2336  		}
  2337  		b = b - n/wordsPerBitmapByte + 1 // find first byte
  2338  		if b%ptrSize != 0 {
  2339  			throw("markspan: unaligned pointer")
  2340  		}
  2341  		for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
  2342  			*(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
  2343  		}
  2344  		return
  2345  	}
  2346  
  2347  	if leftover {
  2348  		n++ // mark a boundary just past end of last block too
  2349  	}
  2350  	step := size / (ptrSize * wordsPerBitmapByte)
  2351  	for i := uintptr(0); i < n; i, b = i+1, b-step {
  2352  		*(*byte)(unsafe.Pointer(b)) = bitBoundary | bitsDead<<2
  2353  	}
  2354  }
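        // Sizing example for the loops above, assuming the usual 64-bit layout
        // (ptrSize = 8, wordsPerBitmapByte = 2): for a span of 32-byte blocks,
        // step = 32/16 = 2, so bitBoundary|bitsDead<<2 is written to every second
        // bitmap byte, one per block. 8-byte blocks instead take the
        // size == ptrSize fast path, where two blocks share each bitmap byte and
        // whole words of 0x11 nibble pairs are written at once.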
  2355  
  2356  // unmark the span of memory at v of length n bytes.
  2357  //go:nowritebarrier
  2358  func unmarkspan(v, n uintptr) {
  2359  	if v+n > mheap_.arena_used || v < mheap_.arena_start {
  2360  		throw("unmarkspan: bad pointer")
  2361  	}
  2362  
  2363  	off := (v - mheap_.arena_start) / ptrSize // word offset
  2364  	if off%(ptrSize*wordsPerBitmapByte) != 0 {
  2365  		throw("unmarkspan: unaligned pointer")
  2366  	}
  2367  
  2368  	b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2369  	n /= ptrSize
  2370  	if n%(ptrSize*wordsPerBitmapByte) != 0 {
  2371  		throw("unmarkspan: unaligned length")
  2372  	}
  2373  
  2374  	// Okay to use non-atomic ops here, because we control
  2375  	// the entire span, and each bitmap word has bits for only
  2376  	// one span, so no other goroutines are changing these
  2377  	// bitmap words.
  2378  	n /= wordsPerBitmapByte
  2379  	memclr(unsafe.Pointer(b-n+1), n)
  2380  }
  2381  
  2382  //go:nowritebarrier
  2383  func mHeap_MapBits(h *mheap) {
  2384  	// Caller has added extra mappings to the arena.
  2385  	// Add extra mappings of bitmap words as needed.
  2386  	// We allocate extra bitmap pieces in chunks of bitmapChunk.
  2387  	const bitmapChunk = 8192
  2388  
  2389  	n := (h.arena_used - h.arena_start) / (ptrSize * wordsPerBitmapByte)
  2390  	n = round(n, bitmapChunk)
  2391  	n = round(n, _PhysPageSize)
  2392  	if h.bitmap_mapped >= n {
  2393  		return
  2394  	}
  2395  
  2396  	sysMap(unsafe.Pointer(h.arena_start-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
  2397  	h.bitmap_mapped = n
  2398  }
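        // Rough sizing example for the mapping above, assuming the usual 64-bit
        // layout (ptrSize = 8, wordsPerBitmapByte = 2): one bitmap byte covers
        // 16 bytes of arena, so growing arena_used by 1MB requires 64KB of
        // bitmap, rounded up to bitmapChunk (8KB) and then to _PhysPageSize.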
  2399  
  2400  func getgcmaskcb(frame *stkframe, ctxt unsafe.Pointer) bool {
  2401  	target := (*stkframe)(ctxt)
  2402  	if frame.sp <= target.sp && target.sp < frame.varp {
  2403  		*target = *frame
  2404  		return false
  2405  	}
  2406  	return true
  2407  }
  2408  
  2409  // Returns GC type info for object p for testing.
  2410  func getgcmask(p unsafe.Pointer, t *_type, mask **byte, len *uintptr) {
  2411  	*mask = nil
  2412  	*len = 0
  2413  
  2414  	// data
  2415  	if uintptr(unsafe.Pointer(&data)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&edata)) {
  2416  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2417  		*len = n / ptrSize
  2418  		*mask = &make([]byte, *len)[0]
  2419  		for i := uintptr(0); i < n; i += ptrSize {
  2420  			off := (uintptr(p) + i - uintptr(unsafe.Pointer(&data))) / ptrSize
  2421  			bits := (*(*byte)(add(unsafe.Pointer(gcdatamask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
  2422  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2423  		}
  2424  		return
  2425  	}
  2426  
  2427  	// bss
  2428  	if uintptr(unsafe.Pointer(&bss)) <= uintptr(p) && uintptr(p) < uintptr(unsafe.Pointer(&ebss)) {
  2429  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2430  		*len = n / ptrSize
  2431  		*mask = &make([]byte, *len)[0]
  2432  		for i := uintptr(0); i < n; i += ptrSize {
  2433  			off := (uintptr(p) + i - uintptr(unsafe.Pointer(&bss))) / ptrSize
  2434  			bits := (*(*byte)(add(unsafe.Pointer(gcbssmask.bytedata), off/pointersPerByte)) >> ((off % pointersPerByte) * bitsPerPointer)) & bitsMask
  2435  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2436  		}
  2437  		return
  2438  	}
  2439  
  2440  	// heap
  2441  	var n uintptr
  2442  	var base uintptr
  2443  	if mlookup(uintptr(p), &base, &n, nil) != 0 {
  2444  		*len = n / ptrSize
  2445  		*mask = &make([]byte, *len)[0]
  2446  		for i := uintptr(0); i < n; i += ptrSize {
  2447  			off := (uintptr(base) + i - mheap_.arena_start) / ptrSize
  2448  			b := mheap_.arena_start - off/wordsPerBitmapByte - 1
  2449  			shift := (off % wordsPerBitmapByte) * gcBits
  2450  			bits := (*(*byte)(unsafe.Pointer(b)) >> (shift + 2)) & bitsMask
  2451  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2452  		}
  2453  		return
  2454  	}
  2455  
  2456  	// stack
  2457  	var frame stkframe
  2458  	frame.sp = uintptr(p)
  2459  	_g_ := getg()
  2460  	gentraceback(_g_.m.curg.sched.pc, _g_.m.curg.sched.sp, 0, _g_.m.curg, 0, nil, 1000, getgcmaskcb, noescape(unsafe.Pointer(&frame)), 0)
  2461  	if frame.fn != nil {
  2462  		f := frame.fn
  2463  		targetpc := frame.continpc
  2464  		if targetpc == 0 {
  2465  			return
  2466  		}
  2467  		if targetpc != f.entry {
  2468  			targetpc--
  2469  		}
  2470  		pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
  2471  		if pcdata == -1 {
  2472  			return
  2473  		}
  2474  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
  2475  		if stkmap == nil || stkmap.n <= 0 {
  2476  			return
  2477  		}
  2478  		bv := stackmapdata(stkmap, pcdata)
  2479  		size := uintptr(bv.n) / bitsPerPointer * ptrSize
  2480  		n := (*ptrtype)(unsafe.Pointer(t)).elem.size
  2481  		*len = n / ptrSize
  2482  		*mask = &make([]byte, *len)[0]
  2483  		for i := uintptr(0); i < n; i += ptrSize {
  2484  			off := (uintptr(p) + i - frame.varp + size) / ptrSize
  2485  			bits := ((*(*byte)(add(unsafe.Pointer(bv.bytedata), off*bitsPerPointer/8))) >> ((off * bitsPerPointer) % 8)) & bitsMask
  2486  			*(*byte)(add(unsafe.Pointer(*mask), i/ptrSize)) = bits
  2487  		}
  2488  	}
  2489  }
  2490  
  2491  func unixnanotime() int64 {
  2492  	var now int64
  2493  	gc_unixnanotime(&now)
  2494  	return now
  2495  }