github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/mgcmark.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector: marking and scanning
     6  
     7  package runtime
     8  
     9  import (
    10  	"runtime/internal/atomic"
    11  	"runtime/internal/sys"
    12  	"unsafe"
    13  )
    14  
    15  const (
    16  	fixedRootFinalizers = iota
    17  	fixedRootFreeGStacks
    18  	fixedRootCount
    19  
    20  	// rootBlockBytes is the number of bytes to scan per data or
    21  	// BSS root.
    22  	rootBlockBytes = 256 << 10
    23  
    24  	// rootBlockSpans is the number of spans to scan per span
    25  	// root.
    26  	rootBlockSpans = 8 * 1024 // 64MB worth of spans
    27  
    28  	// maxObletBytes is the maximum bytes of an object to scan at
    29  	// once. Larger objects will be split up into "oblets" of at
    30  	// most this size. Since we can scan 1–2 MB/ms, 128 KB bounds
    31  	// scan preemption at ~100 µs.
    32  	//
    33  	// This must be > _MaxSmallSize so that the object base is the
    34  	// span base.
    35  	maxObletBytes = 128 << 10
    36  )
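
        // exampleObletCount is an editor's sketch, not part of the original
        // source: it illustrates the oblet arithmetic implied by maxObletBytes.
        // A hypothetical 1 MiB object splits into ceil(1 MiB / 128 KiB) = 8
        // oblets, and at the 1–2 MB/ms scan rate cited above each oblet scans
        // in roughly 64–128 µs, which is what bounds preemption latency.
        func exampleObletCount() int {
        	const objBytes = 1 << 20 // hypothetical 1 MiB object
        	return (objBytes + maxObletBytes - 1) / maxObletBytes // == 8 oblets
        }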
    37  
    38  // gcMarkRootPrepare queues root scanning jobs (stacks, globals, and
    39  // some miscellany) and initializes scanning-related state.
    40  //
    41  // The caller must have called gcCopySpans().
    42  //
    43  // The world must be stopped.
    44  //
    45  //go:nowritebarrier
    46  func gcMarkRootPrepare() {
    47  	if gcphase == _GCmarktermination {
    48  		work.nFlushCacheRoots = int(gomaxprocs)
    49  	} else {
    50  		work.nFlushCacheRoots = 0
    51  	}
    52  
    53  	// Compute how many data and BSS root blocks there are.
    54  	nBlocks := func(bytes uintptr) int {
    55  		return int((bytes + rootBlockBytes - 1) / rootBlockBytes)
    56  	}
    57  
    58  	work.nDataRoots = 0
    59  	work.nBSSRoots = 0
    60  
    61  	// Only scan globals once per cycle; preferably concurrently.
    62  	if !work.markrootDone {
    63  		for _, datap := range activeModules() {
    64  			nDataRoots := nBlocks(datap.edata - datap.data)
    65  			if nDataRoots > work.nDataRoots {
    66  				work.nDataRoots = nDataRoots
    67  			}
    68  		}
    69  
    70  		for _, datap := range activeModules() {
    71  			nBSSRoots := nBlocks(datap.ebss - datap.bss)
    72  			if nBSSRoots > work.nBSSRoots {
    73  				work.nBSSRoots = nBSSRoots
    74  			}
    75  		}
    76  	}
    77  
    78  	if !work.markrootDone {
    79  		// On the first markroot, we need to scan span roots.
    80  		// In concurrent GC, this happens during concurrent
    81  		// mark and we depend on addfinalizer to ensure the
    82  		// above invariants for objects that get finalizers
    83  		// after concurrent mark. In STW GC, this will happen
    84  		// during mark termination.
    85  		//
    86  		// We're only interested in scanning the in-use spans,
    87  		// which will all be swept at this point. More spans
    88  		// may be added to this list during concurrent GC, but
    89  		// we only care about spans that were allocated before
    90  		// this mark phase.
    91  		work.nSpanRoots = mheap_.sweepSpans[mheap_.sweepgen/2%2].numBlocks()
    92  
    93  		// On the first markroot, we need to scan all Gs. Gs
    94  		// may be created after this point, but it's okay that
    95  		// we ignore them because they begin life without any
    96  		// roots, so there's nothing to scan, and any roots
    97  		// they create during the concurrent phase will be
    98  		// scanned during mark termination. During mark
    99  		// termination, allglen isn't changing, so we'll scan
   100  		// all Gs.
   101  		work.nStackRoots = int(atomic.Loaduintptr(&allglen))
   102  		work.nRescanRoots = 0
   103  	} else {
   104  		// We've already scanned span roots and kept the scan
   105  		// up-to-date during concurrent mark.
   106  		work.nSpanRoots = 0
   107  
   108  		// On the second pass of markroot, we're just scanning
   109  		// dirty stacks. It's safe to access rescan since the
   110  		// world is stopped.
   111  		work.nStackRoots = 0
   112  		work.nRescanRoots = len(work.rescan.list)
   113  	}
   114  
   115  	work.markrootNext = 0
   116  	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots + work.nRescanRoots)
   117  }
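
        // exampleRootJobLayout is an editor's sketch with hypothetical counts,
        // not part of the original source. It mirrors the arithmetic above and
        // its inverse in markroot: root jobs form one flat index space, the
        // fixedRootCount fixed roots first, followed by each variable-sized
        // class in the same order markroot decodes them.
        func exampleRootJobLayout() uint32 {
        	const nFlush, nData, nBSS, nSpan, nStack, nRescan = 4, 2, 1, 3, 10, 0
        	// Jobs [0,2) are the fixed roots, [2,6) flush mcaches, [6,8) data
        	// blocks, [8,9) BSS blocks, [9,12) spans, and [12,22) stacks.
        	return uint32(fixedRootCount + nFlush + nData + nBSS + nSpan + nStack + nRescan) // == 22 jobs
        }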
   118  
   119  // gcMarkRootCheck checks that all roots have been scanned. It is
   120  // purely for debugging.
   121  func gcMarkRootCheck() {
   122  	if work.markrootNext < work.markrootJobs {
   123  		print(work.markrootNext, " of ", work.markrootJobs, " markroot jobs done\n")
   124  		throw("left over markroot jobs")
   125  	}
   126  
   127  	lock(&allglock)
   128  	// Check that stacks have been scanned.
   129  	if gcphase == _GCmarktermination && debug.gcrescanstacks > 0 {
   130  		for i := 0; i < len(allgs); i++ {
   131  			gp := allgs[i]
   132  			if !(gp.gcscandone && gp.gcscanvalid) && readgstatus(gp) != _Gdead {
   133  				println("gp", gp, "goid", gp.goid,
   134  					"status", readgstatus(gp),
   135  					"gcscandone", gp.gcscandone,
   136  					"gcscanvalid", gp.gcscanvalid)
   137  				throw("scan missed a g")
   138  			}
   139  		}
   140  	} else {
   141  		for i := 0; i < work.nStackRoots; i++ {
   142  			gp := allgs[i]
   143  			if !gp.gcscandone {
   144  				throw("scan missed a g")
   145  			}
   146  		}
   147  	}
   148  	unlock(&allglock)
   149  }
   150  
   151  // ptrmask for an allocation containing a single pointer.
   152  var oneptrmask = [...]uint8{1}
   153  
   154  // markroot scans the i'th root.
   155  //
   156  // Preemption must be disabled (because this uses a gcWork).
   157  //
   158  // nowritebarrier is only advisory here.
   159  //
   160  //go:nowritebarrier
   161  func markroot(gcw *gcWork, i uint32) {
   162  	// TODO(austin): This is a bit ridiculous. Compute and store
   163  	// the bases in gcMarkRootPrepare instead of the counts.
   164  	baseFlushCache := uint32(fixedRootCount)
   165  	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
   166  	baseBSS := baseData + uint32(work.nDataRoots)
   167  	baseSpans := baseBSS + uint32(work.nBSSRoots)
   168  	baseStacks := baseSpans + uint32(work.nSpanRoots)
   169  	baseRescan := baseStacks + uint32(work.nStackRoots)
   170  	end := baseRescan + uint32(work.nRescanRoots)
   171  
   172  	// Note: if you add a case here, please also update heapdump.go:dumproots.
   173  	switch {
   174  	case baseFlushCache <= i && i < baseData:
   175  		flushmcache(int(i - baseFlushCache))
   176  
   177  	case baseData <= i && i < baseBSS:
   178  		for _, datap := range activeModules() {
   179  			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
   180  		}
   181  
   182  	case baseBSS <= i && i < baseSpans:
   183  		for _, datap := range activeModules() {
   184  			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
   185  		}
   186  
   187  	case i == fixedRootFinalizers:
   188  		for fb := allfin; fb != nil; fb = fb.alllink {
   189  			cnt := uintptr(atomic.Load(&fb.cnt))
   190  			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)
   191  		}
   192  
   193  	case i == fixedRootFreeGStacks:
   194  		// Only do this once per GC cycle; preferably
   195  		// concurrently.
   196  		if !work.markrootDone {
   197  			// Switch to the system stack so we can call
   198  			// stackfree.
   199  			systemstack(markrootFreeGStacks)
   200  		}
   201  
   202  	case baseSpans <= i && i < baseStacks:
   203  		// mark MSpan.specials
   204  		markrootSpans(gcw, int(i-baseSpans))
   205  
   206  	default:
   207  		// the rest is scanning goroutine stacks
   208  		var gp *g
   209  		if baseStacks <= i && i < baseRescan {
   210  			gp = allgs[i-baseStacks]
   211  		} else if baseRescan <= i && i < end {
   212  			gp = work.rescan.list[i-baseRescan].ptr()
   213  			if gp.gcRescan != int32(i-baseRescan) {
   214  				// Looking for issue #17099.
   215  				println("runtime: gp", gp, "found at rescan index", i-baseRescan, "but should be at", gp.gcRescan)
   216  				throw("bad g rescan index")
   217  			}
   218  		} else {
   219  			throw("markroot: bad index")
   220  		}
   221  
   222  		// remember when we've first observed the G blocked
   223  		// needed only to output in traceback
   224  		status := readgstatus(gp) // We are not in a scan state
   225  		if (status == _Gwaiting || status == _Gsyscall) && gp.waitsince == 0 {
   226  			gp.waitsince = work.tstart
   227  		}
   228  
   229  		// scang must be done on the system stack in case
   230  		// we're trying to scan our own stack.
   231  		systemstack(func() {
   232  			// If this is a self-scan, put the user G in
   233  			// _Gwaiting to prevent self-deadlock. It may
   234  			// already be in _Gwaiting if this is a mark
   235  			// worker or we're in mark termination.
   236  			userG := getg().m.curg
   237  			selfScan := gp == userG && readgstatus(userG) == _Grunning
   238  			if selfScan {
   239  				casgstatus(userG, _Grunning, _Gwaiting)
   240  				userG.waitreason = "garbage collection scan"
   241  			}
   242  
   243  			// TODO: scang blocks until gp's stack has
   244  			// been scanned, which may take a while for
   245  			// running goroutines. Consider doing this in
   246  			// two phases where the first is non-blocking:
   247  			// we scan the stacks we can and ask running
   248  			// goroutines to scan themselves; and the
   249  			// second blocks.
   250  			scang(gp, gcw)
   251  
   252  			if selfScan {
   253  				casgstatus(userG, _Gwaiting, _Grunning)
   254  			}
   255  		})
   256  	}
   257  }
   258  
   259  // markrootBlock scans the shard'th shard of the block of memory [b0,
   260  // b0+n0), with the given pointer mask.
   261  //
   262  //go:nowritebarrier
   263  func markrootBlock(b0, n0 uintptr, ptrmask0 *uint8, gcw *gcWork, shard int) {
   264  	if rootBlockBytes%(8*sys.PtrSize) != 0 {
   265  		// This is necessary to pick byte offsets in ptrmask0.
   266  		throw("rootBlockBytes must be a multiple of 8*ptrSize")
   267  	}
   268  
   269  	b := b0 + uintptr(shard)*rootBlockBytes
   270  	if b >= b0+n0 {
   271  		return
   272  	}
   273  	ptrmask := (*uint8)(add(unsafe.Pointer(ptrmask0), uintptr(shard)*(rootBlockBytes/(8*sys.PtrSize))))
   274  	n := uintptr(rootBlockBytes)
   275  	if b+n > b0+n0 {
   276  		n = b0 + n0 - b
   277  	}
   278  
   279  	// Scan this shard.
   280  	scanblock(b, n, ptrmask, gcw)
   281  }
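
        // exampleRootShard is an editor's sketch with a hypothetical segment,
        // not part of the original source: it repeats markrootBlock's shard
        // arithmetic for a 600 KiB root segment, which yields three shards;
        // shards 0 and 1 scan rootBlockBytes each and shard 2 the 88 KiB tail.
        // Callers are assumed to pass shard < nBlocks(n0).
        func exampleRootShard(shard int) (base, n uintptr) {
        	const b0, n0 uintptr = 0x1000, 600 << 10 // hypothetical segment base and length
        	base = b0 + uintptr(shard)*rootBlockBytes
        	n = rootBlockBytes
        	if base+n > b0+n0 {
        		n = b0 + n0 - base // last shard: scan only the tail
        	}
        	return
        }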
   282  
   283  // markrootFreeGStacks frees stacks of dead Gs.
   284  //
   285  // This does not free stacks of dead Gs cached on Ps, but having a few
   286  // cached stacks around isn't a problem.
   287  //
   288  //TODO go:nowritebarrier
   289  func markrootFreeGStacks() {
   290  	// Take list of dead Gs with stacks.
   291  	lock(&sched.gflock)
   292  	list := sched.gfreeStack
   293  	sched.gfreeStack = nil
   294  	unlock(&sched.gflock)
   295  	if list == nil {
   296  		return
   297  	}
   298  
   299  	// Free stacks.
   300  	tail := list
   301  	for gp := list; gp != nil; gp = gp.schedlink.ptr() {
   302  		shrinkstack(gp)
   303  		tail = gp
   304  	}
   305  
   306  	// Put Gs back on the free list.
   307  	lock(&sched.gflock)
   308  	tail.schedlink.set(sched.gfreeNoStack)
   309  	sched.gfreeNoStack = list
   310  	unlock(&sched.gflock)
   311  }
   312  
   313  // markrootSpans marks roots for one shard of work.spans.
   314  //
   315  //go:nowritebarrier
   316  func markrootSpans(gcw *gcWork, shard int) {
   317  	// Objects with finalizers have two GC-related invariants:
   318  	//
   319  	// 1) Everything reachable from the object must be marked.
   320  	// This ensures that when we pass the object to its finalizer,
   321  	// everything the finalizer can reach will be retained.
   322  	//
   323  	// 2) Finalizer specials (which are not in the garbage
   324  	// collected heap) are roots. In practice, this means the fn
   325  	// field must be scanned.
   326  	//
   327  	// TODO(austin): There are several ideas for making this more
   328  	// efficient in issue #11485.
   329  
   330  	if work.markrootDone {
   331  		throw("markrootSpans during second markroot")
   332  	}
   333  
   334  	sg := mheap_.sweepgen
   335  	spans := mheap_.sweepSpans[mheap_.sweepgen/2%2].block(shard)
   336  	// Note that work.spans may not include spans that were
   337  	// allocated between entering the scan phase and now. This is
   338  	// okay because any objects with finalizers in those spans
   339  	// must have been allocated and given finalizers after we
   340  	// entered the scan phase, so addfinalizer will have ensured
   341  	// the above invariants for them.
   342  	for _, s := range spans {
   343  		if s.state != mSpanInUse {
   344  			continue
   345  		}
   346  		if !useCheckmark && s.sweepgen != sg {
   347  			// sweepgen was updated (+2) during non-checkmark GC pass
   348  			print("sweep ", s.sweepgen, " ", sg, "\n")
   349  			throw("gc: unswept span")
   350  		}
   351  
   352  		// Speculatively check if there are any specials
   353  		// without acquiring the span lock. This may race with
   354  		// adding the first special to a span, but in that
   355  		// case addfinalizer will observe that the GC is
   356  		// active (which is globally synchronized) and ensure
   357  		// the above invariants. We may also ensure the
   358  		// invariants, but it's okay to scan an object twice.
   359  		if s.specials == nil {
   360  			continue
   361  		}
   362  
   363  		// Lock the specials to prevent a special from being
   364  		// removed from the list while we're traversing it.
   365  		lock(&s.speciallock)
   366  
   367  		for sp := s.specials; sp != nil; sp = sp.next {
   368  			if sp.kind != _KindSpecialFinalizer {
   369  				continue
   370  			}
   371  			// don't mark finalized object, but scan it so we
   372  			// retain everything it points to.
   373  			spf := (*specialfinalizer)(unsafe.Pointer(sp))
   374  			// A finalizer can be set for an inner byte of an object, find object beginning.
   375  			p := s.base() + uintptr(spf.special.offset)/s.elemsize*s.elemsize
   376  
   377  			// Mark everything that can be reached from
   378  			// the object (but *not* the object itself or
   379  			// we'll never collect it).
   380  			scanobject(p, gcw)
   381  
   382  			// The special itself is a root.
   383  			scanblock(uintptr(unsafe.Pointer(&spf.fn)), sys.PtrSize, &oneptrmask[0], gcw)
   384  		}
   385  
   386  		unlock(&s.speciallock)
   387  	}
   388  }
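
        // exampleSpecialBase is an editor's sketch with hypothetical span
        // geometry, not part of the original source: it shows how the loop
        // above recovers an object's base from a finalizer special's byte
        // offset. With 48-byte elements, an offset of 100 bytes rounds down
        // to the third object, 96 bytes into the span.
        func exampleSpecialBase() uintptr {
        	const spanBase, elemsize, offset uintptr = 0x10000, 48, 100
        	return spanBase + offset/elemsize*elemsize // == spanBase + 96
        }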
   389  
   390  // gcAssistAlloc performs GC work to make gp's assist debt positive.
   391  // gp must be the calling user goroutine.
   392  //
   393  // This must be called with preemption enabled.
   394  func gcAssistAlloc(gp *g) {
   395  	// Don't assist in non-preemptible contexts. These are
   396  	// generally fragile and won't allow the assist to block.
   397  	if getg() == gp.m.g0 {
   398  		return
   399  	}
   400  	if mp := getg().m; mp.locks > 0 || mp.preemptoff != "" {
   401  		return
   402  	}
   403  
   404  retry:
   405  	// Compute the amount of scan work we need to do to make the
   406  	// balance positive. When the required amount of work is low,
   407  	// we over-assist to build up credit for future allocations
   408  	// and amortize the cost of assisting.
   409  	debtBytes := -gp.gcAssistBytes
   410  	scanWork := int64(gcController.assistWorkPerByte * float64(debtBytes))
   411  	if scanWork < gcOverAssistWork {
   412  		scanWork = gcOverAssistWork
   413  		debtBytes = int64(gcController.assistBytesPerWork * float64(scanWork))
   414  	}
   415  
   416  	// Steal as much credit as we can from the background GC's
   417  	// scan credit. This is racy and may drop the background
   418  	// credit below 0 if two mutators steal at the same time. This
   419  	// will just cause steals to fail until credit is accumulated
   420  	// again, so in the long run it doesn't really matter, but we
   421  	// do have to handle the negative credit case.
   422  	bgScanCredit := atomic.Loadint64(&gcController.bgScanCredit)
   423  	stolen := int64(0)
   424  	if bgScanCredit > 0 {
   425  		if bgScanCredit < scanWork {
   426  			stolen = bgScanCredit
   427  			gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(stolen))
   428  		} else {
   429  			stolen = scanWork
   430  			gp.gcAssistBytes += debtBytes
   431  		}
   432  		atomic.Xaddint64(&gcController.bgScanCredit, -stolen)
   433  
   434  		scanWork -= stolen
   435  
   436  		if scanWork == 0 {
   437  			// We were able to steal all of the credit we
   438  			// needed.
   439  			return
   440  		}
   441  	}
   442  
   443  	// Perform assist work
   444  	systemstack(func() {
   445  		gcAssistAlloc1(gp, scanWork)
   446  		// The user stack may have moved, so this can't touch
   447  		// anything on it until it returns from systemstack.
   448  	})
   449  
   450  	completed := gp.param != nil
   451  	gp.param = nil
   452  	if completed {
   453  		gcMarkDone()
   454  	}
   455  
   456  	if gp.gcAssistBytes < 0 {
   457  	// We were unable to steal enough credit or perform
   458  		// enough work to pay off the assist debt. We need to
   459  		// do one of these before letting the mutator allocate
   460  		// more to prevent over-allocation.
   461  		//
   462  		// If this is because we were preempted, reschedule
   463  		// and try some more.
   464  		if gp.preempt {
   465  			Gosched()
   466  			goto retry
   467  		}
   468  
   469  		// Add this G to an assist queue and park. When the GC
   470  		// has more background credit, it will satisfy queued
   471  		// assists before flushing to the global credit pool.
   472  		//
   473  		// Note that this does *not* get woken up when more
   474  		// work is added to the work list. The theory is that
   475  		// there wasn't enough work to do anyway, so we might
   476  		// as well let background marking take care of the
   477  		// work that is available.
   478  		if !gcParkAssist() {
   479  			goto retry
   480  		}
   481  
   482  		// At this point either background GC has satisfied
   483  		// this G's assist debt, or the GC cycle is over.
   484  	}
   485  }
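
        // exampleAssistDebt is an editor's sketch with a hypothetical
        // controller value, not part of the original source: it shows the
        // conversion gcAssistAlloc performs above, from bytes of
        // over-allocation to units of scan work via assistWorkPerByte.
        func exampleAssistDebt() int64 {
        	const assistWorkPerByte = 0.5 // hypothetical: scan work owed per byte allocated
        	debtBytes := int64(4096)      // the G allocated 4 KiB it hasn't paid for
        	return int64(assistWorkPerByte * float64(debtBytes)) // == 2048 units of scan work
        }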
   486  
   487  // gcAssistAlloc1 is the part of gcAssistAlloc that runs on the system
   488  // stack. This is a separate function to make it easier to see that
   489  // we're not capturing anything from the user stack, since the user
   490  // stack may move while we're in this function.
   491  //
   492  // gcAssistAlloc1 indicates whether this assist completed the mark
   493  // phase by setting gp.param to non-nil. This can't be communicated on
   494  // the stack since it may move.
   495  //
   496  //go:systemstack
   497  func gcAssistAlloc1(gp *g, scanWork int64) {
   498  	// Clear the flag indicating that this assist completed the
   499  	// mark phase.
   500  	gp.param = nil
   501  
   502  	if atomic.Load(&gcBlackenEnabled) == 0 {
   503  		// The gcBlackenEnabled check in malloc races with the
   504  		// store that clears it but an atomic check in every malloc
   505  		// would be a performance hit.
   506  	// Instead we recheck it here on the non-preemptible system
   507  	// stack to determine if we should perform an assist.
   508  
   509  		// GC is done, so ignore any remaining debt.
   510  		gp.gcAssistBytes = 0
   511  		return
   512  	}
   513  	// Track time spent in this assist. Since we're on the
   514  	// system stack, this is non-preemptible, so we can
   515  	// just measure start and end time.
   516  	startTime := nanotime()
   517  
   518  	decnwait := atomic.Xadd(&work.nwait, -1)
   519  	if decnwait == work.nproc {
   520  		println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
   521  		throw("work.nwait > work.nproc")
   522  	}
   523  
   524  	// gcDrainN requires the caller to be preemptible.
   525  	casgstatus(gp, _Grunning, _Gwaiting)
   526  	gp.waitreason = "GC assist marking"
   527  
   528  	// drain own cached work first in the hopes that it
   529  	// will be more cache friendly.
   530  	gcw := &getg().m.p.ptr().gcw
   531  	workDone := gcDrainN(gcw, scanWork)
   532  	// If we are near the end of the mark phase
   533  	// dispose of the gcw.
   534  	if gcBlackenPromptly {
   535  		gcw.dispose()
   536  	}
   537  
   538  	casgstatus(gp, _Gwaiting, _Grunning)
   539  
   540  	// Record that we did this much scan work.
   541  	//
   542  	// Back out the number of bytes of assist credit that
   543  	// this scan work counts for. The "1+" is a poor man's
   544  	// round-up, to ensure this adds credit even if
   545  	// assistBytesPerWork is very low.
   546  	gp.gcAssistBytes += 1 + int64(gcController.assistBytesPerWork*float64(workDone))
   547  
   548  	// If this is the last worker and we ran out of work,
   549  	// signal a completion point.
   550  	incnwait := atomic.Xadd(&work.nwait, +1)
   551  	if incnwait > work.nproc {
   552  		println("runtime: work.nwait=", incnwait,
   553  			"work.nproc=", work.nproc,
   554  			"gcBlackenPromptly=", gcBlackenPromptly)
   555  		throw("work.nwait > work.nproc")
   556  	}
   557  
   558  	if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
   559  		// This has reached a background completion point. Set
   560  		// gp.param to a non-nil value to indicate this. It
   561  		// doesn't matter what we set it to (it just has to be
   562  		// a valid pointer).
   563  		gp.param = unsafe.Pointer(gp)
   564  	}
   565  	duration := nanotime() - startTime
   566  	_p_ := gp.m.p.ptr()
   567  	_p_.gcAssistTime += duration
   568  	if _p_.gcAssistTime > gcAssistTimeSlack {
   569  		atomic.Xaddint64(&gcController.assistTime, _p_.gcAssistTime)
   570  		_p_.gcAssistTime = 0
   571  	}
   572  }
   573  
   574  // gcWakeAllAssists wakes all currently blocked assists. This is used
   575  // at the end of a GC cycle. gcBlackenEnabled must be false to prevent
   576  // new assists from going to sleep after this point.
   577  func gcWakeAllAssists() {
   578  	lock(&work.assistQueue.lock)
   579  	injectglist(work.assistQueue.head.ptr())
   580  	work.assistQueue.head.set(nil)
   581  	work.assistQueue.tail.set(nil)
   582  	unlock(&work.assistQueue.lock)
   583  }
   584  
   585  // gcParkAssist puts the current goroutine on the assist queue and parks.
   586  //
   587  // gcParkAssist returns whether the assist is now satisfied. If it
   588  // returns false, the caller must retry the assist.
   589  //
   590  //go:nowritebarrier
   591  func gcParkAssist() bool {
   592  	lock(&work.assistQueue.lock)
   593  	// If the GC cycle finished while we were getting the lock,
   594  	// exit the assist. The cycle can't finish while we hold the
   595  	// lock.
   596  	if atomic.Load(&gcBlackenEnabled) == 0 {
   597  		unlock(&work.assistQueue.lock)
   598  		return true
   599  	}
   600  
   601  	gp := getg()
   602  	oldHead, oldTail := work.assistQueue.head, work.assistQueue.tail
   603  	if oldHead == 0 {
   604  		work.assistQueue.head.set(gp)
   605  	} else {
   606  		oldTail.ptr().schedlink.set(gp)
   607  	}
   608  	work.assistQueue.tail.set(gp)
   609  	gp.schedlink.set(nil)
   610  
   611  	// Recheck for background credit now that this G is in
   612  	// the queue, but can still back out. This avoids a
   613  	// race in case background marking has flushed more
   614  	// credit since we checked above.
   615  	if atomic.Loadint64(&gcController.bgScanCredit) > 0 {
   616  		work.assistQueue.head = oldHead
   617  		work.assistQueue.tail = oldTail
   618  		if oldTail != 0 {
   619  			oldTail.ptr().schedlink.set(nil)
   620  		}
   621  		unlock(&work.assistQueue.lock)
   622  		return false
   623  	}
   624  	// Park.
   625  	goparkunlock(&work.assistQueue.lock, "GC assist wait", traceEvGoBlockGC, 2)
   626  	return true
   627  }
   628  
   629  // gcFlushBgCredit flushes scanWork units of background scan work
   630  // credit. This first satisfies blocked assists on the
   631  // work.assistQueue and then flushes any remaining credit to
   632  // gcController.bgScanCredit.
   633  //
   634  // Write barriers are disallowed because this is used by gcDrain after
   635  // it has ensured that all work is drained and this must preserve that
   636  // condition.
   637  //
   638  //go:nowritebarrierrec
   639  func gcFlushBgCredit(scanWork int64) {
   640  	if work.assistQueue.head == 0 {
   641  		// Fast path; there are no blocked assists. There's a
   642  		// small window here where an assist may add itself to
   643  		// the blocked queue and park. If that happens, we'll
   644  		// just get it on the next flush.
   645  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   646  		return
   647  	}
   648  
   649  	scanBytes := int64(float64(scanWork) * gcController.assistBytesPerWork)
   650  
   651  	lock(&work.assistQueue.lock)
   652  	gp := work.assistQueue.head.ptr()
   653  	for gp != nil && scanBytes > 0 {
   654  		// Note that gp.gcAssistBytes is negative because gp
   655  		// is in debt. Think carefully about the signs below.
   656  		if scanBytes+gp.gcAssistBytes >= 0 {
   657  			// Satisfy this entire assist debt.
   658  			scanBytes += gp.gcAssistBytes
   659  			gp.gcAssistBytes = 0
   660  			xgp := gp
   661  			gp = gp.schedlink.ptr()
   662  			// It's important that we *not* put xgp in
   663  			// runnext. Otherwise, it's possible for user
   664  			// code to exploit the GC worker's high
   665  			// scheduler priority to get itself always run
   666  			// before other goroutines and always in the
   667  			// fresh quantum started by GC.
   668  			ready(xgp, 0, false)
   669  		} else {
   670  			// Partially satisfy this assist.
   671  			gp.gcAssistBytes += scanBytes
   672  			scanBytes = 0
   673  			// As a heuristic, we move this assist to the
   674  			// back of the queue so that large assists
   675  			// can't clog up the assist queue and
   676  			// substantially delay small assists.
   677  			xgp := gp
   678  			gp = gp.schedlink.ptr()
   679  			if gp == nil {
   680  				// gp is the only assist in the queue.
   681  				gp = xgp
   682  			} else {
   683  				xgp.schedlink = 0
   684  				work.assistQueue.tail.ptr().schedlink.set(xgp)
   685  				work.assistQueue.tail.set(xgp)
   686  			}
   687  			break
   688  		}
   689  	}
   690  	work.assistQueue.head.set(gp)
   691  	if gp == nil {
   692  		work.assistQueue.tail.set(nil)
   693  	}
   694  
   695  	if scanBytes > 0 {
   696  		// Convert from scan bytes back to work.
   697  		scanWork = int64(float64(scanBytes) * gcController.assistWorkPerByte)
   698  		atomic.Xaddint64(&gcController.bgScanCredit, scanWork)
   699  	}
   700  	unlock(&work.assistQueue.lock)
   701  }
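
        // exampleFlushCredit is an editor's sketch, not part of the original
        // source: it shows gcFlushBgCredit's sign convention. A parked assist
        // stores its debt as a negative gcAssistBytes, so a debt of -3000
        // bytes is fully satisfied by 5000 bytes of flushed credit, leaving
        // 2000 bytes for the global pool.
        func exampleFlushCredit() int64 {
        	scanBytes := int64(5000) // credit being flushed, in bytes
        	debt := int64(-3000)     // a queued assist's (negative) gcAssistBytes
        	if scanBytes+debt >= 0 {
        		scanBytes += debt // satisfy the entire debt
        		debt = 0
        	}
        	_ = debt
        	return scanBytes // == 2000 bytes left over for bgScanCredit
        }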
   702  
   703  // scanstack scans gp's stack, greying all pointers found on the stack.
   704  //
   705  // During mark phase, it also installs stack barriers while traversing
   706  // gp's stack. During mark termination, it stops scanning when it
   707  // reaches an unhit stack barrier.
   708  //
   709  // scanstack is marked go:systemstack because it must not be preempted
   710  // while using a workbuf.
   711  //
   712  //go:nowritebarrier
   713  //go:systemstack
   714  func scanstack(gp *g, gcw *gcWork) {
   715  	if gp.gcscanvalid {
   716  		return
   717  	}
   718  
   719  	if readgstatus(gp)&_Gscan == 0 {
   720  		print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
   721  		throw("scanstack - bad status")
   722  	}
   723  
   724  	switch readgstatus(gp) &^ _Gscan {
   725  	default:
   726  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   727  		throw("mark - bad status")
   728  	case _Gdead:
   729  		return
   730  	case _Grunning:
   731  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
   732  		throw("scanstack: goroutine not stopped")
   733  	case _Grunnable, _Gsyscall, _Gwaiting:
   734  		// ok
   735  	}
   736  
   737  	if gp == getg() {
   738  		throw("can't scan our own stack")
   739  	}
   740  	mp := gp.m
   741  	if mp != nil && mp.helpgc != 0 {
   742  		throw("can't scan gchelper stack")
   743  	}
   744  
   745  	// Shrink the stack if not much of it is being used. During
   746  	// concurrent GC, we can do this during concurrent mark.
   747  	if !work.markrootDone {
   748  		shrinkstack(gp)
   749  	}
   750  
   751  	// Prepare for stack barrier insertion/removal.
   752  	var sp, barrierOffset, nextBarrier uintptr
   753  	if gp.syscallsp != 0 {
   754  		sp = gp.syscallsp
   755  	} else {
   756  		sp = gp.sched.sp
   757  	}
   758  	gcLockStackBarriers(gp) // Not necessary during mark term, but harmless.
   759  	switch gcphase {
   760  	case _GCmark:
   761  		// Install stack barriers during stack scan.
   762  		barrierOffset = uintptr(firstStackBarrierOffset)
   763  		nextBarrier = sp + barrierOffset
   764  
   765  		if debug.gcstackbarrieroff > 0 {
   766  			nextBarrier = ^uintptr(0)
   767  		}
   768  
   769  		// Remove any existing stack barriers before we
   770  		// install new ones.
   771  		gcRemoveStackBarriers(gp)
   772  
   773  	case _GCmarktermination:
   774  		if !work.markrootDone {
   775  			// This is a STW GC. There may be stale stack
   776  			// barriers from an earlier cycle since we
   777  			// never passed through mark phase.
   778  			gcRemoveStackBarriers(gp)
   779  		}
   780  
   781  		if int(gp.stkbarPos) == len(gp.stkbar) {
   782  			// gp hit all of the stack barriers (or there
   783  			// were none). Re-scan the whole stack.
   784  			nextBarrier = ^uintptr(0)
   785  		} else {
   786  			// Only re-scan up to the lowest un-hit
   787  			// barrier. Any frames above this have not
   788  			// executed since the concurrent scan of gp and
   789  			// any writes through up-pointers to above
   790  			// this barrier had write barriers.
   791  			nextBarrier = gp.stkbar[gp.stkbarPos].savedLRPtr
   792  			if debugStackBarrier {
   793  				print("rescan below ", hex(nextBarrier), " in [", hex(sp), ",", hex(gp.stack.hi), ") goid=", gp.goid, "\n")
   794  			}
   795  		}
   796  
   797  	default:
   798  		throw("scanstack in wrong phase")
   799  	}
   800  
   801  	// Scan the stack.
   802  	var cache pcvalueCache
   803  	n := 0
   804  	scanframe := func(frame *stkframe, unused unsafe.Pointer) bool {
   805  		scanframeworker(frame, &cache, gcw)
   806  
   807  		if frame.fp > nextBarrier {
   808  			// We skip installing a barrier on bottom-most
   809  			// frame because on LR machines this LR is not
   810  			// on the stack.
   811  			if gcphase == _GCmark && n != 0 {
   812  				if gcInstallStackBarrier(gp, frame) {
   813  					barrierOffset *= 2
   814  					nextBarrier = sp + barrierOffset
   815  				}
   816  			} else if gcphase == _GCmarktermination {
   817  				// We just scanned a frame containing
   818  				// a return to a stack barrier. Since
   819  				// this frame never returned, we can
   820  				// stop scanning.
   821  				return false
   822  			}
   823  		}
   824  		n++
   825  
   826  		return true
   827  	}
   828  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
   829  	tracebackdefers(gp, scanframe, nil)
   830  	gcUnlockStackBarriers(gp)
   831  	if gcphase == _GCmark {
   832  		// gp may have added itself to the rescan list between
   833  		// when GC started and now. It's clean now, so remove
   834  		// it. This isn't safe during mark termination because
   835  		// mark termination is consuming this list, but it's
   836  		// also not necessary.
   837  		dequeueRescan(gp)
   838  	}
   839  	gp.gcscanvalid = true
   840  }
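
        // exampleBarrierSpacing is an editor's sketch, not part of the
        // original source: the scan above doubles barrierOffset after each
        // installed barrier, so barriers are exponentially spaced and an
        // h-byte stack gets O(log h) of them. firstOffset is assumed > 0.
        func exampleBarrierSpacing(firstOffset, stackBytes uintptr) (n int) {
        	for off := firstOffset; off < stackBytes; off *= 2 {
        		n++ // one barrier per doubling of the offset
        	}
        	return
        }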
   841  
   842  // Scan a stack frame: local variables and function arguments/results.
   843  //go:nowritebarrier
   844  func scanframeworker(frame *stkframe, cache *pcvalueCache, gcw *gcWork) {
   845  
   846  	f := frame.fn
   847  	targetpc := frame.continpc
   848  	if targetpc == 0 {
   849  		// Frame is dead.
   850  		return
   851  	}
   852  	if _DebugGC > 1 {
   853  		print("scanframe ", funcname(f), "\n")
   854  	}
   855  	if targetpc != f.entry {
   856  		targetpc--
   857  	}
   858  	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, cache)
   859  	if pcdata == -1 {
   860  		// We do not have a valid pcdata value but there might be a
   861  		// stackmap for this function. It is likely that we are looking
   862  		// at the function prologue, assume so and hope for the best.
   863  		pcdata = 0
   864  	}
   865  
   866  	// Scan local variables if stack frame has been allocated.
   867  	size := frame.varp - frame.sp
   868  	var minsize uintptr
   869  	switch sys.ArchFamily {
   870  	case sys.ARM64:
   871  		minsize = sys.SpAlign
   872  	default:
   873  		minsize = sys.MinFrameSize
   874  	}
   875  	if size > minsize {
   876  		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
   877  		if stkmap == nil || stkmap.n <= 0 {
   878  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
   879  			throw("missing stackmap")
   880  		}
   881  
   882  		// Locals bitmap information, scan just the pointers in locals.
   883  		if pcdata < 0 || pcdata >= stkmap.n {
   884  			// don't know where we are
   885  			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   886  			throw("scanframe: bad symbol table")
   887  		}
   888  		bv := stackmapdata(stkmap, pcdata)
   889  		size = uintptr(bv.n) * sys.PtrSize
   890  		scanblock(frame.varp-size, size, bv.bytedata, gcw)
   891  	}
   892  
   893  	// Scan arguments.
   894  	if frame.arglen > 0 {
   895  		var bv bitvector
   896  		if frame.argmap != nil {
   897  			bv = *frame.argmap
   898  		} else {
   899  			stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   900  			if stkmap == nil || stkmap.n <= 0 {
   901  				print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
   902  				throw("missing stackmap")
   903  			}
   904  			if pcdata < 0 || pcdata >= stkmap.n {
   905  				// don't know where we are
   906  				print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   907  				throw("scanframe: bad symbol table")
   908  			}
   909  			bv = stackmapdata(stkmap, pcdata)
   910  		}
   911  		scanblock(frame.argp, uintptr(bv.n)*sys.PtrSize, bv.bytedata, gcw)
   912  	}
   913  }
   914  
   915  // queueRescan adds gp to the stack rescan list and clears
   916  // gp.gcscanvalid. The caller must own gp and ensure that gp isn't
   917  // already on the rescan list.
   918  func queueRescan(gp *g) {
   919  	if debug.gcrescanstacks == 0 {
   920  		// Clear gcscanvalid to keep assertions happy.
   921  		//
   922  		// TODO: Remove gcscanvalid entirely when we remove
   923  		// stack rescanning.
   924  		gp.gcscanvalid = false
   925  		return
   926  	}
   927  
   928  	if gcphase == _GCoff {
   929  		gp.gcscanvalid = false
   930  		return
   931  	}
   932  	if gp.gcRescan != -1 {
   933  		throw("g already on rescan list")
   934  	}
   935  
   936  	lock(&work.rescan.lock)
   937  	gp.gcscanvalid = false
   938  
   939  	// Recheck gcphase under the lock in case there was a phase change.
   940  	if gcphase == _GCoff {
   941  		unlock(&work.rescan.lock)
   942  		return
   943  	}
   944  	if len(work.rescan.list) == cap(work.rescan.list) {
   945  		throw("rescan list overflow")
   946  	}
   947  	n := len(work.rescan.list)
   948  	gp.gcRescan = int32(n)
   949  	work.rescan.list = work.rescan.list[:n+1]
   950  	work.rescan.list[n].set(gp)
   951  	unlock(&work.rescan.lock)
   952  }
   953  
   954  // dequeueRescan removes gp from the stack rescan list, if gp is on
   955  // the rescan list. The caller must own gp.
   956  func dequeueRescan(gp *g) {
   957  	if debug.gcrescanstacks == 0 {
   958  		return
   959  	}
   960  
   961  	if gp.gcRescan == -1 {
   962  		return
   963  	}
   964  	if gcphase == _GCoff {
   965  		gp.gcRescan = -1
   966  		return
   967  	}
   968  
   969  	lock(&work.rescan.lock)
   970  	if work.rescan.list[gp.gcRescan].ptr() != gp {
   971  		throw("bad dequeueRescan")
   972  	}
   973  	// Careful: gp may itself be the last G on the list.
   974  	last := work.rescan.list[len(work.rescan.list)-1]
   975  	work.rescan.list[gp.gcRescan] = last
   976  	last.ptr().gcRescan = gp.gcRescan
   977  	gp.gcRescan = -1
   978  	work.rescan.list = work.rescan.list[:len(work.rescan.list)-1]
   979  	unlock(&work.rescan.lock)
   980  }
   981  
   982  type gcDrainFlags int
   983  
   984  const (
   985  	gcDrainUntilPreempt gcDrainFlags = 1 << iota
   986  	gcDrainNoBlock
   987  	gcDrainFlushBgCredit
   988  
   989  	// gcDrainBlock means neither gcDrainUntilPreempt nor
   990  	// gcDrainNoBlock. It is the default, but callers should use
   991  	// the constant for documentation purposes.
   992  	gcDrainBlock gcDrainFlags = 0
   993  )
   994  
   995  // gcDrain scans roots and objects in work buffers, blackening grey
   996  // objects until all roots and work buffers have been drained.
   997  //
   998  // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
   999  // is set. This implies gcDrainNoBlock.
  1000  //
  1001  // If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
  1002  // unable to get more work. Otherwise, it will block until all
  1003  // blocking calls are blocked in gcDrain.
  1004  //
  1005  // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
  1006  // credit to gcController.bgScanCredit every gcCreditSlack units of
  1007  // scan work.
  1008  //
  1009  //go:nowritebarrier
  1010  func gcDrain(gcw *gcWork, flags gcDrainFlags) {
  1011  	if !writeBarrier.needed {
  1012  		throw("gcDrain phase incorrect")
  1013  	}
  1014  
  1015  	gp := getg().m.curg
  1016  	preemptible := flags&gcDrainUntilPreempt != 0
  1017  	blocking := flags&(gcDrainUntilPreempt|gcDrainNoBlock) == 0
  1018  	flushBgCredit := flags&gcDrainFlushBgCredit != 0
  1019  
  1020  	// Drain root marking jobs.
  1021  	if work.markrootNext < work.markrootJobs {
  1022  		for !(preemptible && gp.preempt) {
  1023  			job := atomic.Xadd(&work.markrootNext, +1) - 1
  1024  			if job >= work.markrootJobs {
  1025  				break
  1026  			}
  1027  			markroot(gcw, job)
  1028  		}
  1029  	}
  1030  
  1031  	initScanWork := gcw.scanWork
  1032  
  1033  	// Drain heap marking jobs.
  1034  	for !(preemptible && gp.preempt) {
  1035  		// Try to keep work available on the global queue. We used to
  1036  		// check if there were waiting workers, but it's better to
  1037  		// just keep work available than to make workers wait. In the
  1038  		// worst case, we'll do O(log(_WorkbufSize)) unnecessary
  1039  		// balances.
  1040  		if work.full == 0 {
  1041  			gcw.balance()
  1042  		}
  1043  
  1044  		var b uintptr
  1045  		if blocking {
  1046  			b = gcw.get()
  1047  		} else {
  1048  			b = gcw.tryGetFast()
  1049  			if b == 0 {
  1050  				b = gcw.tryGet()
  1051  			}
  1052  		}
  1053  		if b == 0 {
  1054  			// work barrier reached or tryGet failed.
  1055  			break
  1056  		}
  1057  		scanobject(b, gcw)
  1058  
  1059  		// Flush background scan work credit to the global
  1060  		// account if we've accumulated enough locally so
  1061  		// mutator assists can draw on it.
  1062  		if gcw.scanWork >= gcCreditSlack {
  1063  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1064  			if flushBgCredit {
  1065  				gcFlushBgCredit(gcw.scanWork - initScanWork)
  1066  				initScanWork = 0
  1067  			}
  1068  			gcw.scanWork = 0
  1069  		}
  1070  	}
  1071  
  1072  	// In blocking mode, write barriers are not allowed after this
  1073  	// point because we must preserve the condition that the work
  1074  	// buffers are empty.
  1075  
  1076  	// Flush remaining scan work credit.
  1077  	if gcw.scanWork > 0 {
  1078  		atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1079  		if flushBgCredit {
  1080  			gcFlushBgCredit(gcw.scanWork - initScanWork)
  1081  		}
  1082  		gcw.scanWork = 0
  1083  	}
  1084  }
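
        // exampleDrainFlags is an editor's sketch, not part of the original
        // source: a plausible flag combination for a background mark worker
        // that should yield when preempted and fund mutator assists as it
        // drains.
        func exampleDrainFlags() gcDrainFlags {
        	return gcDrainUntilPreempt | gcDrainFlushBgCredit
        }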
  1085  
  1086  // gcDrainN blackens grey objects until it has performed roughly
  1087  // scanWork units of scan work or the G is preempted. This is
  1088  // best-effort, so it may perform less work if it fails to get a work
  1089  // buffer. Otherwise, it will perform at least n units of work, but
  1090  // may perform more because scanning is always done in whole object
  1091  // increments. It returns the amount of scan work performed.
  1092  //
  1093  // The caller goroutine must be in a preemptible state (e.g.,
  1094  // _Gwaiting) to prevent deadlocks during stack scanning. As a
  1095  // consequence, this must be called on the system stack.
  1096  //
  1097  //go:nowritebarrier
  1098  //go:systemstack
  1099  func gcDrainN(gcw *gcWork, scanWork int64) int64 {
  1100  	if !writeBarrier.needed {
  1101  		throw("gcDrainN phase incorrect")
  1102  	}
  1103  
  1104  	// There may already be scan work on the gcw, which we don't
  1105  	// want to claim was done by this call.
  1106  	workFlushed := -gcw.scanWork
  1107  
  1108  	gp := getg().m.curg
  1109  	for !gp.preempt && workFlushed+gcw.scanWork < scanWork {
  1110  		// See gcDrain comment.
  1111  		if work.full == 0 {
  1112  			gcw.balance()
  1113  		}
  1114  
  1115  		// This might be a good place to add prefetch code...
  1116  		// if(wbuf.nobj > 4) {
  1117  		//         PREFETCH(wbuf->obj[wbuf.nobj - 3]);
  1118  		//  }
  1119  		//
  1120  		b := gcw.tryGetFast()
  1121  		if b == 0 {
  1122  			b = gcw.tryGet()
  1123  		}
  1124  
  1125  		if b == 0 {
  1126  			// Try to do a root job.
  1127  			//
  1128  			// TODO: Assists should get credit for this
  1129  			// work.
  1130  			if work.markrootNext < work.markrootJobs {
  1131  				job := atomic.Xadd(&work.markrootNext, +1) - 1
  1132  				if job < work.markrootJobs {
  1133  					markroot(gcw, job)
  1134  					continue
  1135  				}
  1136  			}
  1137  			// No heap or root jobs.
  1138  			break
  1139  		}
  1140  		scanobject(b, gcw)
  1141  
  1142  		// Flush background scan work credit.
  1143  		if gcw.scanWork >= gcCreditSlack {
  1144  			atomic.Xaddint64(&gcController.scanWork, gcw.scanWork)
  1145  			workFlushed += gcw.scanWork
  1146  			gcw.scanWork = 0
  1147  		}
  1148  	}
  1149  
  1150  	// Unlike gcDrain, there's no need to flush remaining work
  1151  	// here because this never flushes to bgScanCredit and
  1152  	// gcw.dispose will flush any remaining work to scanWork.
  1153  
  1154  	return workFlushed + gcw.scanWork
  1155  }
  1156  
  1157  // scanblock scans b as scanobject would, but using an explicit
  1158  // pointer bitmap instead of the heap bitmap.
  1159  //
  1160  // This is used to scan non-heap roots, so it does not update
  1161  // gcw.bytesMarked or gcw.scanWork.
  1162  //
  1163  //go:nowritebarrier
  1164  func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
  1165  	// Use local copies of original parameters, so that a stack trace
  1166  	// due to one of the throws below shows the original block
  1167  	// base and extent.
  1168  	b := b0
  1169  	n := n0
  1170  
  1171  	arena_start := mheap_.arena_start
  1172  	arena_used := mheap_.arena_used
  1173  
  1174  	for i := uintptr(0); i < n; {
  1175  		// Find bits for the next word.
  1176  		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
  1177  		if bits == 0 {
  1178  			i += sys.PtrSize * 8
  1179  			continue
  1180  		}
  1181  		for j := 0; j < 8 && i < n; j++ {
  1182  			if bits&1 != 0 {
  1183  				// Same work as in scanobject; see comments there.
  1184  				obj := *(*uintptr)(unsafe.Pointer(b + i))
  1185  				if obj != 0 && arena_start <= obj && obj < arena_used {
  1186  					if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
  1187  						greyobject(obj, b, i, hbits, span, gcw, objIndex)
  1188  					}
  1189  				}
  1190  			}
  1191  			bits >>= 1
  1192  			i += sys.PtrSize
  1193  		}
  1194  	}
  1195  }
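
        // examplePtrmask is an editor's sketch, not part of the original
        // source: a ptrmask like the ones scanblock consumes packs one bit
        // per pointer-sized word, least significant bit first. A mask byte of
        // 0x5 (binary 0000_0101) says words 0 and 2 of the block hold
        // pointers.
        func examplePtrmask() []int {
        	bits := uint32(0x5) // hypothetical mask byte, binary 0000_0101
        	var ptrWords []int
        	for j := 0; j < 8; j++ {
        		if bits&1 != 0 {
        			ptrWords = append(ptrWords, j)
        		}
        		bits >>= 1
        	}
        	return ptrWords // == [0 2]
        }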
  1196  
  1197  // scanobject scans the object starting at b, adding pointers to gcw.
  1198  // b must point to the beginning of a heap object or an oblet.
  1199  // scanobject consults the GC bitmap for the pointer mask and the
  1200  // spans for the size of the object.
  1201  //
  1202  //go:nowritebarrier
  1203  func scanobject(b uintptr, gcw *gcWork) {
  1204  	// Note that arena_used may change concurrently during
  1205  	// scanobject and hence scanobject may encounter a pointer to
  1206  	// a newly allocated heap object that is *not* in
  1207  	// [start,used). It will not mark this object; however, we
  1208  	// know that it was just installed by a mutator, which means
  1209  	// that mutator will execute a write barrier and take care of
  1210  	// marking it. This is even more pronounced on relaxed memory
  1211  	// architectures since we access arena_used without barriers
  1212  	// or synchronization, but the same logic applies.
  1213  	arena_start := mheap_.arena_start
  1214  	arena_used := mheap_.arena_used
  1215  
  1216  	// Find the bits for b and the size of the object at b.
  1217  	//
  1218  	// b is either the beginning of an object, in which case this
  1219  	// is the size of the object to scan, or it points to an
  1220  	// oblet, in which case we compute the size to scan below.
  1221  	hbits := heapBitsForAddr(b)
  1222  	s := spanOfUnchecked(b)
  1223  	n := s.elemsize
  1224  	if n == 0 {
  1225  		throw("scanobject n == 0")
  1226  	}
  1227  
  1228  	if n > maxObletBytes {
  1229  		// Large object. Break into oblets for better
  1230  		// parallelism and lower latency.
  1231  		if b == s.base() {
  1232  			// It's possible this is a noscan object (not
  1233  			// from greyobject, but from other code
  1234  			// paths), in which case we must *not* enqueue
  1235  			// oblets since their bitmaps will be
  1236  			// uninitialized.
  1237  			if !hbits.hasPointers(n) {
  1238  				// Bypass the whole scan.
  1239  				gcw.bytesMarked += uint64(n)
  1240  				return
  1241  			}
  1242  
  1243  			// Enqueue the other oblets to scan later.
  1244  			// Some oblets may be in b's scalar tail, but
  1245  			// these will be marked as "no more pointers",
  1246  			// so we'll drop out immediately when we go to
  1247  			// scan those.
  1248  			for oblet := b + maxObletBytes; oblet < s.base()+s.elemsize; oblet += maxObletBytes {
  1249  				if !gcw.putFast(oblet) {
  1250  					gcw.put(oblet)
  1251  				}
  1252  			}
  1253  		}
  1254  
  1255  		// Compute the size of the oblet. Since this object
  1256  		// must be a large object, s.base() is the beginning
  1257  		// of the object.
  1258  		n = s.base() + s.elemsize - b
  1259  		if n > maxObletBytes {
  1260  			n = maxObletBytes
  1261  		}
  1262  	}
  1263  
  1264  	var i uintptr
  1265  	for i = 0; i < n; i += sys.PtrSize {
  1266  		// Find bits for this word.
  1267  		if i != 0 {
  1268  			// Avoid needless hbits.next() on last iteration.
  1269  			hbits = hbits.next()
  1270  		}
  1271  		// Load bits once. See CL 22712 and issue 16973 for discussion.
  1272  		bits := hbits.bits()
  1273  		// During checkmarking, 1-word objects store the checkmark
  1274  		// in the type bit for the one word. The only one-word objects
  1275  		// are pointers, or else they'd be merged with other non-pointer
  1276  		// data into larger allocations.
  1277  		if i != 1*sys.PtrSize && bits&bitScan == 0 {
  1278  			break // no more pointers in this object
  1279  		}
  1280  		if bits&bitPointer == 0 {
  1281  			continue // not a pointer
  1282  		}
  1283  
  1284  		// Work here is duplicated in scanblock and above.
  1285  		// If you make changes here, make changes there too.
  1286  		obj := *(*uintptr)(unsafe.Pointer(b + i))
  1287  
  1288  		// At this point we have extracted the next potential pointer.
  1289  		// Check if it points into heap and not back at the current object.
  1290  		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
  1291  			// Mark the object.
  1292  			if obj, hbits, span, objIndex := heapBitsForObject(obj, b, i); obj != 0 {
  1293  				greyobject(obj, b, i, hbits, span, gcw, objIndex)
  1294  			}
  1295  		}
  1296  	}
  1297  	gcw.bytesMarked += uint64(n)
  1298  	gcw.scanWork += int64(i)
  1299  }
  1300  
  1301  // Shade the object if it isn't already.
  1302  // The object is not nil and known to be in the heap.
  1303  // Preemption must be disabled.
  1304  //go:nowritebarrier
  1305  func shade(b uintptr) {
  1306  	if obj, hbits, span, objIndex := heapBitsForObject(b, 0, 0); obj != 0 {
  1307  		gcw := &getg().m.p.ptr().gcw
  1308  		greyobject(obj, 0, 0, hbits, span, gcw, objIndex)
  1309  		if gcphase == _GCmarktermination || gcBlackenPromptly {
  1310  			// Ps aren't allowed to cache work during mark
  1311  			// termination.
  1312  			gcw.dispose()
  1313  		}
  1314  	}
  1315  }
  1316  
  1317  // obj is the start of an object with mark mbits.
  1318  // If it isn't already marked, mark it and enqueue into gcw.
  1319  // base and off are for debugging only and could be removed.
  1320  //go:nowritebarrierrec
  1321  func greyobject(obj, base, off uintptr, hbits heapBits, span *mspan, gcw *gcWork, objIndex uintptr) {
  1322  	// obj should be start of allocation, and so must be at least pointer-aligned.
  1323  	if obj&(sys.PtrSize-1) != 0 {
  1324  		throw("greyobject: obj not pointer-aligned")
  1325  	}
  1326  	mbits := span.markBitsForIndex(objIndex)
  1327  
  1328  	if useCheckmark {
  1329  		if !mbits.isMarked() {
  1330  			printlock()
  1331  			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
  1332  			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
  1333  
  1334  			// Dump the source (base) object
  1335  			gcDumpObject("base", base, off)
  1336  
  1337  			// Dump the object
  1338  			gcDumpObject("obj", obj, ^uintptr(0))
  1339  
  1340  			throw("checkmark found unmarked object")
  1341  		}
  1342  		if hbits.isCheckmarked(span.elemsize) {
  1343  			return
  1344  		}
  1345  		hbits.setCheckmarked(span.elemsize)
  1346  		if !hbits.isCheckmarked(span.elemsize) {
  1347  			throw("setCheckmarked and isCheckmarked disagree")
  1348  		}
  1349  	} else {
  1350  		if debug.gccheckmark > 0 && span.isFree(objIndex) {
  1351  			print("runtime: marking free object ", hex(obj), " found at *(", hex(base), "+", hex(off), ")\n")
  1352  			gcDumpObject("base", base, off)
  1353  			gcDumpObject("obj", obj, ^uintptr(0))
  1354  			throw("marking free object")
  1355  		}
  1356  
  1357  		// If marked we have nothing to do.
  1358  		if mbits.isMarked() {
  1359  			return
  1360  		}
  1361  		// mbits.setMarked() // Avoid extra call overhead with manual inlining.
  1362  		atomic.Or8(mbits.bytep, mbits.mask)
  1363  		// If this is a noscan object, fast-track it to black
  1364  		// instead of greying it.
  1365  		if !hbits.hasPointers(span.elemsize) {
  1366  			gcw.bytesMarked += uint64(span.elemsize)
  1367  			return
  1368  		}
  1369  	}
  1370  
  1371  	// Queue the obj for scanning. The PREFETCH(obj) logic has been removed but
  1372  	// seems like a nice optimization that can be added back in.
  1373  	// There needs to be time between the PREFETCH and the use.
  1374  	// Previously we put the obj in an 8 element buffer that is drained at a rate
  1375  	// to give the PREFETCH time to do its work.
  1376  	// Use of PREFETCHNTA might be more appropriate than PREFETCH
  1377  	if !gcw.putFast(obj) {
  1378  		gcw.put(obj)
  1379  	}
  1380  }
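
        // exampleMarkBit is an editor's sketch, not part of the original
        // source: the manually inlined setMarked above is an atomic OR of
        // this object's bit into a shared mark byte, leaving the other seven
        // objects' bits in that byte untouched.
        func exampleMarkBit() uint8 {
        	markByte := uint8(0x10) // another object in this byte is already marked
        	mask := uint8(0x02)     // this object's mark bit
        	markByte |= mask        // the real code uses atomic.Or8(mbits.bytep, mbits.mask)
        	return markByte // == 0x12: both marks set
        }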
  1381  
  1382  // gcDumpObject dumps the contents of obj for debugging and marks the
  1383  // field at byte offset off in obj.
  1384  func gcDumpObject(label string, obj, off uintptr) {
  1385  	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
  1386  		print(label, "=", hex(obj), " is not in the Go heap\n")
  1387  		return
  1388  	}
  1389  	k := obj >> _PageShift
  1390  	x := k
  1391  	x -= mheap_.arena_start >> _PageShift
  1392  	s := mheap_.spans[x]
  1393  	print(label, "=", hex(obj), " k=", hex(k))
  1394  	if s == nil {
  1395  		print(" s=nil\n")
  1396  		return
  1397  	}
  1398  	print(" s.base()=", hex(s.base()), " s.limit=", hex(s.limit), " s.sizeclass=", s.sizeclass, " s.elemsize=", s.elemsize, " s.state=")
  1399  	if 0 <= s.state && int(s.state) < len(mSpanStateNames) {
  1400  		print(mSpanStateNames[s.state], "\n")
  1401  	} else {
  1402  		print("unknown(", s.state, ")\n")
  1403  	}
  1404  
  1405  	skipped := false
  1406  	size := s.elemsize
  1407  	if s.state == _MSpanStack && size == 0 {
  1408  		// We're printing something from a stack frame. We
  1409  		// don't know how big it is, so just show up to and
  1410  		// including off.
  1411  		size = off + sys.PtrSize
  1412  	}
  1413  	for i := uintptr(0); i < size; i += sys.PtrSize {
  1414  		// For big objects, just print the beginning (because
  1415  		// that usually hints at the object's type) and the
  1416  		// fields around off.
  1417  		if !(i < 128*sys.PtrSize || off-16*sys.PtrSize < i && i < off+16*sys.PtrSize) {
  1418  			skipped = true
  1419  			continue
  1420  		}
  1421  		if skipped {
  1422  			print(" ...\n")
  1423  			skipped = false
  1424  		}
  1425  		print(" *(", label, "+", i, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + i))))
  1426  		if i == off {
  1427  			print(" <==")
  1428  		}
  1429  		print("\n")
  1430  	}
  1431  	if skipped {
  1432  		print(" ...\n")
  1433  	}
  1434  }
  1435  
  1436  // gcmarknewobject marks a newly allocated object black. obj must
  1437  // not contain any non-nil pointers.
  1438  //
  1439  // This is nosplit so it can manipulate a gcWork without preemption.
  1440  //
  1441  //go:nowritebarrier
  1442  //go:nosplit
  1443  func gcmarknewobject(obj, size, scanSize uintptr) {
  1444  	if useCheckmark && !gcBlackenPromptly { // The world should be stopped so this should not happen.
  1445  		throw("gcmarknewobject called while doing checkmark")
  1446  	}
  1447  	markBitsForAddr(obj).setMarked()
  1448  	gcw := &getg().m.p.ptr().gcw
  1449  	gcw.bytesMarked += uint64(size)
  1450  	gcw.scanWork += int64(scanSize)
  1451  	if gcBlackenPromptly {
  1452  		// There shouldn't be anything in the work queue, but
  1453  		// we still need to flush stats.
  1454  		gcw.dispose()
  1455  	}
  1456  }
  1457  
  1458  // gcMarkTinyAllocs greys all active tiny alloc blocks.
  1459  //
  1460  // The world must be stopped.
  1461  func gcMarkTinyAllocs() {
  1462  	for _, p := range &allp {
  1463  		if p == nil || p.status == _Pdead {
  1464  			break
  1465  		}
  1466  		c := p.mcache
  1467  		if c == nil || c.tiny == 0 {
  1468  			continue
  1469  		}
  1470  		_, hbits, span, objIndex := heapBitsForObject(c.tiny, 0, 0)
  1471  		gcw := &p.gcw
  1472  		greyobject(c.tiny, 0, 0, hbits, span, gcw, objIndex)
  1473  		if gcBlackenPromptly {
  1474  			gcw.dispose()
  1475  		}
  1476  	}
  1477  }
  1478  
  1479  // Checkmarking
  1480  
  1481  // To help debug the concurrent GC we remark with the world
  1482  // stopped ensuring that any object encountered has its normal
  1483  // mark bit set. To do this we use an orthogonal bit
  1484  // pattern to indicate the object is marked. The following pattern
  1485  // uses the upper two bits in the object's boundary nibble.
  1486  // 01: scalar  not marked
  1487  // 10: pointer not marked
  1488  // 11: pointer     marked
  1489  // 00: scalar      marked
  1490  // Xoring with 01 will flip the pattern from marked to unmarked and vice versa.
  1491  // The higher bit is 1 for pointers and 0 for scalars, whether the object
  1492  // is marked or not.
  1493  // The first nibble no longer holds the typeDead pattern indicating that
  1494  // there are no more pointers in the object. This information is held
  1495  // in the second nibble.
  1496  
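        // exampleCheckmarkFlip is an editor's sketch, not part of the
        // original source: XORing the two-bit patterns above with 01 toggles
        // the mark state while preserving the pointer/scalar bit, e.g.
        // 10 (pointer, not marked) ^ 01 = 11 (pointer, marked).
        func exampleCheckmarkFlip(bits uint8) uint8 {
        	return bits ^ 0x1 // flip marked/unmarked; the high (pointer) bit is untouched
        }
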
  1497  // If useCheckmark is true, marking of an object uses the
  1498  // checkmark bits (encoding above) instead of the standard
  1499  // mark bits.
  1500  var useCheckmark = false
  1501  
  1502  //go:nowritebarrier
  1503  func initCheckmarks() {
  1504  	useCheckmark = true
  1505  	for _, s := range mheap_.allspans {
  1506  		if s.state == _MSpanInUse {
  1507  			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
  1508  		}
  1509  	}
  1510  }
  1511  
  1512  func clearCheckmarks() {
  1513  	useCheckmark = false
  1514  	for _, s := range mheap_.allspans {
  1515  		if s.state == _MSpanInUse {
  1516  			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
  1517  		}
  1518  	}
  1519  }