github.com/aloncn/graphics-go@v0.0.1/src/runtime/mgc.go (about)

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
     6  // It has gotten completely out of control.
     7  
     8  // Garbage collector (GC).
     9  //
    10  // The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple
    11  // GC thread to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
    12  // non-generational and non-compacting. Allocation is done using size segregated per P allocation
    13  // areas to minimize fragmentation while eliminating locks in the common case.
    14  //
    15  // The algorithm decomposes into several steps.
    16  // This is a high level description of the algorithm being used. For an overview of GC a good
    17  // place to start is Richard Jones' gchandbook.org.
    18  //
    19  // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
    20  // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
    21  // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
    22  // 966-975.
    23  // For journal quality proofs that these steps are complete, correct, and terminate see
    24  // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
    25  // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
    26  //
    27  //  0. Set phase = GCscan from GCoff.
    28  //  1. Wait for all P's to acknowledge phase change.
    29  //         At this point all goroutines have passed through a GC safepoint and
    30  //         know we are in the GCscan phase.
    31  //  2. GC scans all goroutine stacks, mark and enqueues all encountered pointers
    32  //       (marking avoids most duplicate enqueuing but races may produce benign duplication).
    33  //       Preempted goroutines are scanned before P schedules next goroutine.
    34  //  3. Set phase = GCmark.
    35  //  4. Wait for all P's to acknowledge phase change.
    36  //  5. Now write barrier marks and enqueues black, grey, or white to white pointers.
    37  //       Malloc still allocates white (non-marked) objects.
    38  //  6. Meanwhile GC transitively walks the heap marking reachable objects.
    39  //  7. When GC finishes marking heap, it preempts P's one-by-one and
    40  //       retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
    41  //       currently scheduled on the P).
    42  //  8. Once the GC has exhausted all available marking work it sets phase = marktermination.
    43  //  9. Wait for all P's to acknowledge phase change.
    44  // 10. Malloc now allocates black objects, so number of unmarked reachable objects
    45  //        monotonically decreases.
    46  // 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
    47  //        reachable objects.
    48  // 12. When GC completes a full cycle over P's and discovers no new grey
    49  //         objects, (which means all reachable objects are marked) set phase = GCoff.
    50  // 13. Wait for all P's to acknowledge phase change.
    51  // 14. Now malloc allocates white (but sweeps spans before use).
    52  //         Write barrier becomes nop.
    53  // 15. GC does background sweeping, see description below.
    54  // 16. When sufficient allocation has taken place replay the sequence starting at 0 above,
    55  //         see discussion of GC rate below.
    56  
    57  // Changing phases.
    58  // Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
    59  // All phase action must be benign in the presence of a change.
    60  // Starting with GCoff
    61  // GCoff to GCscan
    62  //     GSscan scans stacks and globals greying them and never marks an object black.
    63  //     Once all the P's are aware of the new phase they will scan gs on preemption.
    64  //     This means that the scanning of preempted gs can't start until all the Ps
    65  //     have acknowledged.
    66  //     When a stack is scanned, this phase also installs stack barriers to
    67  //     track how much of the stack has been active.
    68  //     This transition enables write barriers because stack barriers
    69  //     assume that writes to higher frames will be tracked by write
    70  //     barriers. Technically this only needs write barriers for writes
    71  //     to stack slots, but we enable write barriers in general.
    72  // GCscan to GCmark
    73  //     In GCmark, work buffers are drained until there are no more
    74  //     pointers to scan.
    75  //     No scanning of objects (making them black) can happen until all
    76  //     Ps have enabled the write barrier, but that already happened in
    77  //     the transition to GCscan.
    78  // GCmark to GCmarktermination
    79  //     The only change here is that we start allocating black so the Ps must acknowledge
    80  //     the change before we begin the termination algorithm
    81  // GCmarktermination to GSsweep
    82  //     Object currently on the freelist must be marked black for this to work.
    83  //     Are things on the free lists black or white? How does the sweep phase work?
    84  
    85  // Concurrent sweep.
    86  //
    87  // The sweep phase proceeds concurrently with normal program execution.
    88  // The heap is swept span-by-span both lazily (when a goroutine needs another span)
    89  // and concurrently in a background goroutine (this helps programs that are not CPU bound).
    90  // At the end of STW mark termination all spans are marked as "needs sweeping".
    91  //
    92  // The background sweeper goroutine simply sweeps spans one-by-one.
    93  //
    94  // To avoid requesting more OS memory while there are unswept spans, when a
    95  // goroutine needs another span, it first attempts to reclaim that much memory
    96  // by sweeping. When a goroutine needs to allocate a new small-object span, it
    97  // sweeps small-object spans for the same object size until it frees at least
    98  // one object. When a goroutine needs to allocate large-object span from heap,
    99  // it sweeps spans until it frees at least that many pages into heap. There is
   100  // one case where this may not suffice: if a goroutine sweeps and frees two
   101  // nonadjacent one-page spans to the heap, it will allocate a new two-page
   102  // span, but there can still be other one-page unswept spans which could be
   103  // combined into a two-page span.
   104  //
   105  // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
   106  // mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
   107  // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
   108  // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
   109  // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
   110  // The finalizer goroutine is kicked off only when all spans are swept.
   111  // When the next GC starts, it sweeps all not-yet-swept spans (if any).
   112  
   113  // GC rate.
   114  // Next GC is after we've allocated an extra amount of memory proportional to
   115  // the amount already in use. The proportion is controlled by GOGC environment variable
   116  // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
   117  // (this mark is tracked in next_gc variable). This keeps the GC cost in linear
   118  // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
   119  // (and also the amount of extra memory used).
   120  
   121  package runtime
   122  
   123  import (
   124  	"runtime/internal/atomic"
   125  	"runtime/internal/sys"
   126  	"unsafe"
   127  )
   128  
   129  const (
   130  	_DebugGC         = 0
   131  	_ConcurrentSweep = true
   132  	_FinBlockSize    = 4 * 1024
   133  
   134  	// sweepMinHeapDistance is a lower bound on the heap distance
   135  	// (in bytes) reserved for concurrent sweeping between GC
   136  	// cycles. This will be scaled by gcpercent/100.
   137  	sweepMinHeapDistance = 1024 * 1024
   138  )
   139  
   140  // heapminimum is the minimum heap size at which to trigger GC.
   141  // For small heaps, this overrides the usual GOGC*live set rule.
   142  //
   143  // When there is a very small live set but a lot of allocation, simply
   144  // collecting when the heap reaches GOGC*live results in many GC
   145  // cycles and high total per-GC overhead. This minimum amortizes this
   146  // per-GC overhead while keeping the heap reasonably small.
   147  //
   148  // During initialization this is set to 4MB*GOGC/100. In the case of
   149  // GOGC==0, this will set heapminimum to 0, resulting in constant
   150  // collection even when the heap size is small, which is useful for
   151  // debugging.
   152  var heapminimum uint64 = defaultHeapMinimum
   153  
   154  // defaultHeapMinimum is the value of heapminimum for GOGC==100.
   155  const defaultHeapMinimum = 4 << 20
   156  
   157  // Initialized from $GOGC.  GOGC=off means no GC.
   158  var gcpercent int32
   159  
   160  func gcinit() {
   161  	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
   162  		throw("size of Workbuf is suboptimal")
   163  	}
   164  
   165  	_ = setGCPercent(readgogc())
   166  	for datap := &firstmoduledata; datap != nil; datap = datap.next {
   167  		datap.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcdata)), datap.edata-datap.data)
   168  		datap.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcbss)), datap.ebss-datap.bss)
   169  	}
   170  	memstats.next_gc = heapminimum
   171  	work.startSema = 1
   172  	work.markDoneSema = 1
   173  }
   174  
   175  func readgogc() int32 {
   176  	p := gogetenv("GOGC")
   177  	if p == "" {
   178  		return 100
   179  	}
   180  	if p == "off" {
   181  		return -1
   182  	}
   183  	return int32(atoi(p))
   184  }
   185  
   186  // gcenable is called after the bulk of the runtime initialization,
   187  // just before we're about to start letting user code run.
   188  // It kicks off the background sweeper goroutine and enables GC.
   189  func gcenable() {
   190  	c := make(chan int, 1)
   191  	go bgsweep(c)
   192  	<-c
   193  	memstats.enablegc = true // now that runtime is initialized, GC is okay
   194  }
   195  
   196  //go:linkname setGCPercent runtime/debug.setGCPercent
   197  func setGCPercent(in int32) (out int32) {
   198  	lock(&mheap_.lock)
   199  	out = gcpercent
   200  	if in < 0 {
   201  		in = -1
   202  	}
   203  	gcpercent = in
   204  	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
   205  	if gcController.triggerRatio > float64(gcpercent)/100 {
   206  		gcController.triggerRatio = float64(gcpercent) / 100
   207  	}
   208  	unlock(&mheap_.lock)
   209  	return out
   210  }
   211  
   212  // Garbage collector phase.
   213  // Indicates to write barrier and sychronization task to preform.
   214  var gcphase uint32
   215  
   216  // The compiler knows about this variable.
   217  // If you change it, you must change the compiler too.
   218  var writeBarrier struct {
   219  	enabled bool // compiler emits a check of this before calling write barrier
   220  	needed  bool // whether we need a write barrier for current GC phase
   221  	cgo     bool // whether we need a write barrier for a cgo check
   222  }
   223  
   224  // gcBlackenEnabled is 1 if mutator assists and background mark
   225  // workers are allowed to blacken objects. This must only be set when
   226  // gcphase == _GCmark.
   227  var gcBlackenEnabled uint32
   228  
   229  // gcBlackenPromptly indicates that optimizations that may
   230  // hide work from the global work queue should be disabled.
   231  //
   232  // If gcBlackenPromptly is true, per-P gcWork caches should
   233  // be flushed immediately and new objects should be allocated black.
   234  //
   235  // There is a tension between allocating objects white and
   236  // allocating them black. If white and the objects die before being
   237  // marked they can be collected during this GC cycle. On the other
   238  // hand allocating them black will reduce _GCmarktermination latency
   239  // since more work is done in the mark phase. This tension is resolved
   240  // by allocating white until the mark phase is approaching its end and
   241  // then allocating black for the remainder of the mark phase.
   242  var gcBlackenPromptly bool
   243  
   244  const (
   245  	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
   246  	_GCmark                   // GC marking roots and workbufs, write barrier ENABLED
   247  	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
   248  )
   249  
   250  //go:nosplit
   251  func setGCPhase(x uint32) {
   252  	atomic.Store(&gcphase, x)
   253  	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
   254  	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
   255  }
   256  
   257  // gcMarkWorkerMode represents the mode that a concurrent mark worker
   258  // should operate in.
   259  //
   260  // Concurrent marking happens through four different mechanisms. One
   261  // is mutator assists, which happen in response to allocations and are
   262  // not scheduled. The other three are variations in the per-P mark
   263  // workers and are distinguished by gcMarkWorkerMode.
   264  type gcMarkWorkerMode int
   265  
   266  const (
   267  	// gcMarkWorkerDedicatedMode indicates that the P of a mark
   268  	// worker is dedicated to running that mark worker. The mark
   269  	// worker should run without preemption.
   270  	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota
   271  
   272  	// gcMarkWorkerFractionalMode indicates that a P is currently
   273  	// running the "fractional" mark worker. The fractional worker
   274  	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
   275  	// integer. The fractional worker should run until it is
   276  	// preempted and will be scheduled to pick up the fractional
   277  	// part of GOMAXPROCS*gcGoalUtilization.
   278  	gcMarkWorkerFractionalMode
   279  
   280  	// gcMarkWorkerIdleMode indicates that a P is running the mark
   281  	// worker because it has nothing else to do. The idle worker
   282  	// should run until it is preempted and account its time
   283  	// against gcController.idleMarkTime.
   284  	gcMarkWorkerIdleMode
   285  )
   286  
   287  // gcController implements the GC pacing controller that determines
   288  // when to trigger concurrent garbage collection and how much marking
   289  // work to do in mutator assists and background marking.
   290  //
   291  // It uses a feedback control algorithm to adjust the memstats.next_gc
   292  // trigger based on the heap growth and GC CPU utilization each cycle.
   293  // This algorithm optimizes for heap growth to match GOGC and for CPU
   294  // utilization between assist and background marking to be 25% of
   295  // GOMAXPROCS. The high-level design of this algorithm is documented
   296  // at https://golang.org/s/go15gcpacing.
   297  var gcController = gcControllerState{
   298  	// Initial trigger ratio guess.
   299  	triggerRatio: 7 / 8.0,
   300  }
   301  
   302  type gcControllerState struct {
   303  	// scanWork is the total scan work performed this cycle. This
   304  	// is updated atomically during the cycle. Updates occur in
   305  	// bounded batches, since it is both written and read
   306  	// throughout the cycle.
   307  	//
   308  	// Currently this is the bytes of heap scanned. For most uses,
   309  	// this is an opaque unit of work, but for estimation the
   310  	// definition is important.
   311  	scanWork int64
   312  
   313  	// bgScanCredit is the scan work credit accumulated by the
   314  	// concurrent background scan. This credit is accumulated by
   315  	// the background scan and stolen by mutator assists. This is
   316  	// updated atomically. Updates occur in bounded batches, since
   317  	// it is both written and read throughout the cycle.
   318  	bgScanCredit int64
   319  
   320  	// assistTime is the nanoseconds spent in mutator assists
   321  	// during this cycle. This is updated atomically. Updates
   322  	// occur in bounded batches, since it is both written and read
   323  	// throughout the cycle.
   324  	assistTime int64
   325  
   326  	// dedicatedMarkTime is the nanoseconds spent in dedicated
   327  	// mark workers during this cycle. This is updated atomically
   328  	// at the end of the concurrent mark phase.
   329  	dedicatedMarkTime int64
   330  
   331  	// fractionalMarkTime is the nanoseconds spent in the
   332  	// fractional mark worker during this cycle. This is updated
   333  	// atomically throughout the cycle and will be up-to-date if
   334  	// the fractional mark worker is not currently running.
   335  	fractionalMarkTime int64
   336  
   337  	// idleMarkTime is the nanoseconds spent in idle marking
   338  	// during this cycle. This is updated atomically throughout
   339  	// the cycle.
   340  	idleMarkTime int64
   341  
   342  	// bgMarkStartTime is the absolute start time in nanoseconds
   343  	// that the background mark phase started.
   344  	bgMarkStartTime int64
   345  
   346  	// assistTime is the absolute start time in nanoseconds that
   347  	// mutator assists were enabled.
   348  	assistStartTime int64
   349  
   350  	// heapGoal is the goal memstats.heap_live for when this cycle
   351  	// ends. This is computed at the beginning of each cycle.
   352  	heapGoal uint64
   353  
   354  	// dedicatedMarkWorkersNeeded is the number of dedicated mark
   355  	// workers that need to be started. This is computed at the
   356  	// beginning of each cycle and decremented atomically as
   357  	// dedicated mark workers get started.
   358  	dedicatedMarkWorkersNeeded int64
   359  
   360  	// assistWorkPerByte is the ratio of scan work to allocated
   361  	// bytes that should be performed by mutator assists. This is
   362  	// computed at the beginning of each cycle and updated every
   363  	// time heap_scan is updated.
   364  	assistWorkPerByte float64
   365  
   366  	// assistBytesPerWork is 1/assistWorkPerByte.
   367  	assistBytesPerWork float64
   368  
   369  	// fractionalUtilizationGoal is the fraction of wall clock
   370  	// time that should be spent in the fractional mark worker.
   371  	// For example, if the overall mark utilization goal is 25%
   372  	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
   373  	// and this will be set to 0.5 so that 50% of the time some P
   374  	// is in a fractional mark worker. This is computed at the
   375  	// beginning of each cycle.
   376  	fractionalUtilizationGoal float64
   377  
   378  	// triggerRatio is the heap growth ratio at which the garbage
   379  	// collection cycle should start. E.g., if this is 0.6, then
   380  	// GC should start when the live heap has reached 1.6 times
   381  	// the heap size marked by the previous cycle. This is updated
   382  	// at the end of of each cycle.
   383  	triggerRatio float64
   384  
   385  	_ [sys.CacheLineSize]byte
   386  
   387  	// fractionalMarkWorkersNeeded is the number of fractional
   388  	// mark workers that need to be started. This is either 0 or
   389  	// 1. This is potentially updated atomically at every
   390  	// scheduling point (hence it gets its own cache line).
   391  	fractionalMarkWorkersNeeded int64
   392  
   393  	_ [sys.CacheLineSize]byte
   394  }
   395  
   396  // startCycle resets the GC controller's state and computes estimates
   397  // for a new GC cycle. The caller must hold worldsema.
   398  func (c *gcControllerState) startCycle() {
   399  	c.scanWork = 0
   400  	c.bgScanCredit = 0
   401  	c.assistTime = 0
   402  	c.dedicatedMarkTime = 0
   403  	c.fractionalMarkTime = 0
   404  	c.idleMarkTime = 0
   405  
   406  	// If this is the first GC cycle or we're operating on a very
   407  	// small heap, fake heap_marked so it looks like next_gc is
   408  	// the appropriate growth from heap_marked, even though the
   409  	// real heap_marked may not have a meaningful value (on the
   410  	// first cycle) or may be much smaller (resulting in a large
   411  	// error response).
   412  	if memstats.next_gc <= heapminimum {
   413  		memstats.heap_marked = uint64(float64(memstats.next_gc) / (1 + c.triggerRatio))
   414  		memstats.heap_reachable = memstats.heap_marked
   415  	}
   416  
   417  	// Compute the heap goal for this cycle
   418  	c.heapGoal = memstats.heap_reachable + memstats.heap_reachable*uint64(gcpercent)/100
   419  
   420  	// Ensure that the heap goal is at least a little larger than
   421  	// the current live heap size. This may not be the case if GC
   422  	// start is delayed or if the allocation that pushed heap_live
   423  	// over next_gc is large or if the trigger is really close to
   424  	// GOGC. Assist is proportional to this distance, so enforce a
   425  	// minimum distance, even if it means going over the GOGC goal
   426  	// by a tiny bit.
   427  	if c.heapGoal < memstats.heap_live+1024*1024 {
   428  		c.heapGoal = memstats.heap_live + 1024*1024
   429  	}
   430  
   431  	// Compute the total mark utilization goal and divide it among
   432  	// dedicated and fractional workers.
   433  	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
   434  	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
   435  	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
   436  	if c.fractionalUtilizationGoal > 0 {
   437  		c.fractionalMarkWorkersNeeded = 1
   438  	} else {
   439  		c.fractionalMarkWorkersNeeded = 0
   440  	}
   441  
   442  	// Clear per-P state
   443  	for _, p := range &allp {
   444  		if p == nil {
   445  			break
   446  		}
   447  		p.gcAssistTime = 0
   448  	}
   449  
   450  	// Compute initial values for controls that are updated
   451  	// throughout the cycle.
   452  	c.revise()
   453  
   454  	if debug.gcpacertrace > 0 {
   455  		print("pacer: assist ratio=", c.assistWorkPerByte,
   456  			" (scan ", memstats.heap_scan>>20, " MB in ",
   457  			work.initialHeapLive>>20, "->",
   458  			c.heapGoal>>20, " MB)",
   459  			" workers=", c.dedicatedMarkWorkersNeeded,
   460  			"+", c.fractionalMarkWorkersNeeded, "\n")
   461  	}
   462  }
   463  
   464  // revise updates the assist ratio during the GC cycle to account for
   465  // improved estimates. This should be called either under STW or
   466  // whenever memstats.heap_scan or memstats.heap_live is updated (with
   467  // mheap_.lock held).
   468  //
   469  // It should only be called when gcBlackenEnabled != 0 (because this
   470  // is when assists are enabled and the necessary statistics are
   471  // available).
   472  func (c *gcControllerState) revise() {
   473  	// Compute the expected scan work remaining.
   474  	//
   475  	// Note that the scannable heap size is likely to increase
   476  	// during the GC cycle. This is why it's important to revise
   477  	// the assist ratio throughout the cycle: if the scannable
   478  	// heap size increases, the assist ratio based on the initial
   479  	// scannable heap size may target too little scan work.
   480  	//
   481  	// This particular estimate is a strict upper bound on the
   482  	// possible remaining scan work for the current heap.
   483  	// You might consider dividing this by 2 (or by
   484  	// (100+GOGC)/100) to counter this over-estimation, but
   485  	// benchmarks show that this has almost no effect on mean
   486  	// mutator utilization, heap size, or assist time and it
   487  	// introduces the danger of under-estimating and letting the
   488  	// mutator outpace the garbage collector.
   489  	scanWorkExpected := int64(memstats.heap_scan) - c.scanWork
   490  	if scanWorkExpected < 1000 {
   491  		// We set a somewhat arbitrary lower bound on
   492  		// remaining scan work since if we aim a little high,
   493  		// we can miss by a little.
   494  		//
   495  		// We *do* need to enforce that this is at least 1,
   496  		// since marking is racy and double-scanning objects
   497  		// may legitimately make the expected scan work
   498  		// negative.
   499  		scanWorkExpected = 1000
   500  	}
   501  
   502  	// Compute the heap distance remaining.
   503  	heapDistance := int64(c.heapGoal) - int64(memstats.heap_live)
   504  	if heapDistance <= 0 {
   505  		// This shouldn't happen, but if it does, avoid
   506  		// dividing by zero or setting the assist negative.
   507  		heapDistance = 1
   508  	}
   509  
   510  	// Compute the mutator assist ratio so by the time the mutator
   511  	// allocates the remaining heap bytes up to next_gc, it will
   512  	// have done (or stolen) the remaining amount of scan work.
   513  	c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance)
   514  	c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
   515  }
   516  
   517  // endCycle updates the GC controller state at the end of the
   518  // concurrent part of the GC cycle.
   519  func (c *gcControllerState) endCycle() {
   520  	h_t := c.triggerRatio // For debugging
   521  
   522  	// Proportional response gain for the trigger controller. Must
   523  	// be in [0, 1]. Lower values smooth out transient effects but
   524  	// take longer to respond to phase changes. Higher values
   525  	// react to phase changes quickly, but are more affected by
   526  	// transient changes. Values near 1 may be unstable.
   527  	const triggerGain = 0.5
   528  
   529  	// Compute next cycle trigger ratio. First, this computes the
   530  	// "error" for this cycle; that is, how far off the trigger
   531  	// was from what it should have been, accounting for both heap
   532  	// growth and GC CPU utilization. We compute the actual heap
   533  	// growth during this cycle and scale that by how far off from
   534  	// the goal CPU utilization we were (to estimate the heap
   535  	// growth if we had the desired CPU utilization). The
   536  	// difference between this estimate and the GOGC-based goal
   537  	// heap growth is the error.
   538  	//
   539  	// TODO(austin): next_gc is based on heap_reachable, not
   540  	// heap_marked, which means the actual growth ratio
   541  	// technically isn't comparable to the trigger ratio.
   542  	goalGrowthRatio := float64(gcpercent) / 100
   543  	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
   544  	assistDuration := nanotime() - c.assistStartTime
   545  
   546  	// Assume background mark hit its utilization goal.
   547  	utilization := gcGoalUtilization
   548  	// Add assist utilization; avoid divide by zero.
   549  	if assistDuration > 0 {
   550  		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
   551  	}
   552  
   553  	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)
   554  
   555  	// Finally, we adjust the trigger for next time by this error,
   556  	// damped by the proportional gain.
   557  	c.triggerRatio += triggerGain * triggerError
   558  	if c.triggerRatio < 0 {
   559  		// This can happen if the mutator is allocating very
   560  		// quickly or the GC is scanning very slowly.
   561  		c.triggerRatio = 0
   562  	} else if c.triggerRatio > goalGrowthRatio*0.95 {
   563  		// Ensure there's always a little margin so that the
   564  		// mutator assist ratio isn't infinity.
   565  		c.triggerRatio = goalGrowthRatio * 0.95
   566  	}
   567  
   568  	if debug.gcpacertrace > 0 {
   569  		// Print controller state in terms of the design
   570  		// document.
   571  		H_m_prev := memstats.heap_marked
   572  		H_T := memstats.next_gc
   573  		h_a := actualGrowthRatio
   574  		H_a := memstats.heap_live
   575  		h_g := goalGrowthRatio
   576  		H_g := int64(float64(H_m_prev) * (1 + h_g))
   577  		u_a := utilization
   578  		u_g := gcGoalUtilization
   579  		W_a := c.scanWork
   580  		print("pacer: H_m_prev=", H_m_prev,
   581  			" h_t=", h_t, " H_T=", H_T,
   582  			" h_a=", h_a, " H_a=", H_a,
   583  			" h_g=", h_g, " H_g=", H_g,
   584  			" u_a=", u_a, " u_g=", u_g,
   585  			" W_a=", W_a,
   586  			" goalΔ=", goalGrowthRatio-h_t,
   587  			" actualΔ=", h_a-h_t,
   588  			" u_a/u_g=", u_a/u_g,
   589  			"\n")
   590  	}
   591  }
   592  
   593  // enlistWorker encourages another dedicated mark worker to start on
   594  // another P if there are spare worker slots. It is used by putfull
   595  // when more work is made available.
   596  //
   597  //go:nowritebarrier
   598  func (c *gcControllerState) enlistWorker() {
   599  	if c.dedicatedMarkWorkersNeeded <= 0 {
   600  		return
   601  	}
   602  	// Pick a random other P to preempt.
   603  	if gomaxprocs <= 1 {
   604  		return
   605  	}
   606  	gp := getg()
   607  	if gp == nil || gp.m == nil || gp.m.p == 0 {
   608  		return
   609  	}
   610  	myID := gp.m.p.ptr().id
   611  	for tries := 0; tries < 5; tries++ {
   612  		id := int32(fastrand1() % uint32(gomaxprocs-1))
   613  		if id >= myID {
   614  			id++
   615  		}
   616  		p := allp[id]
   617  		if p.status != _Prunning {
   618  			continue
   619  		}
   620  		if preemptone(p) {
   621  			return
   622  		}
   623  	}
   624  }
   625  
   626  // findRunnableGCWorker returns the background mark worker for _p_ if it
   627  // should be run. This must only be called when gcBlackenEnabled != 0.
   628  func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
   629  	if gcBlackenEnabled == 0 {
   630  		throw("gcControllerState.findRunnable: blackening not enabled")
   631  	}
   632  	if _p_.gcBgMarkWorker == 0 {
   633  		// The mark worker associated with this P is blocked
   634  		// performing a mark transition. We can't run it
   635  		// because it may be on some other run or wait queue.
   636  		return nil
   637  	}
   638  
   639  	if !gcMarkWorkAvailable(_p_) {
   640  		// No work to be done right now. This can happen at
   641  		// the end of the mark phase when there are still
   642  		// assists tapering off. Don't bother running a worker
   643  		// now because it'll just return immediately.
   644  		return nil
   645  	}
   646  
   647  	decIfPositive := func(ptr *int64) bool {
   648  		if *ptr > 0 {
   649  			if atomic.Xaddint64(ptr, -1) >= 0 {
   650  				return true
   651  			}
   652  			// We lost a race
   653  			atomic.Xaddint64(ptr, +1)
   654  		}
   655  		return false
   656  	}
   657  
   658  	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
   659  		// This P is now dedicated to marking until the end of
   660  		// the concurrent mark phase.
   661  		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
   662  		// TODO(austin): This P isn't going to run anything
   663  		// else for a while, so kick everything out of its run
   664  		// queue.
   665  	} else {
   666  		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
   667  			// No more workers are need right now.
   668  			return nil
   669  		}
   670  
   671  		// This P has picked the token for the fractional worker.
   672  		// Is the GC currently under or at the utilization goal?
   673  		// If so, do more work.
   674  		//
   675  		// We used to check whether doing one time slice of work
   676  		// would remain under the utilization goal, but that has the
   677  		// effect of delaying work until the mutator has run for
   678  		// enough time slices to pay for the work. During those time
   679  		// slices, write barriers are enabled, so the mutator is running slower.
   680  		// Now instead we do the work whenever we're under or at the
   681  		// utilization work and pay for it by letting the mutator run later.
   682  		// This doesn't change the overall utilization averages, but it
   683  		// front loads the GC work so that the GC finishes earlier and
   684  		// write barriers can be turned off sooner, effectively giving
   685  		// the mutator a faster machine.
   686  		//
   687  		// The old, slower behavior can be restored by setting
   688  		//	gcForcePreemptNS = forcePreemptNS.
   689  		const gcForcePreemptNS = 0
   690  
   691  		// TODO(austin): We could fast path this and basically
   692  		// eliminate contention on c.fractionalMarkWorkersNeeded by
   693  		// precomputing the minimum time at which it's worth
   694  		// next scheduling the fractional worker. Then Ps
   695  		// don't have to fight in the window where we've
   696  		// passed that deadline and no one has started the
   697  		// worker yet.
   698  		//
   699  		// TODO(austin): Shorter preemption interval for mark
   700  		// worker to improve fairness and give this
   701  		// finer-grained control over schedule?
   702  		now := nanotime() - gcController.bgMarkStartTime
   703  		then := now + gcForcePreemptNS
   704  		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
   705  		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
   706  			// Nope, we'd overshoot the utilization goal
   707  			atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
   708  			return nil
   709  		}
   710  		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
   711  	}
   712  
   713  	// Run the background mark worker
   714  	gp := _p_.gcBgMarkWorker.ptr()
   715  	casgstatus(gp, _Gwaiting, _Grunnable)
   716  	if trace.enabled {
   717  		traceGoUnpark(gp, 0)
   718  	}
   719  	return gp
   720  }
   721  
   722  // gcGoalUtilization is the goal CPU utilization for background
   723  // marking as a fraction of GOMAXPROCS.
   724  const gcGoalUtilization = 0.25
   725  
   726  // gcCreditSlack is the amount of scan work credit that can can
   727  // accumulate locally before updating gcController.scanWork and,
   728  // optionally, gcController.bgScanCredit. Lower values give a more
   729  // accurate assist ratio and make it more likely that assists will
   730  // successfully steal background credit. Higher values reduce memory
   731  // contention.
   732  const gcCreditSlack = 2000
   733  
   734  // gcAssistTimeSlack is the nanoseconds of mutator assist time that
   735  // can accumulate on a P before updating gcController.assistTime.
   736  const gcAssistTimeSlack = 5000
   737  
   738  // gcOverAssistBytes determines how many extra allocation bytes of
   739  // assist credit a GC assist builds up when an assist happens. This
   740  // amortizes the cost of an assist by pre-paying for this many bytes
   741  // of future allocations.
   742  const gcOverAssistBytes = 1 << 20
   743  
   744  var work struct {
   745  	full  uint64                   // lock-free list of full blocks workbuf
   746  	empty uint64                   // lock-free list of empty blocks workbuf
   747  	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
   748  
   749  	markrootNext uint32 // next markroot job
   750  	markrootJobs uint32 // number of markroot jobs
   751  
   752  	nproc   uint32
   753  	tstart  int64
   754  	nwait   uint32
   755  	ndone   uint32
   756  	alldone note
   757  
   758  	// Number of roots of various root types. Set by gcMarkRootPrepare.
   759  	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
   760  
   761  	// finalizersDone indicates that finalizers and objects with
   762  	// finalizers have been scanned by markroot. During concurrent
   763  	// GC, this happens during the concurrent scan phase. During
   764  	// STW GC, this happens during mark termination.
   765  	finalizersDone bool
   766  
   767  	// Each type of GC state transition is protected by a lock.
   768  	// Since multiple threads can simultaneously detect the state
   769  	// transition condition, any thread that detects a transition
   770  	// condition must acquire the appropriate transition lock,
   771  	// re-check the transition condition and return if it no
   772  	// longer holds or perform the transition if it does.
   773  	// Likewise, any transition must invalidate the transition
   774  	// condition before releasing the lock. This ensures that each
   775  	// transition is performed by exactly one thread and threads
   776  	// that need the transition to happen block until it has
   777  	// happened.
   778  	//
   779  	// startSema protects the transition from "off" to mark or
   780  	// mark termination.
   781  	startSema uint32
   782  	// markDoneSema protects transitions from mark 1 to mark 2 and
   783  	// from mark 2 to mark termination.
   784  	markDoneSema uint32
   785  
   786  	bgMarkReady note   // signal background mark worker has started
   787  	bgMarkDone  uint32 // cas to 1 when at a background mark completion point
   788  	// Background mark completion signaling
   789  
   790  	// mode is the concurrency mode of the current GC cycle.
   791  	mode gcMode
   792  
   793  	// Copy of mheap.allspans for marker or sweeper.
   794  	spans []*mspan
   795  
   796  	// totaltime is the CPU nanoseconds spent in GC since the
   797  	// program started if debug.gctrace > 0.
   798  	totaltime int64
   799  
   800  	// bytesMarked is the number of bytes marked this cycle. This
   801  	// includes bytes blackened in scanned objects, noscan objects
   802  	// that go straight to black, and permagrey objects scanned by
   803  	// markroot during the concurrent scan phase. This is updated
   804  	// atomically during the cycle. Updates may be batched
   805  	// arbitrarily, since the value is only read at the end of the
   806  	// cycle.
   807  	//
   808  	// Because of benign races during marking, this number may not
   809  	// be the exact number of marked bytes, but it should be very
   810  	// close.
   811  	bytesMarked uint64
   812  
   813  	// initialHeapLive is the value of memstats.heap_live at the
   814  	// beginning of this GC cycle.
   815  	initialHeapLive uint64
   816  
   817  	// assistQueue is a queue of assists that are blocked because
   818  	// there was neither enough credit to steal or enough work to
   819  	// do.
   820  	assistQueue struct {
   821  		lock       mutex
   822  		head, tail guintptr
   823  	}
   824  
   825  	// Timing/utilization stats for this cycle.
   826  	stwprocs, maxprocs                 int32
   827  	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
   828  
   829  	pauseNS    int64 // total STW time this cycle
   830  	pauseStart int64 // nanotime() of last STW
   831  
   832  	// debug.gctrace heap sizes for this cycle.
   833  	heap0, heap1, heap2, heapGoal uint64
   834  }
   835  
   836  // GC runs a garbage collection and blocks the caller until the
   837  // garbage collection is complete. It may also block the entire
   838  // program.
   839  func GC() {
   840  	gcStart(gcForceBlockMode, false)
   841  }
   842  
   843  // gcMode indicates how concurrent a GC cycle should be.
   844  type gcMode int
   845  
   846  const (
   847  	gcBackgroundMode gcMode = iota // concurrent GC and sweep
   848  	gcForceMode                    // stop-the-world GC now, concurrent sweep
   849  	gcForceBlockMode               // stop-the-world GC now and STW sweep
   850  )
   851  
   852  // gcShouldStart returns true if the exit condition for the _GCoff
   853  // phase has been met. The exit condition should be tested when
   854  // allocating.
   855  //
   856  // If forceTrigger is true, it ignores the current heap size, but
   857  // checks all other conditions. In general this should be false.
   858  func gcShouldStart(forceTrigger bool) bool {
   859  	return gcphase == _GCoff && (forceTrigger || memstats.heap_live >= memstats.next_gc) && memstats.enablegc && panicking == 0 && gcpercent >= 0
   860  }
   861  
   862  // gcStart transitions the GC from _GCoff to _GCmark (if mode ==
   863  // gcBackgroundMode) or _GCmarktermination (if mode !=
   864  // gcBackgroundMode) by performing sweep termination and GC
   865  // initialization.
   866  //
   867  // This may return without performing this transition in some cases,
   868  // such as when called on a system stack or with locks held.
   869  func gcStart(mode gcMode, forceTrigger bool) {
   870  	// Since this is called from malloc and malloc is called in
   871  	// the guts of a number of libraries that might be holding
   872  	// locks, don't attempt to start GC in non-preemptible or
   873  	// potentially unstable situations.
   874  	mp := acquirem()
   875  	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
   876  		releasem(mp)
   877  		return
   878  	}
   879  	releasem(mp)
   880  	mp = nil
   881  
   882  	// Pick up the remaining unswept/not being swept spans concurrently
   883  	//
   884  	// This shouldn't happen if we're being invoked in background
   885  	// mode since proportional sweep should have just finished
   886  	// sweeping everything, but rounding errors, etc, may leave a
   887  	// few spans unswept. In forced mode, this is necessary since
   888  	// GC can be forced at any point in the sweeping cycle.
   889  	//
   890  	// We check the transition condition continuously here in case
   891  	// this G gets delayed in to the next GC cycle.
   892  	for (mode != gcBackgroundMode || gcShouldStart(forceTrigger)) && gosweepone() != ^uintptr(0) {
   893  		sweep.nbgsweep++
   894  	}
   895  
   896  	// Perform GC initialization and the sweep termination
   897  	// transition.
   898  	//
   899  	// If this is a forced GC, don't acquire the transition lock
   900  	// or re-check the transition condition because we
   901  	// specifically *don't* want to share the transition with
   902  	// another thread.
   903  	useStartSema := mode == gcBackgroundMode
   904  	if useStartSema {
   905  		semacquire(&work.startSema, false)
   906  		// Re-check transition condition under transition lock.
   907  		if !gcShouldStart(forceTrigger) {
   908  			semrelease(&work.startSema)
   909  			return
   910  		}
   911  	}
   912  
   913  	// In gcstoptheworld debug mode, upgrade the mode accordingly.
   914  	// We do this after re-checking the transition condition so
   915  	// that multiple goroutines that detect the heap trigger don't
   916  	// start multiple STW GCs.
   917  	if mode == gcBackgroundMode {
   918  		if debug.gcstoptheworld == 1 {
   919  			mode = gcForceMode
   920  		} else if debug.gcstoptheworld == 2 {
   921  			mode = gcForceBlockMode
   922  		}
   923  	}
   924  
   925  	// Ok, we're doing it!  Stop everybody else
   926  	semacquire(&worldsema, false)
   927  
   928  	if trace.enabled {
   929  		traceGCStart()
   930  	}
   931  
   932  	if mode == gcBackgroundMode {
   933  		gcBgMarkStartWorkers()
   934  	}
   935  
   936  	gcResetMarkState()
   937  
   938  	now := nanotime()
   939  	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
   940  	work.tSweepTerm = now
   941  	work.heap0 = memstats.heap_live
   942  	work.pauseNS = 0
   943  	work.mode = mode
   944  
   945  	work.pauseStart = now
   946  	systemstack(stopTheWorldWithSema)
   947  	// Finish sweep before we start concurrent scan.
   948  	systemstack(func() {
   949  		finishsweep_m(true)
   950  	})
   951  	// clearpools before we start the GC. If we wait they memory will not be
   952  	// reclaimed until the next GC cycle.
   953  	clearpools()
   954  
   955  	work.finalizersDone = false
   956  
   957  	if mode == gcBackgroundMode { // Do as much work concurrently as possible
   958  		gcController.startCycle()
   959  		work.heapGoal = gcController.heapGoal
   960  
   961  		// Enter concurrent mark phase and enable
   962  		// write barriers.
   963  		//
   964  		// Because the world is stopped, all Ps will
   965  		// observe that write barriers are enabled by
   966  		// the time we start the world and begin
   967  		// scanning.
   968  		//
   969  		// It's necessary to enable write barriers
   970  		// during the scan phase for several reasons:
   971  		//
   972  		// They must be enabled for writes to higher
   973  		// stack frames before we scan stacks and
   974  		// install stack barriers because this is how
   975  		// we track writes to inactive stack frames.
   976  		// (Alternatively, we could not install stack
   977  		// barriers over frame boundaries with
   978  		// up-pointers).
   979  		//
   980  		// They must be enabled before assists are
   981  		// enabled because they must be enabled before
   982  		// any non-leaf heap objects are marked. Since
   983  		// allocations are blocked until assists can
   984  		// happen, we want enable assists as early as
   985  		// possible.
   986  		setGCPhase(_GCmark)
   987  
   988  		// markrootSpans uses work.spans, so make sure
   989  		// it is up to date.
   990  		gcCopySpans()
   991  
   992  		gcBgMarkPrepare() // Must happen before assist enable.
   993  		gcMarkRootPrepare()
   994  
   995  		// At this point all Ps have enabled the write
   996  		// barrier, thus maintaining the no white to
   997  		// black invariant. Enable mutator assists to
   998  		// put back-pressure on fast allocating
   999  		// mutators.
  1000  		atomic.Store(&gcBlackenEnabled, 1)
  1001  
  1002  		// Assists and workers can start the moment we start
  1003  		// the world.
  1004  		gcController.assistStartTime = now
  1005  		gcController.bgMarkStartTime = now
  1006  
  1007  		// Concurrent mark.
  1008  		systemstack(startTheWorldWithSema)
  1009  		now = nanotime()
  1010  		work.pauseNS += now - work.pauseStart
  1011  		work.tMark = now
  1012  	} else {
  1013  		t := nanotime()
  1014  		work.tMark, work.tMarkTerm = t, t
  1015  		work.heapGoal = work.heap0
  1016  
  1017  		// Perform mark termination. This will restart the world.
  1018  		gcMarkTermination()
  1019  	}
  1020  
  1021  	if useStartSema {
  1022  		semrelease(&work.startSema)
  1023  	}
  1024  }
  1025  
  1026  // gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
  1027  // to mark termination.
  1028  //
  1029  // This should be called when all mark work has been drained. In mark
  1030  // 1, this includes all root marking jobs, global work buffers, and
  1031  // active work buffers in assists and background workers; however,
  1032  // work may still be cached in per-P work buffers. In mark 2, per-P
  1033  // caches are disabled.
  1034  //
  1035  // The calling context must be preemptible.
  1036  //
  1037  // Note that it is explicitly okay to have write barriers in this
  1038  // function because completion of concurrent mark is best-effort
  1039  // anyway. Any work created by write barriers here will be cleaned up
  1040  // by mark termination.
  1041  func gcMarkDone() {
  1042  top:
  1043  	semacquire(&work.markDoneSema, false)
  1044  
  1045  	// Re-check transition condition under transition lock.
  1046  	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
  1047  		semrelease(&work.markDoneSema)
  1048  		return
  1049  	}
  1050  
  1051  	// Disallow starting new workers so that any remaining workers
  1052  	// in the current mark phase will drain out.
  1053  	//
  1054  	// TODO(austin): Should dedicated workers keep an eye on this
  1055  	// and exit gcDrain promptly?
  1056  	atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
  1057  	atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)
  1058  
  1059  	if !gcBlackenPromptly {
  1060  		// Transition from mark 1 to mark 2.
  1061  		//
  1062  		// The global work list is empty, but there can still be work
  1063  		// sitting in the per-P work caches and there can be more
  1064  		// objects reachable from global roots since they don't have write
  1065  		// barriers. Rescan some roots and flush work caches.
  1066  
  1067  		gcMarkRootCheck()
  1068  
  1069  		// Disallow caching workbufs and indicate that we're in mark 2.
  1070  		gcBlackenPromptly = true
  1071  
  1072  		// Prevent completion of mark 2 until we've flushed
  1073  		// cached workbufs.
  1074  		atomic.Xadd(&work.nwait, -1)
  1075  
  1076  		// Rescan global data and BSS. There may still work
  1077  		// workers running at this point, so bump "jobs" down
  1078  		// before "next" so they won't try running root jobs
  1079  		// until we set next.
  1080  		atomic.Store(&work.markrootJobs, uint32(fixedRootCount+work.nDataRoots+work.nBSSRoots))
  1081  		atomic.Store(&work.markrootNext, fixedRootCount)
  1082  
  1083  		// GC is set up for mark 2. Let Gs blocked on the
  1084  		// transition lock go while we flush caches.
  1085  		semrelease(&work.markDoneSema)
  1086  
  1087  		systemstack(func() {
  1088  			// Flush all currently cached workbufs and
  1089  			// ensure all Ps see gcBlackenPromptly. This
  1090  			// also blocks until any remaining mark 1
  1091  			// workers have exited their loop so we can
  1092  			// start new mark 2 workers that will observe
  1093  			// the new root marking jobs.
  1094  			forEachP(func(_p_ *p) {
  1095  				_p_.gcw.dispose()
  1096  			})
  1097  		})
  1098  
  1099  		// Now we can start up mark 2 workers.
  1100  		atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
  1101  		atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)
  1102  
  1103  		incnwait := atomic.Xadd(&work.nwait, +1)
  1104  		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
  1105  			// This loop will make progress because
  1106  			// gcBlackenPromptly is now true, so it won't
  1107  			// take this same "if" branch.
  1108  			goto top
  1109  		}
  1110  	} else {
  1111  		// Transition to mark termination.
  1112  		now := nanotime()
  1113  		work.tMarkTerm = now
  1114  		work.pauseStart = now
  1115  		getg().m.preemptoff = "gcing"
  1116  		systemstack(stopTheWorldWithSema)
  1117  		// The gcphase is _GCmark, it will transition to _GCmarktermination
  1118  		// below. The important thing is that the wb remains active until
  1119  		// all marking is complete. This includes writes made by the GC.
  1120  
  1121  		// markroot is done now, so record that objects with
  1122  		// finalizers have been scanned.
  1123  		work.finalizersDone = true
  1124  
  1125  		// Disable assists and background workers. We must do
  1126  		// this before waking blocked assists.
  1127  		atomic.Store(&gcBlackenEnabled, 0)
  1128  
  1129  		// Flush the gcWork caches. This must be done before
  1130  		// endCycle since endCycle depends on statistics kept
  1131  		// in these caches.
  1132  		gcFlushGCWork()
  1133  
  1134  		// Wake all blocked assists. These will run when we
  1135  		// start the world again.
  1136  		gcWakeAllAssists()
  1137  
  1138  		// Likewise, release the transition lock. Blocked
  1139  		// workers and assists will run when we start the
  1140  		// world again.
  1141  		semrelease(&work.markDoneSema)
  1142  
  1143  		gcController.endCycle()
  1144  
  1145  		// Perform mark termination. This will restart the world.
  1146  		gcMarkTermination()
  1147  	}
  1148  }
  1149  
  1150  func gcMarkTermination() {
  1151  	// World is stopped.
  1152  	// Start marktermination which includes enabling the write barrier.
  1153  	atomic.Store(&gcBlackenEnabled, 0)
  1154  	gcBlackenPromptly = false
  1155  	setGCPhase(_GCmarktermination)
  1156  
  1157  	work.heap1 = memstats.heap_live
  1158  	startTime := nanotime()
  1159  
  1160  	mp := acquirem()
  1161  	mp.preemptoff = "gcing"
  1162  	_g_ := getg()
  1163  	_g_.m.traceback = 2
  1164  	gp := _g_.m.curg
  1165  	casgstatus(gp, _Grunning, _Gwaiting)
  1166  	gp.waitreason = "garbage collection"
  1167  
  1168  	// Run gc on the g0 stack.  We do this so that the g stack
  1169  	// we're currently running on will no longer change.  Cuts
  1170  	// the root set down a bit (g0 stacks are not scanned, and
  1171  	// we don't need to scan gc's internal state).  We also
  1172  	// need to switch to g0 so we can shrink the stack.
  1173  	systemstack(func() {
  1174  		gcMark(startTime)
  1175  		// Must return immediately.
  1176  		// The outer function's stack may have moved
  1177  		// during gcMark (it shrinks stacks, including the
  1178  		// outer function's stack), so we must not refer
  1179  		// to any of its variables. Return back to the
  1180  		// non-system stack to pick up the new addresses
  1181  		// before continuing.
  1182  	})
  1183  
  1184  	systemstack(func() {
  1185  		work.heap2 = work.bytesMarked
  1186  		if debug.gccheckmark > 0 {
  1187  			// Run a full stop-the-world mark using checkmark bits,
  1188  			// to check that we didn't forget to mark anything during
  1189  			// the concurrent mark process.
  1190  			gcResetMarkState()
  1191  			initCheckmarks()
  1192  			gcMark(startTime)
  1193  			clearCheckmarks()
  1194  		}
  1195  
  1196  		// marking is complete so we can turn the write barrier off
  1197  		setGCPhase(_GCoff)
  1198  		gcSweep(work.mode)
  1199  
  1200  		if debug.gctrace > 1 {
  1201  			startTime = nanotime()
  1202  			// The g stacks have been scanned so
  1203  			// they have gcscanvalid==true and gcworkdone==true.
  1204  			// Reset these so that all stacks will be rescanned.
  1205  			gcResetMarkState()
  1206  			finishsweep_m(true)
  1207  
  1208  			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
  1209  			// At this point all objects will be found during the gcMark which
  1210  			// does a complete STW mark and object scan.
  1211  			setGCPhase(_GCmarktermination)
  1212  			gcMark(startTime)
  1213  			setGCPhase(_GCoff) // marking is done, turn off wb.
  1214  			gcSweep(work.mode)
  1215  		}
  1216  	})
  1217  
  1218  	_g_.m.traceback = 0
  1219  	casgstatus(gp, _Gwaiting, _Grunning)
  1220  
  1221  	if trace.enabled {
  1222  		traceGCDone()
  1223  	}
  1224  
  1225  	// all done
  1226  	mp.preemptoff = ""
  1227  
  1228  	if gcphase != _GCoff {
  1229  		throw("gc done but gcphase != _GCoff")
  1230  	}
  1231  
  1232  	// Update timing memstats
  1233  	now, unixNow := nanotime(), unixnanotime()
  1234  	work.pauseNS += now - work.pauseStart
  1235  	work.tEnd = now
  1236  	atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
  1237  	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
  1238  	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
  1239  	memstats.pause_total_ns += uint64(work.pauseNS)
  1240  
  1241  	// Update work.totaltime.
  1242  	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
  1243  	// We report idle marking time below, but omit it from the
  1244  	// overall utilization here since it's "free".
  1245  	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
  1246  	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
  1247  	cycleCpu := sweepTermCpu + markCpu + markTermCpu
  1248  	work.totaltime += cycleCpu
  1249  
  1250  	// Compute overall GC CPU utilization.
  1251  	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
  1252  	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
  1253  
  1254  	memstats.numgc++
  1255  
  1256  	// Reset sweep state.
  1257  	sweep.nbgsweep = 0
  1258  	sweep.npausesweep = 0
  1259  
  1260  	systemstack(startTheWorldWithSema)
  1261  
  1262  	// Free stack spans. This must be done between GC cycles.
  1263  	systemstack(freeStackSpans)
  1264  
  1265  	// Print gctrace before dropping worldsema. As soon as we drop
  1266  	// worldsema another cycle could start and smash the stats
  1267  	// we're trying to print.
  1268  	if debug.gctrace > 0 {
  1269  		util := int(memstats.gc_cpu_fraction * 100)
  1270  
  1271  		var sbuf [24]byte
  1272  		printlock()
  1273  		print("gc ", memstats.numgc,
  1274  			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
  1275  			util, "%: ")
  1276  		prev := work.tSweepTerm
  1277  		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
  1278  			if i != 0 {
  1279  				print("+")
  1280  			}
  1281  			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
  1282  			prev = ns
  1283  		}
  1284  		print(" ms clock, ")
  1285  		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
  1286  			if i == 2 || i == 3 {
  1287  				// Separate mark time components with /.
  1288  				print("/")
  1289  			} else if i != 0 {
  1290  				print("+")
  1291  			}
  1292  			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
  1293  		}
  1294  		print(" ms cpu, ",
  1295  			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
  1296  			work.heapGoal>>20, " MB goal, ",
  1297  			work.maxprocs, " P")
  1298  		if work.mode != gcBackgroundMode {
  1299  			print(" (forced)")
  1300  		}
  1301  		print("\n")
  1302  		printunlock()
  1303  	}
  1304  
  1305  	semrelease(&worldsema)
  1306  	// Careful: another GC cycle may start now.
  1307  
  1308  	releasem(mp)
  1309  	mp = nil
  1310  
  1311  	// now that gc is done, kick off finalizer thread if needed
  1312  	if !concurrentSweep {
  1313  		// give the queued finalizers, if any, a chance to run
  1314  		Gosched()
  1315  	}
  1316  }
  1317  
  1318  // gcBgMarkStartWorkers prepares background mark worker goroutines.
  1319  // These goroutines will not run until the mark phase, but they must
  1320  // be started while the work is not stopped and from a regular G
  1321  // stack. The caller must hold worldsema.
  1322  func gcBgMarkStartWorkers() {
  1323  	// Background marking is performed by per-P G's. Ensure that
  1324  	// each P has a background GC G.
  1325  	for _, p := range &allp {
  1326  		if p == nil || p.status == _Pdead {
  1327  			break
  1328  		}
  1329  		if p.gcBgMarkWorker == 0 {
  1330  			go gcBgMarkWorker(p)
  1331  			notetsleepg(&work.bgMarkReady, -1)
  1332  			noteclear(&work.bgMarkReady)
  1333  		}
  1334  	}
  1335  }
  1336  
  1337  // gcBgMarkPrepare sets up state for background marking.
  1338  // Mutator assists must not yet be enabled.
  1339  func gcBgMarkPrepare() {
  1340  	// Background marking will stop when the work queues are empty
  1341  	// and there are no more workers (note that, since this is
  1342  	// concurrent, this may be a transient state, but mark
  1343  	// termination will clean it up). Between background workers
  1344  	// and assists, we don't really know how many workers there
  1345  	// will be, so we pretend to have an arbitrarily large number
  1346  	// of workers, almost all of which are "waiting". While a
  1347  	// worker is working it decrements nwait. If nproc == nwait,
  1348  	// there are no workers.
  1349  	work.nproc = ^uint32(0)
  1350  	work.nwait = ^uint32(0)
  1351  }
  1352  
  1353  func gcBgMarkWorker(_p_ *p) {
  1354  	type parkInfo struct {
  1355  		m      *m // Release this m on park.
  1356  		attach *p // If non-nil, attach to this p on park.
  1357  	}
  1358  	var park parkInfo
  1359  
  1360  	gp := getg()
  1361  	park.m = acquirem()
  1362  	park.attach = _p_
  1363  	// Inform gcBgMarkStartWorkers that this worker is ready.
  1364  	// After this point, the background mark worker is scheduled
  1365  	// cooperatively by gcController.findRunnable. Hence, it must
  1366  	// never be preempted, as this would put it into _Grunnable
  1367  	// and put it on a run queue. Instead, when the preempt flag
  1368  	// is set, this puts itself into _Gwaiting to be woken up by
  1369  	// gcController.findRunnable at the appropriate time.
  1370  	notewakeup(&work.bgMarkReady)
  1371  
  1372  	for {
  1373  		// Go to sleep until woken by gcContoller.findRunnable.
  1374  		// We can't releasem yet since even the call to gopark
  1375  		// may be preempted.
  1376  		gopark(func(g *g, parkp unsafe.Pointer) bool {
  1377  			park := (*parkInfo)(parkp)
  1378  
  1379  			// The worker G is no longer running, so it's
  1380  			// now safe to allow preemption.
  1381  			releasem(park.m)
  1382  
  1383  			// If the worker isn't attached to its P,
  1384  			// attach now. During initialization and after
  1385  			// a phase change, the worker may have been
  1386  			// running on a different P. As soon as we
  1387  			// attach, the owner P may schedule the
  1388  			// worker, so this must be done after the G is
  1389  			// stopped.
  1390  			if park.attach != nil {
  1391  				p := park.attach
  1392  				park.attach = nil
  1393  				// cas the worker because we may be
  1394  				// racing with a new worker starting
  1395  				// on this P.
  1396  				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
  1397  					// The P got a new worker.
  1398  					// Exit this worker.
  1399  					return false
  1400  				}
  1401  			}
  1402  			return true
  1403  		}, noescape(unsafe.Pointer(&park)), "GC worker (idle)", traceEvGoBlock, 0)
  1404  
  1405  		// Loop until the P dies and disassociates this
  1406  		// worker (the P may later be reused, in which case
  1407  		// it will get a new worker) or we failed to associate.
  1408  		if _p_.gcBgMarkWorker.ptr() != gp {
  1409  			break
  1410  		}
  1411  
  1412  		// Disable preemption so we can use the gcw. If the
  1413  		// scheduler wants to preempt us, we'll stop draining,
  1414  		// dispose the gcw, and then preempt.
  1415  		park.m = acquirem()
  1416  
  1417  		if gcBlackenEnabled == 0 {
  1418  			throw("gcBgMarkWorker: blackening not enabled")
  1419  		}
  1420  
  1421  		startTime := nanotime()
  1422  
  1423  		decnwait := atomic.Xadd(&work.nwait, -1)
  1424  		if decnwait == work.nproc {
  1425  			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
  1426  			throw("work.nwait was > work.nproc")
  1427  		}
  1428  
  1429  		switch _p_.gcMarkWorkerMode {
  1430  		default:
  1431  			throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
  1432  		case gcMarkWorkerDedicatedMode:
  1433  			gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
  1434  		case gcMarkWorkerFractionalMode, gcMarkWorkerIdleMode:
  1435  			gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
  1436  		}
  1437  
  1438  		// If we are nearing the end of mark, dispose
  1439  		// of the cache promptly. We must do this
  1440  		// before signaling that we're no longer
  1441  		// working so that other workers can't observe
  1442  		// no workers and no work while we have this
  1443  		// cached, and before we compute done.
  1444  		if gcBlackenPromptly {
  1445  			_p_.gcw.dispose()
  1446  		}
  1447  
  1448  		// Account for time.
  1449  		duration := nanotime() - startTime
  1450  		switch _p_.gcMarkWorkerMode {
  1451  		case gcMarkWorkerDedicatedMode:
  1452  			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
  1453  			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
  1454  		case gcMarkWorkerFractionalMode:
  1455  			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
  1456  			atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
  1457  		case gcMarkWorkerIdleMode:
  1458  			atomic.Xaddint64(&gcController.idleMarkTime, duration)
  1459  		}
  1460  
  1461  		// Was this the last worker and did we run out
  1462  		// of work?
  1463  		incnwait := atomic.Xadd(&work.nwait, +1)
  1464  		if incnwait > work.nproc {
  1465  			println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode,
  1466  				"work.nwait=", incnwait, "work.nproc=", work.nproc)
  1467  			throw("work.nwait > work.nproc")
  1468  		}
  1469  
  1470  		// If this worker reached a background mark completion
  1471  		// point, signal the main GC goroutine.
  1472  		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
  1473  			// Make this G preemptible and disassociate it
  1474  			// as the worker for this P so
  1475  			// findRunnableGCWorker doesn't try to
  1476  			// schedule it.
  1477  			_p_.gcBgMarkWorker.set(nil)
  1478  			releasem(park.m)
  1479  
  1480  			gcMarkDone()
  1481  
  1482  			// Disable preemption and prepare to reattach
  1483  			// to the P.
  1484  			//
  1485  			// We may be running on a different P at this
  1486  			// point, so we can't reattach until this G is
  1487  			// parked.
  1488  			park.m = acquirem()
  1489  			park.attach = _p_
  1490  		}
  1491  	}
  1492  }
  1493  
  1494  // gcMarkWorkAvailable returns true if executing a mark worker
  1495  // on p is potentially useful. p may be nil, in which case it only
  1496  // checks the global sources of work.
  1497  func gcMarkWorkAvailable(p *p) bool {
  1498  	if p != nil && !p.gcw.empty() {
  1499  		return true
  1500  	}
  1501  	if atomic.Load64(&work.full) != 0 {
  1502  		return true // global work available
  1503  	}
  1504  	if work.markrootNext < work.markrootJobs {
  1505  		return true // root scan work available
  1506  	}
  1507  	return false
  1508  }
  1509  
  1510  // gcFlushGCWork disposes the gcWork caches of all Ps. The world must
  1511  // be stopped.
  1512  //go:nowritebarrier
  1513  func gcFlushGCWork() {
  1514  	// Gather all cached GC work. All other Ps are stopped, so
  1515  	// it's safe to manipulate their GC work caches.
  1516  	for i := 0; i < int(gomaxprocs); i++ {
  1517  		allp[i].gcw.dispose()
  1518  	}
  1519  }
  1520  
  1521  // gcMark runs the mark (or, for concurrent GC, mark termination)
  1522  // STW is in effect at this point.
  1523  //TODO go:nowritebarrier
  1524  func gcMark(start_time int64) {
  1525  	if debug.allocfreetrace > 0 {
  1526  		tracegc()
  1527  	}
  1528  
  1529  	if gcphase != _GCmarktermination {
  1530  		throw("in gcMark expecting to see gcphase as _GCmarktermination")
  1531  	}
  1532  	work.tstart = start_time
  1533  
  1534  	gcCopySpans() // TODO(rlh): should this be hoisted and done only once? Right now it is done for normal marking and also for checkmarking.
  1535  
  1536  	// Make sure the per-P gcWork caches are empty. During mark
  1537  	// termination, these caches can still be used temporarily,
  1538  	// but must be disposed to the global lists immediately.
  1539  	gcFlushGCWork()
  1540  
  1541  	// Queue root marking jobs.
  1542  	gcMarkRootPrepare()
  1543  
  1544  	work.nwait = 0
  1545  	work.ndone = 0
  1546  	work.nproc = uint32(gcprocs())
  1547  
  1548  	if trace.enabled {
  1549  		traceGCScanStart()
  1550  	}
  1551  
  1552  	if work.nproc > 1 {
  1553  		noteclear(&work.alldone)
  1554  		helpgc(int32(work.nproc))
  1555  	}
  1556  
  1557  	gchelperstart()
  1558  
  1559  	var gcw gcWork
  1560  	gcDrain(&gcw, gcDrainBlock)
  1561  	gcw.dispose()
  1562  
  1563  	// TODO: Re-enable once this is cheap.
  1564  	//gcMarkRootCheck()
  1565  	if work.full != 0 {
  1566  		throw("work.full != 0")
  1567  	}
  1568  
  1569  	if work.nproc > 1 {
  1570  		notesleep(&work.alldone)
  1571  	}
  1572  
  1573  	// markroot is done now, so record that objects with
  1574  	// finalizers have been scanned.
  1575  	work.finalizersDone = true
  1576  
  1577  	for i := 0; i < int(gomaxprocs); i++ {
  1578  		if !allp[i].gcw.empty() {
  1579  			throw("P has cached GC work at end of mark termination")
  1580  		}
  1581  	}
  1582  
  1583  	if trace.enabled {
  1584  		traceGCScanDone()
  1585  	}
  1586  
  1587  	cachestats()
  1588  
  1589  	// Compute the reachable heap size at the beginning of the
  1590  	// cycle. This is approximately the marked heap size at the
  1591  	// end (which we know) minus the amount of marked heap that
  1592  	// was allocated after marking began (which we don't know, but
  1593  	// is approximately the amount of heap that was allocated
  1594  	// since marking began).
  1595  	allocatedDuringCycle := memstats.heap_live - work.initialHeapLive
  1596  	if memstats.heap_live < work.initialHeapLive {
  1597  		// This can happen if mCentral_UncacheSpan tightens
  1598  		// the heap_live approximation.
  1599  		allocatedDuringCycle = 0
  1600  	}
  1601  	if work.bytesMarked >= allocatedDuringCycle {
  1602  		memstats.heap_reachable = work.bytesMarked - allocatedDuringCycle
  1603  	} else {
  1604  		// This can happen if most of the allocation during
  1605  		// the cycle never became reachable from the heap.
  1606  		// Just set the reachable heap approximation to 0 and
  1607  		// let the heapminimum kick in below.
  1608  		memstats.heap_reachable = 0
  1609  	}
  1610  
  1611  	// Trigger the next GC cycle when the allocated heap has grown
  1612  	// by triggerRatio over the reachable heap size. Assume that
  1613  	// we're in steady state, so the reachable heap size is the
  1614  	// same now as it was at the beginning of the GC cycle.
  1615  	memstats.next_gc = uint64(float64(memstats.heap_reachable) * (1 + gcController.triggerRatio))
  1616  	if memstats.next_gc < heapminimum {
  1617  		memstats.next_gc = heapminimum
  1618  	}
  1619  	if int64(memstats.next_gc) < 0 {
  1620  		print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
  1621  		throw("next_gc underflow")
  1622  	}
  1623  
  1624  	// Update other GC heap size stats. This must happen after
  1625  	// cachestats (which flushes local statistics to these) and
  1626  	// flushallmcaches (which modifies heap_live).
  1627  	memstats.heap_live = work.bytesMarked
  1628  	memstats.heap_marked = work.bytesMarked
  1629  	memstats.heap_scan = uint64(gcController.scanWork)
  1630  
  1631  	minNextGC := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
  1632  	if memstats.next_gc < minNextGC {
  1633  		// The allocated heap is already past the trigger.
  1634  		// This can happen if the triggerRatio is very low and
  1635  		// the reachable heap estimate is less than the live
  1636  		// heap size.
  1637  		//
  1638  		// Concurrent sweep happens in the heap growth from
  1639  		// heap_live to next_gc, so bump next_gc up to ensure
  1640  		// that concurrent sweep has some heap growth in which
  1641  		// to perform sweeping before we start the next GC
  1642  		// cycle.
  1643  		memstats.next_gc = minNextGC
  1644  	}
  1645  
  1646  	if trace.enabled {
  1647  		traceHeapAlloc()
  1648  		traceNextGC()
  1649  	}
  1650  }
  1651  
  1652  func gcSweep(mode gcMode) {
  1653  	if gcphase != _GCoff {
  1654  		throw("gcSweep being done but phase is not GCoff")
  1655  	}
  1656  	gcCopySpans()
  1657  
  1658  	lock(&mheap_.lock)
  1659  	mheap_.sweepgen += 2
  1660  	mheap_.sweepdone = 0
  1661  	sweep.spanidx = 0
  1662  	unlock(&mheap_.lock)
  1663  
  1664  	if !_ConcurrentSweep || mode == gcForceBlockMode {
  1665  		// Special case synchronous sweep.
  1666  		// Record that no proportional sweeping has to happen.
  1667  		lock(&mheap_.lock)
  1668  		mheap_.sweepPagesPerByte = 0
  1669  		mheap_.pagesSwept = 0
  1670  		unlock(&mheap_.lock)
  1671  		// Sweep all spans eagerly.
  1672  		for sweepone() != ^uintptr(0) {
  1673  			sweep.npausesweep++
  1674  		}
  1675  		// Do an additional mProf_GC, because all 'free' events are now real as well.
  1676  		mProf_GC()
  1677  		mProf_GC()
  1678  		return
  1679  	}
  1680  
  1681  	// Concurrent sweep needs to sweep all of the in-use pages by
  1682  	// the time the allocated heap reaches the GC trigger. Compute
  1683  	// the ratio of in-use pages to sweep per byte allocated.
  1684  	heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
  1685  	// Add a little margin so rounding errors and concurrent
  1686  	// sweep are less likely to leave pages unswept when GC starts.
  1687  	heapDistance -= 1024 * 1024
  1688  	if heapDistance < _PageSize {
  1689  		// Avoid setting the sweep ratio extremely high
  1690  		heapDistance = _PageSize
  1691  	}
  1692  	lock(&mheap_.lock)
  1693  	mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
  1694  	mheap_.pagesSwept = 0
  1695  	mheap_.spanBytesAlloc = 0
  1696  	unlock(&mheap_.lock)
  1697  
  1698  	// Background sweep.
  1699  	lock(&sweep.lock)
  1700  	if sweep.parked {
  1701  		sweep.parked = false
  1702  		ready(sweep.g, 0)
  1703  	}
  1704  	unlock(&sweep.lock)
  1705  	mProf_GC()
  1706  }
  1707  
  1708  func gcCopySpans() {
  1709  	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
  1710  	// resizing/freeing allspans.
  1711  	// New spans can be created while GC progresses, but they are not garbage for
  1712  	// this round:
  1713  	//  - new stack spans can be created even while the world is stopped.
  1714  	//  - new malloc spans can be created during the concurrent sweep
  1715  	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
  1716  	lock(&mheap_.lock)
  1717  	// Free the old cached mark array if necessary.
  1718  	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
  1719  		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
  1720  	}
  1721  	// Cache the current array for sweeping.
  1722  	mheap_.gcspans = mheap_.allspans
  1723  	work.spans = h_allspans
  1724  	unlock(&mheap_.lock)
  1725  }
  1726  
  1727  // gcResetMarkState resets global state prior to marking (concurrent
  1728  // or STW) and resets the stack scan state of all Gs.
  1729  //
  1730  // This is safe to do without the world stopped because any Gs created
  1731  // during or after this will start out in the reset state.
  1732  func gcResetMarkState() {
  1733  	// This may be called during a concurrent phase, so make sure
  1734  	// allgs doesn't change.
  1735  	lock(&allglock)
  1736  	for _, gp := range allgs {
  1737  		gp.gcscandone = false  // set to true in gcphasework
  1738  		gp.gcscanvalid = false // stack has not been scanned
  1739  		gp.gcAssistBytes = 0
  1740  	}
  1741  	unlock(&allglock)
  1742  
  1743  	work.bytesMarked = 0
  1744  	work.initialHeapLive = memstats.heap_live
  1745  }
  1746  
  1747  // Hooks for other packages
  1748  
  1749  var poolcleanup func()
  1750  
  1751  //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
  1752  func sync_runtime_registerPoolCleanup(f func()) {
  1753  	poolcleanup = f
  1754  }
  1755  
  1756  func clearpools() {
  1757  	// clear sync.Pools
  1758  	if poolcleanup != nil {
  1759  		poolcleanup()
  1760  	}
  1761  
  1762  	// Clear central sudog cache.
  1763  	// Leave per-P caches alone, they have strictly bounded size.
  1764  	// Disconnect cached list before dropping it on the floor,
  1765  	// so that a dangling ref to one entry does not pin all of them.
  1766  	lock(&sched.sudoglock)
  1767  	var sg, sgnext *sudog
  1768  	for sg = sched.sudogcache; sg != nil; sg = sgnext {
  1769  		sgnext = sg.next
  1770  		sg.next = nil
  1771  	}
  1772  	sched.sudogcache = nil
  1773  	unlock(&sched.sudoglock)
  1774  
  1775  	// Clear central defer pools.
  1776  	// Leave per-P pools alone, they have strictly bounded size.
  1777  	lock(&sched.deferlock)
  1778  	for i := range sched.deferpool {
  1779  		// disconnect cached list before dropping it on the floor,
  1780  		// so that a dangling ref to one entry does not pin all of them.
  1781  		var d, dlink *_defer
  1782  		for d = sched.deferpool[i]; d != nil; d = dlink {
  1783  			dlink = d.link
  1784  			d.link = nil
  1785  		}
  1786  		sched.deferpool[i] = nil
  1787  	}
  1788  	unlock(&sched.deferlock)
  1789  }
  1790  
  1791  // Timing
  1792  
  1793  //go:nowritebarrier
  1794  func gchelper() {
  1795  	_g_ := getg()
  1796  	_g_.m.traceback = 2
  1797  	gchelperstart()
  1798  
  1799  	if trace.enabled {
  1800  		traceGCScanStart()
  1801  	}
  1802  
  1803  	// Parallel mark over GC roots and heap
  1804  	if gcphase == _GCmarktermination {
  1805  		var gcw gcWork
  1806  		gcDrain(&gcw, gcDrainBlock) // blocks in getfull
  1807  		gcw.dispose()
  1808  	}
  1809  
  1810  	if trace.enabled {
  1811  		traceGCScanDone()
  1812  	}
  1813  
  1814  	nproc := work.nproc // work.nproc can change right after we increment work.ndone
  1815  	if atomic.Xadd(&work.ndone, +1) == nproc-1 {
  1816  		notewakeup(&work.alldone)
  1817  	}
  1818  	_g_.m.traceback = 0
  1819  }
  1820  
  1821  func gchelperstart() {
  1822  	_g_ := getg()
  1823  
  1824  	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
  1825  		throw("gchelperstart: bad m->helpgc")
  1826  	}
  1827  	if _g_ != _g_.m.g0 {
  1828  		throw("gchelper not running on g0 stack")
  1829  	}
  1830  }
  1831  
  1832  // itoaDiv formats val/(10**dec) into buf.
  1833  func itoaDiv(buf []byte, val uint64, dec int) []byte {
  1834  	i := len(buf) - 1
  1835  	idec := i - dec
  1836  	for val >= 10 || i >= idec {
  1837  		buf[i] = byte(val%10 + '0')
  1838  		i--
  1839  		if i == idec {
  1840  			buf[i] = '.'
  1841  			i--
  1842  		}
  1843  		val /= 10
  1844  	}
  1845  	buf[i] = byte(val + '0')
  1846  	return buf[i:]
  1847  }
  1848  
  1849  // fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
  1850  func fmtNSAsMS(buf []byte, ns uint64) []byte {
  1851  	if ns >= 10e6 {
  1852  		// Format as whole milliseconds.
  1853  		return itoaDiv(buf, ns/1e6, 0)
  1854  	}
  1855  	// Format two digits of precision, with at most three decimal places.
  1856  	x := ns / 1e3
  1857  	if x == 0 {
  1858  		buf[0] = '0'
  1859  		return buf[:1]
  1860  	}
  1861  	dec := 3
  1862  	for x >= 100 {
  1863  		x /= 10
  1864  		dec--
  1865  	}
  1866  	return itoaDiv(buf, x, dec)
  1867  }