github.com/ActiveState/go@v0.0.0-20170614201249-0b81c023a722/src/runtime/mgc.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Garbage collector (GC).
     6  //
     7  // The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
     8  // GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
     9  // non-generational and non-compacting. Allocation is done using size segregated per P allocation
    10  // areas to minimize fragmentation while eliminating locks in the common case.
    11  //
    12  // The algorithm decomposes into several steps.
    13  // This is a high-level description of the algorithm being used. For an overview of GC a good
    14  // place to start is Richard Jones' gchandbook.org.
    15  //
    16  // The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
    17  // Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
    18  // On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
    19  // 966-975.
    20  // For journal-quality proofs that these steps are complete, correct, and terminate see
    21  // Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
    22  // Concurrency and Computation: Practice and Experience 15(3-5), 2003.
    23  //
    24  // 1. GC performs sweep termination.
    25  //
    26  //    a. Stop the world. This causes all Ps to reach a GC safe-point.
    27  //
    28  //    b. Sweep any unswept spans. There will only be unswept spans if
    29  //    this GC cycle was forced before the expected time.
    30  //
    31  // 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are
    32  // allowed to locally cache parts of the work queue.
    33  //
    34  //    a. Prepare for the mark phase by setting gcphase to _GCmark
    35  //    (from _GCoff), enabling the write barrier, enabling mutator
    36  //    assists, and enqueueing root mark jobs. No objects may be
    37  //    scanned until all Ps have enabled the write barrier, which is
    38  //    accomplished using STW.
    39  //
    40  //    b. Start the world. From this point, GC work is done by mark
    41  //    workers started by the scheduler and by assists performed as
    42  //    part of allocation. The write barrier shades both the
    43  //    overwritten pointer and the new pointer value for any pointer
    44  //    writes (see mbarrier.go for details and the sketch at the end of this comment).
    45  //    Newly allocated objects are immediately marked black.
    46  //
    47  //    c. GC performs root marking jobs. This includes scanning all
    48  //    stacks, shading all globals, and shading any heap pointers in
    49  //    off-heap runtime data structures. Scanning a stack stops a
    50  //    goroutine, shades any pointers found on its stack, and then
    51  //    resumes the goroutine.
    52  //
    53  //    d. GC drains the work queue of grey objects, scanning each grey
    54  //    object to black and shading all pointers found in the object
    55  //    (which in turn may add those pointers to the work queue).
    56  //
    57  // 3. Once the global work queue is empty (but local work queue caches
    58  // may still contain work), GC performs the "mark 2" sub-phase.
    59  //
    60  //    a. GC stops all workers, disables local work queue caches,
    61  //    flushes each P's local work queue cache to the global work queue
    62  //    cache, and reenables workers.
    63  //
    64  //    b. GC again drains the work queue, as in 2d above.
    65  //
    66  // 4. Once the work queue is empty, GC performs mark termination.
    67  //
    68  //    a. Stop the world.
    69  //
    70  //    b. Set gcphase to _GCmarktermination, and disable workers and
    71  //    assists.
    72  //
    73  //    c. Drain any remaining work from the work queue (typically there
    74  //    will be none).
    75  //
    76  //    d. Perform other housekeeping like flushing mcaches.
    77  //
    78  // 5. GC performs the sweep phase.
    79  //
    80  //    a. Prepare for the sweep phase by setting gcphase to _GCoff,
    81  //    setting up sweep state and disabling the write barrier.
    82  //
    83  //    b. Start the world. From this point on, newly allocated objects
    84  //    are white, and allocating sweeps spans before use if necessary.
    85  //
    86  //    c. GC does concurrent sweeping in the background and in response
    87  //    to allocation. See description below.
    88  //
    89  // 6. When sufficient allocation has taken place, replay the sequence
    90  // starting with 1 above. See discussion of GC rate below.
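        //
        // As an illustration of the write barrier in step 2b, a heap
        // pointer write *slot = ptr during the mark phase behaves, in
        // simplified form, like:
        //
        //	shade(*slot) // shade the overwritten pointer value
        //	shade(ptr)   // shade the new pointer value
        //	*slot = ptr
        //
        // This is only a sketch; see mbarrier.go for the real barrier and
        // the exact conditions under which each shade happens.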
    91  
    92  // Concurrent sweep.
    93  //
    94  // The sweep phase proceeds concurrently with normal program execution.
    95  // The heap is swept span-by-span both lazily (when a goroutine needs another span)
    96  // and concurrently in a background goroutine (this helps programs that are not CPU bound).
    97  // At the end of STW mark termination all spans are marked as "needs sweeping".
    98  //
    99  // The background sweeper goroutine simply sweeps spans one-by-one.
   100  //
   101  // To avoid requesting more OS memory while there are unswept spans, when a
   102  // goroutine needs another span, it first attempts to reclaim that much memory
   103  // by sweeping. When a goroutine needs to allocate a new small-object span, it
   104  // sweeps small-object spans for the same object size until it frees at least
   105  // one object. When a goroutine needs to allocate a large-object span from the heap,
   106  // it sweeps spans until it frees at least that many pages into the heap. There is
   107  // one case where this may not suffice: if a goroutine sweeps and frees two
   108  // nonadjacent one-page spans to the heap, it will allocate a new two-page
   109  // span, but there can still be other one-page unswept spans which could be
   110  // combined into a two-page span.
   111  //
   112  // It's critical to ensure that no operations proceed on unswept spans (that would corrupt
   113  // mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
   114  // so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
   115  // When a goroutine explicitly frees an object or sets a finalizer, it ensures that
   116  // the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
   117  // The finalizer goroutine is kicked off only when all spans are swept.
   118  // When the next GC starts, it sweeps all not-yet-swept spans (if any).
   119  
   120  // GC rate.
   121  // Next GC is after we've allocated an extra amount of memory proportional to
   122  // the amount already in use. The proportion is controlled by GOGC environment variable
   123  // (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
   124  // (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
   125  // proportion to the allocation cost. Adjusting GOGC just changes the linear constant
   126  // (and also the amount of extra memory used).
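         //
         // Illustrative arithmetic (the same relation computed in
         // gcSetTriggerRatio below):
         //
         //	next_gc = heap_marked + heap_marked*GOGC/100
         //
         // so with GOGC=100 and heap_marked=4M, next_gc = 4M + 4M = 8M.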
   127  
   128  // Oblets
   129  //
   130  // In order to prevent long pauses while scanning large objects and to
   131  // improve parallelism, the garbage collector breaks up scan jobs for
   132  // objects larger than maxObletBytes into "oblets" of at most
   133  // maxObletBytes. When scanning encounters the beginning of a large
   134  // object, it scans only the first oblet and enqueues the remaining
   135  // oblets as new scan jobs.
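         //
         // For example, assuming maxObletBytes is 128KB (its value in
         // mgcmark.go at the time of writing), scanning a 1MB object scans
         // the first 128KB oblet and enqueues the remaining seven 128KB
         // oblets as separate scan jobs.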
   136  
   137  package runtime
   138  
   139  import (
   140  	"runtime/internal/atomic"
   141  	"runtime/internal/sys"
   142  	"unsafe"
   143  )
   144  
   145  const (
   146  	_DebugGC         = 0
   147  	_ConcurrentSweep = true
   148  	_FinBlockSize    = 4 * 1024
   149  
   150  	// sweepMinHeapDistance is a lower bound on the heap distance
   151  	// (in bytes) reserved for concurrent sweeping between GC
   152  	// cycles. This will be scaled by gcpercent/100.
   153  	sweepMinHeapDistance = 1024 * 1024
   154  )
   155  
   156  // heapminimum is the minimum heap size at which to trigger GC.
   157  // For small heaps, this overrides the usual GOGC*live set rule.
   158  //
   159  // When there is a very small live set but a lot of allocation, simply
   160  // collecting when the heap reaches GOGC*live results in many GC
   161  // cycles and high total per-GC overhead. This minimum amortizes this
   162  // per-GC overhead while keeping the heap reasonably small.
   163  //
   164  // During initialization this is set to 4MB*GOGC/100. In the case of
   165  // GOGC==0, this will set heapminimum to 0, resulting in constant
   166  // collection even when the heap size is small, which is useful for
   167  // debugging.
   168  var heapminimum uint64 = defaultHeapMinimum
   169  
   170  // defaultHeapMinimum is the value of heapminimum for GOGC==100.
   171  const defaultHeapMinimum = 4 << 20
   172  
   173  // Initialized from $GOGC.  GOGC=off means no GC.
   174  var gcpercent int32
   175  
   176  func gcinit() {
   177  	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
   178  		throw("size of Workbuf is suboptimal")
   179  	}
   180  
   181  	// No sweep on the first cycle.
   182  	mheap_.sweepdone = 1
   183  
   184  	// Set a reasonable initial GC trigger.
   185  	memstats.triggerRatio = 7 / 8.0
   186  
   187  	// Fake a heap_marked value so it looks like a trigger at
   188  	// heapminimum is the appropriate growth from heap_marked.
   189  	// This will go into computing the initial GC goal.
   190  	memstats.heap_marked = uint64(float64(heapminimum) / (1 + memstats.triggerRatio))
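         	// Illustratively: with the default 4MB heapminimum and the
         	// 7/8 ratio above, heap_marked is faked to 4MB/1.875 ≈ 2.13MB,
         	// so the first trigger computed from it, heap_marked*(1+7/8),
         	// is heapminimum itself (≈ 4MB).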
   191  
   192  	// Set gcpercent from the environment. This will also compute
   193  	// and set the GC trigger and goal.
   194  	_ = setGCPercent(readgogc())
   195  
   196  	work.startSema = 1
   197  	work.markDoneSema = 1
   198  }
   199  
   200  func readgogc() int32 {
   201  	p := gogetenv("GOGC")
   202  	if p == "off" {
   203  		return -1
   204  	}
   205  	if n, ok := atoi32(p); ok {
   206  		return n
   207  	}
   208  	return 100
   209  }
   210  
   211  // gcenable is called after the bulk of the runtime initialization,
   212  // just before we're about to start letting user code run.
   213  // It kicks off the background sweeper goroutine and enables GC.
   214  func gcenable() {
   215  	c := make(chan int, 1)
   216  	go bgsweep(c)
   217  	<-c
   218  	memstats.enablegc = true // now that runtime is initialized, GC is okay
   219  }
   220  
   221  //go:linkname setGCPercent runtime/debug.setGCPercent
   222  func setGCPercent(in int32) (out int32) {
   223  	lock(&mheap_.lock)
   224  	out = gcpercent
   225  	if in < 0 {
   226  		in = -1
   227  	}
   228  	gcpercent = in
   229  	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
   230  	// Update pacing in response to gcpercent change.
   231  	gcSetTriggerRatio(memstats.triggerRatio)
   232  	unlock(&mheap_.lock)
   233  	return out
   234  }
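
         // Illustrative use from user code, which reaches setGCPercent via
         // the go:linkname above (a sketch, not code in this file):
         //
         //	old := debug.SetGCPercent(-1) // disable collections temporarily
         //	defer debug.SetGCPercent(old) // restore the previous setting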
   235  
   236  // Garbage collector phase.
   237  // It indicates to the write barrier and synchronization code what task to perform.
   238  var gcphase uint32
   239  
   240  // The compiler knows about this variable.
   241  // If you change it, you must change builtin/runtime.go, too.
   242  // If you change the first four bytes, you must also change the write
   243  // barrier insertion code.
   244  var writeBarrier struct {
   245  	enabled bool    // compiler emits a check of this before calling write barrier
   246  	pad     [3]byte // compiler uses 32-bit load for "enabled" field
   247  	needed  bool    // whether we need a write barrier for current GC phase
   248  	cgo     bool    // whether we need a write barrier for a cgo check
   249  	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
   250  }
   251  
   252  // gcBlackenEnabled is 1 if mutator assists and background mark
   253  // workers are allowed to blacken objects. This must only be set when
   254  // gcphase == _GCmark.
   255  var gcBlackenEnabled uint32
   256  
   257  // gcBlackenPromptly indicates that optimizations that may
   258  // hide work from the global work queue should be disabled.
   259  //
   260  // If gcBlackenPromptly is true, per-P gcWork caches should
   261  // be flushed immediately and new objects should be allocated black.
   262  //
   263  // There is a tension between allocating objects white and
   264  // allocating them black. If allocated white, objects that die before
   265  // being marked can be collected during this GC cycle. On the other
   266  // hand allocating them black will reduce _GCmarktermination latency
   267  // since more work is done in the mark phase. This tension is resolved
   268  // by allocating white until the mark phase is approaching its end and
   269  // then allocating black for the remainder of the mark phase.
   270  var gcBlackenPromptly bool
   271  
   272  const (
   273  	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
   274  	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
   275  	_GCmarktermination        // GC mark termination: allocate black, Ps help GC, write barrier ENABLED
   276  )
   277  
   278  //go:nosplit
   279  func setGCPhase(x uint32) {
   280  	atomic.Store(&gcphase, x)
   281  	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
   282  	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
   283  }
   284  
   285  // gcMarkWorkerMode represents the mode that a concurrent mark worker
   286  // should operate in.
   287  //
   288  // Concurrent marking happens through four different mechanisms. One
   289  // is mutator assists, which happen in response to allocations and are
   290  // not scheduled. The other three are variations in the per-P mark
   291  // workers and are distinguished by gcMarkWorkerMode.
   292  type gcMarkWorkerMode int
   293  
   294  const (
   295  	// gcMarkWorkerDedicatedMode indicates that the P of a mark
   296  	// worker is dedicated to running that mark worker. The mark
   297  	// worker should run without preemption.
   298  	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota
   299  
   300  	// gcMarkWorkerFractionalMode indicates that a P is currently
   301  	// running the "fractional" mark worker. The fractional worker
   302  	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
   303  	// integer. The fractional worker should run until it is
   304  	// preempted and will be scheduled to pick up the fractional
   305  	// part of GOMAXPROCS*gcGoalUtilization.
   306  	gcMarkWorkerFractionalMode
   307  
   308  	// gcMarkWorkerIdleMode indicates that a P is running the mark
   309  	// worker because it has nothing else to do. The idle worker
   310  	// should run until it is preempted and account its time
   311  	// against gcController.idleMarkTime.
   312  	gcMarkWorkerIdleMode
   313  )
   314  
   315  // gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
   316  // to use in execution traces.
   317  var gcMarkWorkerModeStrings = [...]string{
   318  	"GC (dedicated)",
   319  	"GC (fractional)",
   320  	"GC (idle)",
   321  }
   322  
   323  // gcController implements the GC pacing controller that determines
   324  // when to trigger concurrent garbage collection and how much marking
   325  // work to do in mutator assists and background marking.
   326  //
   327  // It uses a feedback control algorithm to adjust the trigger in
   328  // memstats.gc_trigger based on the heap growth and GC CPU utilization each cycle.
   329  // This algorithm optimizes for heap growth to match GOGC and for CPU
   330  // utilization between assist and background marking to be 25% of
   331  // GOMAXPROCS. The high-level design of this algorithm is documented
   332  // at https://golang.org/s/go15gcpacing.
   333  //
   334  // All fields of gcController are used only during a single mark
   335  // cycle.
   336  var gcController gcControllerState
   337  
   338  type gcControllerState struct {
   339  	// scanWork is the total scan work performed this cycle. This
   340  	// is updated atomically during the cycle. Updates occur in
   341  	// bounded batches, since it is both written and read
   342  	// throughout the cycle. At the end of the cycle, this is how
   343  	// much of the retained heap is scannable.
   344  	//
   345  	// Currently this is the bytes of heap scanned. For most uses,
   346  	// this is an opaque unit of work, but for estimation the
   347  	// definition is important.
   348  	scanWork int64
   349  
   350  	// bgScanCredit is the scan work credit accumulated by the
   351  	// concurrent background scan. This credit is accumulated by
   352  	// the background scan and stolen by mutator assists. This is
   353  	// updated atomically. Updates occur in bounded batches, since
   354  	// it is both written and read throughout the cycle.
   355  	bgScanCredit int64
   356  
   357  	// assistTime is the nanoseconds spent in mutator assists
   358  	// during this cycle. This is updated atomically. Updates
   359  	// occur in bounded batches, since it is both written and read
   360  	// throughout the cycle.
   361  	assistTime int64
   362  
   363  	// dedicatedMarkTime is the nanoseconds spent in dedicated
   364  	// mark workers during this cycle. This is updated atomically
   365  	// at the end of the concurrent mark phase.
   366  	dedicatedMarkTime int64
   367  
   368  	// fractionalMarkTime is the nanoseconds spent in the
   369  	// fractional mark worker during this cycle. This is updated
   370  	// atomically throughout the cycle and will be up-to-date if
   371  	// the fractional mark worker is not currently running.
   372  	fractionalMarkTime int64
   373  
   374  	// idleMarkTime is the nanoseconds spent in idle marking
   375  	// during this cycle. This is updated atomically throughout
   376  	// the cycle.
   377  	idleMarkTime int64
   378  
   379  	// markStartTime is the absolute start time in nanoseconds
   380  	// that assists and background mark workers started.
   381  	markStartTime int64
   382  
   383  	// dedicatedMarkWorkersNeeded is the number of dedicated mark
   384  	// workers that need to be started. This is computed at the
   385  	// beginning of each cycle and decremented atomically as
   386  	// dedicated mark workers get started.
   387  	dedicatedMarkWorkersNeeded int64
   388  
   389  	// assistWorkPerByte is the ratio of scan work to allocated
   390  	// bytes that should be performed by mutator assists. This is
   391  	// computed at the beginning of each cycle and updated every
   392  	// time heap_scan is updated.
   393  	assistWorkPerByte float64
   394  
   395  	// assistBytesPerWork is 1/assistWorkPerByte.
   396  	assistBytesPerWork float64
   397  
   398  	// fractionalUtilizationGoal is the fraction of wall clock
   399  	// time that should be spent in the fractional mark worker.
   400  	// For example, if the overall mark utilization goal is 25%
   401  	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
   402  	// and this will be set to 0.5 so that 50% of the time some P
   403  	// is in a fractional mark worker. This is computed at the
   404  	// beginning of each cycle.
   405  	fractionalUtilizationGoal float64
   406  
   407  	_ [sys.CacheLineSize]byte
   408  
   409  	// fractionalMarkWorkersNeeded is the number of fractional
   410  	// mark workers that need to be started. This is either 0 or
   411  	// 1. This is potentially updated atomically at every
   412  	// scheduling point (hence it gets its own cache line).
   413  	fractionalMarkWorkersNeeded int64
   414  
   415  	_ [sys.CacheLineSize]byte
   416  }
   417  
   418  // startCycle resets the GC controller's state and computes estimates
   419  // for a new GC cycle. The caller must hold worldsema.
   420  func (c *gcControllerState) startCycle() {
   421  	c.scanWork = 0
   422  	c.bgScanCredit = 0
   423  	c.assistTime = 0
   424  	c.dedicatedMarkTime = 0
   425  	c.fractionalMarkTime = 0
   426  	c.idleMarkTime = 0
   427  
   428  	// If this is the first GC cycle or we're operating on a very
   429  	// small heap, fake heap_marked so it looks like gc_trigger is
   430  	// the appropriate growth from heap_marked, even though the
   431  	// real heap_marked may not have a meaningful value (on the
   432  	// first cycle) or may be much smaller (resulting in a large
   433  	// error response).
   434  	if memstats.gc_trigger <= heapminimum {
   435  		memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + memstats.triggerRatio))
   436  	}
   437  
   438  	// Re-compute the heap goal for this cycle in case something
   439  	// changed. This is the same calculation we use elsewhere.
   440  	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
   441  	if gcpercent < 0 {
   442  		memstats.next_gc = ^uint64(0)
   443  	}
   444  
   445  	// Ensure that the heap goal is at least a little larger than
   446  	// the current live heap size. This may not be the case if GC
   447  	// start is delayed or if the allocation that pushed heap_live
   448  	// over gc_trigger is large or if the trigger is really close to
   449  	// GOGC. Assist is proportional to this distance, so enforce a
   450  	// minimum distance, even if it means going over the GOGC goal
   451  	// by a tiny bit.
   452  	if memstats.next_gc < memstats.heap_live+1024*1024 {
   453  		memstats.next_gc = memstats.heap_live + 1024*1024
   454  	}
   455  
   456  	// Compute the total mark utilization goal and divide it among
   457  	// dedicated and fractional workers.
   458  	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
   459  	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
   460  	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
   461  	if c.fractionalUtilizationGoal > 0 {
   462  		c.fractionalMarkWorkersNeeded = 1
   463  	} else {
   464  		c.fractionalMarkWorkersNeeded = 0
   465  	}
   466  
   467  	// Clear per-P state
   468  	for _, p := range &allp {
   469  		if p == nil {
   470  			break
   471  		}
   472  		p.gcAssistTime = 0
   473  	}
   474  
   475  	// Compute initial values for controls that are updated
   476  	// throughout the cycle.
   477  	c.revise()
   478  
   479  	if debug.gcpacertrace > 0 {
   480  		print("pacer: assist ratio=", c.assistWorkPerByte,
   481  			" (scan ", memstats.heap_scan>>20, " MB in ",
   482  			work.initialHeapLive>>20, "->",
   483  			memstats.next_gc>>20, " MB)",
   484  			" workers=", c.dedicatedMarkWorkersNeeded,
   485  			"+", c.fractionalMarkWorkersNeeded, "\n")
   486  	}
   487  }
   488  
   489  // revise updates the assist ratio during the GC cycle to account for
   490  // improved estimates. This should be called either under STW or
   491  // whenever memstats.heap_scan, memstats.heap_live, or
   492  // memstats.next_gc is updated (with mheap_.lock held).
   493  //
   494  // It should only be called when gcBlackenEnabled != 0 (because this
   495  // is when assists are enabled and the necessary statistics are
   496  // available).
   497  func (c *gcControllerState) revise() {
   498  	// Compute the expected scan work remaining.
   499  	//
   500  	// Note that we currently count allocations during GC as both
   501  	// scannable heap (heap_scan) and scan work completed
   502  	// (scanWork), so this difference won't be changed by
   503  	// allocations during GC.
   504  	//
   505  	// This particular estimate is a strict upper bound on the
   506  	// possible remaining scan work for the current heap.
   507  	// You might consider dividing this by 2 (or by
   508  	// (100+GOGC)/100) to counter this over-estimation, but
   509  	// benchmarks show that this has almost no effect on mean
   510  	// mutator utilization, heap size, or assist time and it
   511  	// introduces the danger of under-estimating and letting the
   512  	// mutator outpace the garbage collector.
   513  	scanWorkExpected := int64(memstats.heap_scan) - c.scanWork
   514  	if scanWorkExpected < 1000 {
   515  		// We set a somewhat arbitrary lower bound on
   516  		// remaining scan work since if we aim a little high,
   517  		// we can miss by a little.
   518  		//
   519  		// We *do* need to enforce that this is at least 1,
   520  		// since marking is racy and double-scanning objects
   521  		// may legitimately make the expected scan work
   522  		// negative.
   523  		scanWorkExpected = 1000
   524  	}
   525  
   526  	// Compute the heap distance remaining.
   527  	heapDistance := int64(memstats.next_gc) - int64(atomic.Load64(&memstats.heap_live))
   528  	if heapDistance <= 0 {
   529  		// This shouldn't happen, but if it does, avoid
   530  		// dividing by zero or setting the assist negative.
   531  		heapDistance = 1
   532  	}
   533  
   534  	// Compute the mutator assist ratio so that by the time the mutator
   535  	// allocates the remaining heap bytes up to next_gc, it will
   536  	// have done (or stolen) the remaining amount of scan work.
   537  	c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance)
   538  	c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
   539  }
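
         // Illustrative numbers for the ratio above: with 1MB of expected
         // scan work remaining and 2MB of heap distance to next_gc,
         // assistWorkPerByte is 0.5, so a mutator that allocates 100KB while
         // blackening is enabled must perform (or steal from bgScanCredit)
         // about 50KB of scan work.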
   540  
   541  // endCycle computes the trigger ratio for the next cycle.
   542  func (c *gcControllerState) endCycle() float64 {
   543  	if work.userForced {
   544  		// Forced GC means this cycle didn't start at the
   545  		// trigger, so where it finished isn't good
   546  		// information about how to adjust the trigger.
   547  		// Just leave it where it is.
   548  		return memstats.triggerRatio
   549  	}
   550  
   551  	// Proportional response gain for the trigger controller. Must
   552  	// be in [0, 1]. Lower values smooth out transient effects but
   553  	// take longer to respond to phase changes. Higher values
   554  	// react to phase changes quickly, but are more affected by
   555  	// transient changes. Values near 1 may be unstable.
   556  	const triggerGain = 0.5
   557  
   558  	// Compute next cycle trigger ratio. First, this computes the
   559  	// "error" for this cycle; that is, how far off the trigger
   560  	// was from what it should have been, accounting for both heap
   561  	// growth and GC CPU utilization. We compute the actual heap
   562  	// growth during this cycle and scale that by how far off from
   563  	// the goal CPU utilization we were (to estimate the heap
   564  	// growth if we had the desired CPU utilization). The
   565  	// difference between this estimate and the GOGC-based goal
   566  	// heap growth is the error.
   567  	goalGrowthRatio := float64(gcpercent) / 100
   568  	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
   569  	assistDuration := nanotime() - c.markStartTime
   570  
   571  	// Assume background mark hit its utilization goal.
   572  	utilization := gcGoalUtilization
   573  	// Add assist utilization; avoid divide by zero.
   574  	if assistDuration > 0 {
   575  		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
   576  	}
   577  
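         	// In the notation of the pacing design doc (also used in the
         	// gcpacertrace print below), this computes the error term
         	//
         	//	e = h_g - h_t - u_a/u_g*(h_a - h_t)
         	//
         	// where h_t is the trigger ratio, h_a the actual growth ratio,
         	// h_g the goal growth ratio, and u_a, u_g the actual and goal
         	// CPU utilization.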
   578  	triggerError := goalGrowthRatio - memstats.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-memstats.triggerRatio)
   579  
   580  	// Finally, we adjust the trigger for next time by this error,
   581  	// damped by the proportional gain.
   582  	triggerRatio := memstats.triggerRatio + triggerGain*triggerError
   583  
   584  	if debug.gcpacertrace > 0 {
   585  		// Print controller state in terms of the design
   586  		// document.
   587  		H_m_prev := memstats.heap_marked
   588  		h_t := memstats.triggerRatio
   589  		H_T := memstats.gc_trigger
   590  		h_a := actualGrowthRatio
   591  		H_a := memstats.heap_live
   592  		h_g := goalGrowthRatio
   593  		H_g := int64(float64(H_m_prev) * (1 + h_g))
   594  		u_a := utilization
   595  		u_g := gcGoalUtilization
   596  		W_a := c.scanWork
   597  		print("pacer: H_m_prev=", H_m_prev,
   598  			" h_t=", h_t, " H_T=", H_T,
   599  			" h_a=", h_a, " H_a=", H_a,
   600  			" h_g=", h_g, " H_g=", H_g,
   601  			" u_a=", u_a, " u_g=", u_g,
   602  			" W_a=", W_a,
   603  			" goalΔ=", goalGrowthRatio-h_t,
   604  			" actualΔ=", h_a-h_t,
   605  			" u_a/u_g=", u_a/u_g,
   606  			"\n")
   607  	}
   608  
   609  	return triggerRatio
   610  }
   611  
   612  // enlistWorker encourages another dedicated mark worker to start on
   613  // another P if there are spare worker slots. It is used by putfull
   614  // when more work is made available.
   615  //
   616  //go:nowritebarrier
   617  func (c *gcControllerState) enlistWorker() {
   618  	// If there are idle Ps, wake one so it will run an idle worker.
   619  	// NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112.
   620  	//
   621  	//	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
   622  	//		wakep()
   623  	//		return
   624  	//	}
   625  
   626  	// There are no idle Ps. If we need more dedicated workers,
   627  	// try to preempt a running P so it will switch to a worker.
   628  	if c.dedicatedMarkWorkersNeeded <= 0 {
   629  		return
   630  	}
   631  	// Pick a random other P to preempt.
   632  	if gomaxprocs <= 1 {
   633  		return
   634  	}
   635  	gp := getg()
   636  	if gp == nil || gp.m == nil || gp.m.p == 0 {
   637  		return
   638  	}
   639  	myID := gp.m.p.ptr().id
   640  	for tries := 0; tries < 5; tries++ {
   641  		id := int32(fastrandn(uint32(gomaxprocs - 1)))
   642  		if id >= myID {
   643  			id++
   644  		}
   645  		p := allp[id]
   646  		if p.status != _Prunning {
   647  			continue
   648  		}
   649  		if preemptone(p) {
   650  			return
   651  		}
   652  	}
   653  }
   654  
   655  // findRunnableGCWorker returns the background mark worker for _p_ if it
   656  // should be run. This must only be called when gcBlackenEnabled != 0.
   657  func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
   658  	if gcBlackenEnabled == 0 {
   659  		throw("gcControllerState.findRunnable: blackening not enabled")
   660  	}
   661  	if _p_.gcBgMarkWorker == 0 {
   662  		// The mark worker associated with this P is blocked
   663  		// performing a mark transition. We can't run it
   664  		// because it may be on some other run or wait queue.
   665  		return nil
   666  	}
   667  
   668  	if !gcMarkWorkAvailable(_p_) {
   669  		// No work to be done right now. This can happen at
   670  		// the end of the mark phase when there are still
   671  		// assists tapering off. Don't bother running a worker
   672  		// now because it'll just return immediately.
   673  		return nil
   674  	}
   675  
   676  	decIfPositive := func(ptr *int64) bool {
   677  		if *ptr > 0 {
   678  			if atomic.Xaddint64(ptr, -1) >= 0 {
   679  				return true
   680  			}
   681  			// We lost a race
   682  			atomic.Xaddint64(ptr, +1)
   683  		}
   684  		return false
   685  	}
   686  
   687  	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
   688  		// This P is now dedicated to marking until the end of
   689  		// the concurrent mark phase.
   690  		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
   691  		// TODO(austin): This P isn't going to run anything
   692  		// else for a while, so kick everything out of its run
   693  		// queue.
   694  	} else {
   695  		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
   696  			// No more workers are needed right now.
   697  			return nil
   698  		}
   699  
   700  		// This P has picked the token for the fractional worker.
   701  		// Is the GC currently under or at the utilization goal?
   702  		// If so, do more work.
   703  		//
   704  		// We used to check whether doing one time slice of work
   705  		// would remain under the utilization goal, but that has the
   706  		// effect of delaying work until the mutator has run for
   707  		// enough time slices to pay for the work. During those time
   708  		// slices, write barriers are enabled, so the mutator is running slower.
   709  		// Now instead we do the work whenever we're under or at the
   710  		// utilization goal and pay for it by letting the mutator run later.
   711  		// This doesn't change the overall utilization averages, but it
   712  		// front loads the GC work so that the GC finishes earlier and
   713  		// write barriers can be turned off sooner, effectively giving
   714  		// the mutator a faster machine.
   715  		//
   716  		// The old, slower behavior can be restored by setting
   717  		//	gcForcePreemptNS = forcePreemptNS.
   718  		const gcForcePreemptNS = 0
   719  
   720  		// TODO(austin): We could fast path this and basically
   721  		// eliminate contention on c.fractionalMarkWorkersNeeded by
   722  		// precomputing the minimum time at which it's worth
   723  		// next scheduling the fractional worker. Then Ps
   724  		// don't have to fight in the window where we've
   725  		// passed that deadline and no one has started the
   726  		// worker yet.
   727  		//
   728  		// TODO(austin): Shorter preemption interval for mark
   729  		// worker to improve fairness and give this
   730  		// finer-grained control over schedule?
   731  		now := nanotime() - gcController.markStartTime
   732  		then := now + gcForcePreemptNS
   733  		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
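         		// Illustratively: 300ms into the mark phase, with 100ms of
         		// fractional mark time and a goal of 0.5, 100/300 ≈ 0.33 is
         		// under the goal, so the fractional worker runs.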
   734  		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
   735  			// Nope, we'd overshoot the utilization goal
   736  			atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
   737  			return nil
   738  		}
   739  		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
   740  	}
   741  
   742  	// Run the background mark worker
   743  	gp := _p_.gcBgMarkWorker.ptr()
   744  	casgstatus(gp, _Gwaiting, _Grunnable)
   745  	if trace.enabled {
   746  		traceGoUnpark(gp, 0)
   747  	}
   748  	return gp
   749  }
   750  
   751  // gcSetTriggerRatio sets the trigger ratio and updates everything
   752  // derived from it: the absolute trigger, the heap goal, mark pacing,
   753  // and sweep pacing.
   754  //
   755  // This can be called any time. If GC is in the middle of a
   756  // concurrent phase, it will adjust the pacing of that phase.
   757  //
   758  // This depends on gcpercent, memstats.heap_marked, and
   759  // memstats.heap_live. These must be up to date.
   760  //
   761  // mheap_.lock must be held or the world must be stopped.
   762  func gcSetTriggerRatio(triggerRatio float64) {
   763  	// Set the trigger ratio, capped to reasonable bounds.
   764  	if triggerRatio < 0 {
   765  		// This can happen if the mutator is allocating very
   766  		// quickly or the GC is scanning very slowly.
   767  		triggerRatio = 0
   768  	} else if gcpercent >= 0 {
   769  		// Ensure there's always a little margin so that the
   770  		// mutator assist ratio isn't infinity.
   771  		maxTriggerRatio := 0.95 * float64(gcpercent) / 100
   772  		if triggerRatio > maxTriggerRatio {
   773  			triggerRatio = maxTriggerRatio
   774  		}
   775  	}
   776  	memstats.triggerRatio = triggerRatio
   777  
   778  	// Compute the absolute GC trigger from the trigger ratio.
   779  	//
   780  	// We trigger the next GC cycle when the allocated heap has
   781  	// grown by the trigger ratio over the marked heap size.
   782  	trigger := ^uint64(0)
   783  	if gcpercent >= 0 {
   784  		trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio))
   785  		// Don't trigger below the minimum heap size.
   786  		minTrigger := heapminimum
   787  		if !gosweepdone() {
   788  			// Concurrent sweep happens in the heap growth
   789  			// from heap_live to gc_trigger, so ensure
   790  			// that concurrent sweep has some heap growth
   791  			// in which to perform sweeping before we
   792  			// start the next GC cycle.
   793  			sweepMin := atomic.Load64(&memstats.heap_live) + sweepMinHeapDistance*uint64(gcpercent)/100
   794  			if sweepMin > minTrigger {
   795  				minTrigger = sweepMin
   796  			}
   797  		}
   798  		if trigger < minTrigger {
   799  			trigger = minTrigger
   800  		}
   801  		if int64(trigger) < 0 {
   802  		print("runtime: next_gc=", memstats.next_gc, " heap_marked=", memstats.heap_marked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, " triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n")
   803  			throw("gc_trigger underflow")
   804  		}
   805  	}
   806  	memstats.gc_trigger = trigger
   807  
   808  	// Compute the next GC goal, which is when the allocated heap
   809  	// has grown by GOGC/100 over the heap marked by the last
   810  	// cycle.
   811  	goal := ^uint64(0)
   812  	if gcpercent >= 0 {
   813  		goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
   814  		if goal < trigger {
   815  			// The trigger ratio is always less than GOGC/100, but
   816  			// other bounds on the trigger may have raised it.
   817  			// Push up the goal, too.
   818  			goal = trigger
   819  		}
   820  	}
   821  	memstats.next_gc = goal
   822  	if trace.enabled {
   823  		traceNextGC()
   824  	}
   825  
   826  	// Update mark pacing.
   827  	if gcphase != _GCoff {
   828  		gcController.revise()
   829  	}
   830  
   831  	// Update sweep pacing.
   832  	if gosweepdone() {
   833  		mheap_.sweepPagesPerByte = 0
   834  	} else {
   835  		// Concurrent sweep needs to sweep all of the in-use
   836  		// pages by the time the allocated heap reaches the GC
   837  		// trigger. Compute the ratio of in-use pages to sweep
   838  		// per byte allocated, accounting for the fact that
   839  		// some might already be swept.
   840  		heapLiveBasis := atomic.Load64(&memstats.heap_live)
   841  		heapDistance := int64(trigger) - int64(heapLiveBasis)
   842  		// Add a little margin so rounding errors and
   843  		// concurrent sweep are less likely to leave pages
   844  		// unswept when GC starts.
   845  		heapDistance -= 1024 * 1024
   846  		if heapDistance < _PageSize {
   847  			// Avoid setting the sweep ratio extremely high
   848  			heapDistance = _PageSize
   849  		}
   850  		pagesSwept := atomic.Load64(&mheap_.pagesSwept)
   851  		sweepDistancePages := int64(mheap_.pagesInUse) - int64(pagesSwept)
   852  		if sweepDistancePages <= 0 {
   853  			mheap_.sweepPagesPerByte = 0
   854  		} else {
   855  			mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
   856  			mheap_.sweepHeapLiveBasis = heapLiveBasis
   857  			// Write pagesSweptBasis last, since this
   858  			// signals concurrent sweeps to recompute
   859  			// their debt.
   860  			atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept)
   861  		}
   862  	}
   863  }
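
         // Illustrative numbers for the sweep pacing above: with 512 unswept
         // in-use pages and 4MB of heap distance left after the 1MB margin,
         // sweepPagesPerByte is 512/4M, so allocating 1MB must sweep about
         // 128 pages.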
   864  
   865  // gcGoalUtilization is the goal CPU utilization for background
   866  // marking as a fraction of GOMAXPROCS.
   867  const gcGoalUtilization = 0.25
   868  
   869  // gcCreditSlack is the amount of scan work credit that can
   870  // accumulate locally before updating gcController.scanWork and,
   871  // optionally, gcController.bgScanCredit. Lower values give a more
   872  // accurate assist ratio and make it more likely that assists will
   873  // successfully steal background credit. Higher values reduce memory
   874  // contention.
   875  const gcCreditSlack = 2000
   876  
   877  // gcAssistTimeSlack is the nanoseconds of mutator assist time that
   878  // can accumulate on a P before updating gcController.assistTime.
   879  const gcAssistTimeSlack = 5000
   880  
   881  // gcOverAssistWork determines how many extra units of scan work a GC
   882  // assist does when an assist happens. This amortizes the cost of an
   883  // assist by pre-paying for this many bytes of future allocations.
   884  const gcOverAssistWork = 64 << 10
   885  
   886  var work struct {
   887  	full  lfstack                  // lock-free list of full blocks workbuf
   888  	empty lfstack                  // lock-free list of empty blocks workbuf
   889  	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
   890  
   891  	wbufSpans struct {
   892  		lock mutex
   893  		// free is a list of spans dedicated to workbufs, but
   894  		// that don't currently contain any workbufs.
   895  		free mSpanList
   896  		// busy is a list of all spans containing workbufs on
   897  		// one of the workbuf lists.
   898  		busy mSpanList
   899  	}
   900  
   901  	// Restore 64-bit alignment on 32-bit.
   902  	_ uint32
   903  
   904  	// bytesMarked is the number of bytes marked this cycle. This
   905  	// includes bytes blackened in scanned objects, noscan objects
   906  	// that go straight to black, and permagrey objects scanned by
   907  	// markroot during the concurrent scan phase. This is updated
   908  	// atomically during the cycle. Updates may be batched
   909  	// arbitrarily, since the value is only read at the end of the
   910  	// cycle.
   911  	//
   912  	// Because of benign races during marking, this number may not
   913  	// be the exact number of marked bytes, but it should be very
   914  	// close.
   915  	//
   916  	// Put this field here because it needs 64-bit atomic access
   917  	// (and thus 8-byte alignment even on 32-bit architectures).
   918  	bytesMarked uint64
   919  
   920  	markrootNext uint32 // next markroot job
   921  	markrootJobs uint32 // number of markroot jobs
   922  
   923  	nproc   uint32
   924  	tstart  int64
   925  	nwait   uint32
   926  	ndone   uint32
   927  	alldone note
   928  
   929  	// helperDrainBlock indicates that GC mark termination helpers
   930  	// should pass gcDrainBlock to gcDrain to block in the
   931  	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
   932  	//
   933  	// TODO: This is a temporary fallback to work around races
   934  	// that cause early mark termination.
   935  	helperDrainBlock bool
   936  
   937  	// Number of roots of various root types. Set by gcMarkRootPrepare.
   938  	nFlushCacheRoots                               int
   939  	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
   940  
   941  	// markrootDone indicates that roots have been marked at least
   942  	// once during the current GC cycle. This is checked by root
   943  	// marking operations that have to happen only during the
   944  	// first root marking pass, whether that's during the
   945  	// concurrent mark phase in current GC or mark termination in
   946  	// STW GC.
   947  	markrootDone bool
   948  
   949  	// Each type of GC state transition is protected by a lock.
   950  	// Since multiple threads can simultaneously detect the state
   951  	// transition condition, any thread that detects a transition
   952  	// condition must acquire the appropriate transition lock,
   953  	// re-check the transition condition and return if it no
   954  	// longer holds or perform the transition if it does.
   955  	// Likewise, any transition must invalidate the transition
   956  	// condition before releasing the lock. This ensures that each
   957  	// transition is performed by exactly one thread and threads
   958  	// that need the transition to happen block until it has
   959  	// happened.
   960  	//
   961  	// startSema protects the transition from "off" to mark or
   962  	// mark termination.
   963  	startSema uint32
   964  	// markDoneSema protects transitions from mark 1 to mark 2 and
   965  	// from mark 2 to mark termination.
   966  	markDoneSema uint32
   967  
   968  	// Background mark completion signaling.
   969  	bgMarkReady note   // signal background mark worker has started
   970  	bgMarkDone  uint32 // cas to 1 when at a background mark completion point
   971  
   972  	// mode is the concurrency mode of the current GC cycle.
   973  	mode gcMode
   974  
   975  	// userForced indicates the current GC cycle was forced by an
   976  	// explicit user call.
   977  	userForced bool
   978  
   979  	// totaltime is the CPU nanoseconds spent in GC since the
   980  	// program started if debug.gctrace > 0.
   981  	totaltime int64
   982  
   983  	// initialHeapLive is the value of memstats.heap_live at the
   984  	// beginning of this GC cycle.
   985  	initialHeapLive uint64
   986  
   987  	// assistQueue is a queue of assists that are blocked because
   988  	// there was neither enough credit to steal nor enough work to
   989  	// do.
   990  	assistQueue struct {
   991  		lock       mutex
   992  		head, tail guintptr
   993  	}
   994  
   995  	// sweepWaiters is a list of blocked goroutines to wake when
   996  	// we transition from mark termination to sweep.
   997  	sweepWaiters struct {
   998  		lock mutex
   999  		head guintptr
  1000  	}
  1001  
  1002  	// cycles is the number of completed GC cycles, where a GC
  1003  	// cycle is sweep termination, mark, mark termination, and
  1004  	// sweep. This differs from memstats.numgc, which is
  1005  	// incremented at mark termination.
  1006  	cycles uint32
  1007  
  1008  	// Timing/utilization stats for this cycle.
  1009  	stwprocs, maxprocs                 int32
  1010  	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start
  1011  
  1012  	pauseNS    int64 // total STW time this cycle
  1013  	pauseStart int64 // nanotime() of last STW
  1014  
  1015  	// debug.gctrace heap sizes for this cycle.
  1016  	heap0, heap1, heap2, heapGoal uint64
  1017  }
  1018  
  1019  // GC runs a garbage collection and blocks the caller until the
  1020  // garbage collection is complete. It may also block the entire
  1021  // program.
  1022  func GC() {
  1023  	// We consider a cycle to be: sweep termination, mark, mark
  1024  	// termination, and sweep. This function shouldn't return
  1025  	// until a full cycle has been completed, from beginning to
  1026  	// end. Hence, we always want to finish up the current cycle
  1027  	// and start a new one. That means:
  1028  	//
  1029  	// 1. In sweep termination, mark, or mark termination of cycle
  1030  	// N, wait until mark termination N completes and transitions
  1031  	// to sweep N.
  1032  	//
  1033  	// 2. In sweep N, help with sweep N.
  1034  	//
  1035  	// At this point we can begin a full cycle N+1.
  1036  	//
  1037  	// 3. Trigger cycle N+1 by starting sweep termination N+1.
  1038  	//
  1039  	// 4. Wait for mark termination N+1 to complete.
  1040  	//
  1041  	// 5. Help with sweep N+1 until it's done.
  1042  	//
  1043  	// This all has to be written to deal with the fact that the
  1044  	// GC may move ahead on its own. For example, when we block
  1045  	// until mark termination N, we may wake up in cycle N+2.
  1046  
  1047  	gp := getg()
  1048  
  1049  	// Prevent the GC phase or cycle count from changing.
  1050  	lock(&work.sweepWaiters.lock)
  1051  	n := atomic.Load(&work.cycles)
  1052  	if gcphase == _GCmark {
  1053  		// Wait until sweep termination, mark, and mark
  1054  		// termination of cycle N complete.
  1055  		gp.schedlink = work.sweepWaiters.head
  1056  		work.sweepWaiters.head.set(gp)
  1057  		goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1)
  1058  	} else {
  1059  		// We're in sweep N already.
  1060  		unlock(&work.sweepWaiters.lock)
  1061  	}
  1062  
  1063  	// We're now in sweep N or later. Trigger GC cycle N+1, which
  1064  	// will first finish sweep N if necessary and then enter sweep
  1065  	// termination N+1.
  1066  	gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1})
  1067  
  1068  	// Wait for mark termination N+1 to complete.
  1069  	lock(&work.sweepWaiters.lock)
  1070  	if gcphase == _GCmark && atomic.Load(&work.cycles) == n+1 {
  1071  		gp.schedlink = work.sweepWaiters.head
  1072  		work.sweepWaiters.head.set(gp)
  1073  		goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1)
  1074  	} else {
  1075  		unlock(&work.sweepWaiters.lock)
  1076  	}
  1077  
  1078  	// Finish sweep N+1 before returning. We do this both to
  1079  	// complete the cycle and because runtime.GC() is often used
  1080  	// as part of tests and benchmarks to get the system into a
  1081  	// relatively stable and isolated state.
  1082  	for atomic.Load(&work.cycles) == n+1 && gosweepone() != ^uintptr(0) {
  1083  		sweep.nbgsweep++
  1084  		Gosched()
  1085  	}
  1086  
  1087  	// Callers may assume that the heap profile reflects the
  1088  	// just-completed cycle when this returns (historically this
  1089  	// happened because this was a STW GC), but right now the
  1090  	// profile still reflects mark termination N, not N+1.
  1091  	//
  1092  	// As soon as all of the sweep frees from cycle N+1 are done,
  1093  	// we can go ahead and publish the heap profile.
  1094  	//
  1095  	// First, wait for sweeping to finish. (We know there are no
  1096  	// more spans on the sweep queue, but we may be concurrently
  1097  	// sweeping spans, so we have to wait.)
  1098  	for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
  1099  		Gosched()
  1100  	}
  1101  
  1102  	// Now we're really done with sweeping, so we can publish the
  1103  	// stable heap profile. Only do this if we haven't already hit
  1104  	// another mark termination.
  1105  	mp := acquirem()
  1106  	cycle := atomic.Load(&work.cycles)
  1107  	if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) {
  1108  		mProf_PostSweep()
  1109  	}
  1110  	releasem(mp)
  1111  }
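
         // Tests and benchmarks commonly use this to reach a quiescent heap
         // before measuring (an illustrative caller, not code in this file):
         //
         //	var ms runtime.MemStats
         //	runtime.GC()
         //	runtime.ReadMemStats(&ms) // heap stats now reflect the completed cycle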
  1112  
  1113  // gcMode indicates how concurrent a GC cycle should be.
  1114  type gcMode int
  1115  
  1116  const (
  1117  	gcBackgroundMode gcMode = iota // concurrent GC and sweep
  1118  	gcForceMode                    // stop-the-world GC now, concurrent sweep
  1119  	gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
  1120  )
  1121  
  1122  // A gcTrigger is a predicate for starting a GC cycle. Specifically,
  1123  // it is an exit condition for the _GCoff phase.
  1124  type gcTrigger struct {
  1125  	kind gcTriggerKind
  1126  	now  int64  // gcTriggerTime: current time
  1127  	n    uint32 // gcTriggerCycle: cycle number to start
  1128  }
  1129  
  1130  type gcTriggerKind int
  1131  
  1132  const (
  1133  	// gcTriggerAlways indicates that a cycle should be started
  1134  	// unconditionally, even if GOGC is off or we're in a cycle
  1135  	// right now. This cannot be consolidated with other cycles.
  1136  	gcTriggerAlways gcTriggerKind = iota
  1137  
  1138  	// gcTriggerHeap indicates that a cycle should be started when
  1139  	// the heap size reaches the trigger heap size computed by the
  1140  	// controller.
  1141  	gcTriggerHeap
  1142  
  1143  	// gcTriggerTime indicates that a cycle should be started when
  1144  	// it's been more than forcegcperiod nanoseconds since the
  1145  	// previous GC cycle.
  1146  	gcTriggerTime
  1147  
  1148  	// gcTriggerCycle indicates that a cycle should be started if
  1149  	// we have not yet started cycle number gcTrigger.n (relative
  1150  	// to work.cycles).
  1151  	gcTriggerCycle
  1152  )
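
         // For example, runtime.GC above starts a cycle with
         //
         //	gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1})
         //
         // while the heap and time triggers are tested by callers outside
         // this file (the allocator and the periodic forced GC,
         // respectively).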
  1153  
  1154  // test returns true if the trigger condition is satisfied, meaning
  1155  // that the exit condition for the _GCoff phase has been met. The exit
  1156  // condition should be tested when allocating.
  1157  func (t gcTrigger) test() bool {
  1158  	if !memstats.enablegc || panicking != 0 {
  1159  		return false
  1160  	}
  1161  	if t.kind == gcTriggerAlways {
  1162  		return true
  1163  	}
  1164  	if gcphase != _GCoff || gcpercent < 0 {
  1165  		return false
  1166  	}
  1167  	switch t.kind {
  1168  	case gcTriggerHeap:
  1169  		// Non-atomic access to heap_live for performance. If
  1170  		// we are going to trigger on this, this thread just
  1171  		// atomically wrote heap_live anyway and we'll see our
  1172  		// own write.
  1173  		return memstats.heap_live >= memstats.gc_trigger
  1174  	case gcTriggerTime:
  1175  		lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
  1176  		return lastgc != 0 && t.now-lastgc > forcegcperiod
  1177  	case gcTriggerCycle:
  1178  		// t.n > work.cycles, but accounting for wraparound.
  1179  		return int32(t.n-work.cycles) > 0
  1180  	}
  1181  	return true
  1182  }
  1183  
  1184  // gcStart transitions the GC from _GCoff to _GCmark (in
  1185  // gcBackgroundMode) or _GCmarktermination (in the STW modes) by
  1186  // performing sweep termination and GC initialization.
  1187  //
  1188  // This may return without performing this transition in some cases,
  1189  // such as when called on a system stack or with locks held.
  1190  func gcStart(mode gcMode, trigger gcTrigger) {
  1191  	// Since this is called from malloc and malloc is called in
  1192  	// the guts of a number of libraries that might be holding
  1193  	// locks, don't attempt to start GC in non-preemptible or
  1194  	// potentially unstable situations.
  1195  	mp := acquirem()
  1196  	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
  1197  		releasem(mp)
  1198  		return
  1199  	}
  1200  	releasem(mp)
  1201  	mp = nil
  1202  
  1203  	// Pick up the remaining unswept/not being swept spans concurrently
  1204  	//
  1205  	// This shouldn't happen if we're being invoked in background
  1206  	// mode since proportional sweep should have just finished
  1207  	// sweeping everything, but rounding errors, etc, may leave a
  1208  	// few spans unswept. In forced mode, this is necessary since
  1209  	// GC can be forced at any point in the sweeping cycle.
  1210  	//
  1211  	// We check the transition condition continuously here in case
  1212  	// this G gets delayed into the next GC cycle.
  1213  	for trigger.test() && gosweepone() != ^uintptr(0) {
  1214  		sweep.nbgsweep++
  1215  	}
  1216  
  1217  	// Perform GC initialization and the sweep termination
  1218  	// transition.
  1219  	semacquire(&work.startSema)
  1220  	// Re-check transition condition under transition lock.
  1221  	if !trigger.test() {
  1222  		semrelease(&work.startSema)
  1223  		return
  1224  	}
  1225  
  1226  	// For stats, check if this GC was forced by the user.
  1227  	work.userForced = trigger.kind == gcTriggerAlways || trigger.kind == gcTriggerCycle
  1228  
  1229  	// In gcstoptheworld debug mode, upgrade the mode accordingly.
  1230  	// We do this after re-checking the transition condition so
  1231  	// that multiple goroutines that detect the heap trigger don't
  1232  	// start multiple STW GCs.
  1233  	if mode == gcBackgroundMode {
  1234  		if debug.gcstoptheworld == 1 {
  1235  			mode = gcForceMode
  1236  		} else if debug.gcstoptheworld == 2 {
  1237  			mode = gcForceBlockMode
  1238  		}
  1239  	}
  1240  
  1241  	// Ok, we're doing it!  Stop everybody else
  1242  	semacquire(&worldsema)
  1243  
  1244  	if trace.enabled {
  1245  		traceGCStart()
  1246  	}
  1247  
  1248  	if mode == gcBackgroundMode {
  1249  		gcBgMarkStartWorkers()
  1250  	}
  1251  
  1252  	gcResetMarkState()
  1253  
  1254  	now := nanotime()
  1255  	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
  1256  	work.tSweepTerm = now
  1257  	work.heap0 = atomic.Load64(&memstats.heap_live)
  1258  	work.pauseNS = 0
  1259  	work.mode = mode
  1260  
  1261  	work.pauseStart = now
  1262  	systemstack(stopTheWorldWithSema)
  1263  	// Finish sweep before we start concurrent scan.
  1264  	systemstack(func() {
  1265  		finishsweep_m()
  1266  	})
  1267  	// clearpools before we start the GC. If we wait, the memory will not be
  1268  	// reclaimed until the next GC cycle.
  1269  	clearpools()
  1270  
  1271  	work.cycles++
  1272  	if mode == gcBackgroundMode { // Do as much work concurrently as possible
  1273  		gcController.startCycle()
  1274  		work.heapGoal = memstats.next_gc
  1275  
  1276  		// Enter concurrent mark phase and enable
  1277  		// write barriers.
  1278  		//
  1279  		// Because the world is stopped, all Ps will
  1280  		// observe that write barriers are enabled by
  1281  		// the time we start the world and begin
  1282  		// scanning.
  1283  		//
  1284  		// Write barriers must be enabled before assists are
  1285  		// enabled because they must be enabled before
  1286  		// any non-leaf heap objects are marked. Since
  1287  		// allocations are blocked until assists can
  1288  		// happen, we want to enable assists as early as
  1289  		// possible.
  1290  		setGCPhase(_GCmark)
  1291  
  1292  		gcBgMarkPrepare() // Must happen before assist enable.
  1293  		gcMarkRootPrepare()
  1294  
  1295  		// Mark all active tinyalloc blocks. Since we're
  1296  		// allocating from these, they need to be black like
  1297  		// other allocations. The alternative is to blacken
  1298  		// the tiny block on every allocation from it, which
  1299  		// would slow down the tiny allocator.
  1300  		gcMarkTinyAllocs()
  1301  
  1302  		// At this point all Ps have enabled the write
  1303  		// barrier, thus maintaining the no-white-to-black
  1304  		// invariant. Enable mutator assists to
  1305  		// put back-pressure on fast allocating
  1306  		// mutators.
  1307  		atomic.Store(&gcBlackenEnabled, 1)
  1308  
  1309  		// Assists and workers can start the moment we start
  1310  		// the world.
  1311  		gcController.markStartTime = now
  1312  
  1313  		// Concurrent mark.
  1314  		systemstack(startTheWorldWithSema)
  1315  		now = nanotime()
  1316  		work.pauseNS += now - work.pauseStart
  1317  		work.tMark = now
  1318  	} else {
  1319  		t := nanotime()
  1320  		work.tMark, work.tMarkTerm = t, t
  1321  		work.heapGoal = work.heap0
  1322  
  1323  		// Perform mark termination. This will restart the world.
  1324  		gcMarkTermination(memstats.triggerRatio)
  1325  	}
  1326  
  1327  	semrelease(&work.startSema)
  1328  }
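
        // Illustrative sketch (not part of the original source): gcStart is
        // reached from several call sites with different triggers, roughly:
        //
        //	gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerHeap})                  // mallocgc: heap trigger crossed
        //	gcStart(gcForceBlockMode, gcTrigger{kind: gcTriggerCycle, n: n + 1})       // runtime.GC: force a full cycle
        //	gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerTime, now: nanotime()}) // forcegchelper: periodic GC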
  1329  
  1330  // gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
  1331  // to mark termination.
  1332  //
  1333  // This should be called when all mark work has been drained. In mark
  1334  // 1, this includes all root marking jobs, global work buffers, and
  1335  // active work buffers in assists and background workers; however,
  1336  // work may still be cached in per-P work buffers. In mark 2, per-P
  1337  // caches are disabled.
  1338  //
  1339  // The calling context must be preemptible.
  1340  //
  1341  // Note that it is explicitly okay to have write barriers in this
  1342  // function because completion of concurrent mark is best-effort
  1343  // anyway. Any work created by write barriers here will be cleaned up
  1344  // by mark termination.
  1345  func gcMarkDone() {
  1346  top:
  1347  	semacquire(&work.markDoneSema)
  1348  
  1349  	// Re-check transition condition under transition lock.
  1350  	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
  1351  		semrelease(&work.markDoneSema)
  1352  		return
  1353  	}
  1354  
  1355  	// Disallow starting new workers so that any remaining workers
  1356  	// in the current mark phase will drain out.
  1357  	//
  1358  	// TODO(austin): Should dedicated workers keep an eye on this
  1359  	// and exit gcDrain promptly?
  1360  	atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
  1361  	atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)
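        	// (Illustrative note, not in the original source: the large
        	// negative adjustment drives both "needed" counts far below
        	// zero, so the scheduler will not start new mark workers; the
        	// matching +0xffffffff additions below restore the counts for
        	// the mark 2 transition.)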
  1362  
  1363  	if !gcBlackenPromptly {
  1364  		// Transition from mark 1 to mark 2.
  1365  		//
  1366  		// The global work list is empty, but there can still be work
  1367  		// sitting in the per-P work caches.
  1368  		// Flush and disable work caches.
  1369  
  1370  		// Disallow caching workbufs and indicate that we're in mark 2.
  1371  		gcBlackenPromptly = true
  1372  
  1373  		// Prevent completion of mark 2 until we've flushed
  1374  		// cached workbufs.
  1375  		atomic.Xadd(&work.nwait, -1)
  1376  
  1377  		// GC is set up for mark 2. Let Gs blocked on the
  1378  		// transition lock go while we flush caches.
  1379  		semrelease(&work.markDoneSema)
  1380  
  1381  		systemstack(func() {
  1382  			// Flush all currently cached workbufs and
  1383  			// ensure all Ps see gcBlackenPromptly. This
  1384  			// also blocks until any remaining mark 1
  1385  			// workers have exited their loop so we can
  1386  			// start new mark 2 workers.
  1387  			forEachP(func(_p_ *p) {
  1388  				_p_.gcw.dispose()
  1389  			})
  1390  		})
  1391  
  1392  		// Check that roots are marked. We should be able to
  1393  		// do this before the forEachP, but based on issue
  1394  		// #16083 there may be a (harmless) race where we can
  1395  		// enter mark 2 while some workers are still scanning
  1396  		// stacks. The forEachP ensures these scans are done.
  1397  		//
  1398  		// TODO(austin): Figure out the race and fix this
  1399  		// properly.
  1400  		gcMarkRootCheck()
  1401  
  1402  		// Now we can start up mark 2 workers.
  1403  		atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
  1404  		atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)
  1405  
  1406  		incnwait := atomic.Xadd(&work.nwait, +1)
  1407  		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
  1408  			// This loop will make progress because
  1409  			// gcBlackenPromptly is now true, so it won't
  1410  			// take this same "if" branch.
  1411  			goto top
  1412  		}
  1413  	} else {
  1414  		// Transition to mark termination.
  1415  		now := nanotime()
  1416  		work.tMarkTerm = now
  1417  		work.pauseStart = now
  1418  		getg().m.preemptoff = "gcing"
  1419  		systemstack(stopTheWorldWithSema)
  1420  		// The gcphase is _GCmark; it will transition to _GCmarktermination
  1421  		// below. The important thing is that the write barrier remains
  1422  		// active until all marking is complete, including writes made by the GC.
  1423  
  1424  		// Record that one root marking pass has completed.
  1425  		work.markrootDone = true
  1426  
  1427  		// Disable assists and background workers. We must do
  1428  		// this before waking blocked assists.
  1429  		atomic.Store(&gcBlackenEnabled, 0)
  1430  
  1431  		// Wake all blocked assists. These will run when we
  1432  		// start the world again.
  1433  		gcWakeAllAssists()
  1434  
  1435  		// Likewise, release the transition lock. Blocked
  1436  		// workers and assists will run when we start the
  1437  		// world again.
  1438  		semrelease(&work.markDoneSema)
  1439  
  1440  		// endCycle depends on all gcWork cache stats being
  1441  		// flushed. This is ensured by mark 2.
  1442  		nextTriggerRatio := gcController.endCycle()
  1443  
  1444  		// Perform mark termination. This will restart the world.
  1445  		gcMarkTermination(nextTriggerRatio)
  1446  	}
  1447  }
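
        // Illustrative summary (not part of the original source) of the
        // transitions driven by gcMarkDone:
        //
        //	mark 1 --(global work drained)--> mark 2           (flush/disable per-P caches)
        //	mark 2 --(all work drained)-----> mark termination (STW; gcMarkTermination)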
  1448  
  1449  func gcMarkTermination(nextTriggerRatio float64) {
  1450  	// World is stopped.
  1451  	// Start marktermination which includes enabling the write barrier.
  1452  	atomic.Store(&gcBlackenEnabled, 0)
  1453  	gcBlackenPromptly = false
  1454  	setGCPhase(_GCmarktermination)
  1455  
  1456  	work.heap1 = memstats.heap_live
  1457  	startTime := nanotime()
  1458  
  1459  	mp := acquirem()
  1460  	mp.preemptoff = "gcing"
  1461  	_g_ := getg()
  1462  	_g_.m.traceback = 2
  1463  	gp := _g_.m.curg
  1464  	casgstatus(gp, _Grunning, _Gwaiting)
  1465  	gp.waitreason = "garbage collection"
  1466  
  1467  	// Run gc on the g0 stack. We do this so that the g stack
  1468  	// we're currently running on will no longer change. This cuts
  1469  	// the root set down a bit (g0 stacks are not scanned, and
  1470  	// we don't need to scan gc's internal state). We also
  1471  	// need to switch to g0 so we can shrink the stack.
  1472  	systemstack(func() {
  1473  		gcMark(startTime)
  1474  		// Must return immediately.
  1475  		// The outer function's stack may have moved
  1476  		// during gcMark (it shrinks stacks, including the
  1477  		// outer function's stack), so we must not refer
  1478  		// to any of its variables. Return back to the
  1479  		// non-system stack to pick up the new addresses
  1480  		// before continuing.
  1481  	})
  1482  
  1483  	systemstack(func() {
  1484  		work.heap2 = work.bytesMarked
  1485  		if debug.gccheckmark > 0 {
  1486  			// Run a full stop-the-world mark using checkmark bits,
  1487  			// to check that we didn't forget to mark anything during
  1488  			// the concurrent mark process.
  1489  			gcResetMarkState()
  1490  			initCheckmarks()
  1491  			gcMark(startTime)
  1492  			clearCheckmarks()
  1493  		}
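        		// (Illustrative note, not part of the original source:
        		// this checkmark verification pass is enabled with
        		// GODEBUG=gccheckmark=1.)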
  1494  
  1495  		// marking is complete, so we can turn the write barrier off
  1496  		setGCPhase(_GCoff)
  1497  		gcSweep(work.mode)
  1498  
  1499  		if debug.gctrace > 1 {
  1500  			startTime = nanotime()
  1501  			// The g stacks have been scanned so
  1502  			// they have gcscanvalid==true and gcworkdone==true.
  1503  			// Reset these so that all stacks will be rescanned.
  1504  			gcResetMarkState()
  1505  			finishsweep_m()
  1506  
  1507  			// Still in STW but gcphase is _GCoff; reset to _GCmarktermination.
  1508  			// At this point all objects will be found during gcMark, which
  1509  			// does a complete STW mark and object scan.
  1510  			setGCPhase(_GCmarktermination)
  1511  			gcMark(startTime)
  1512  			setGCPhase(_GCoff) // marking is done, turn off wb.
  1513  			gcSweep(work.mode)
  1514  		}
  1515  	})
  1516  
  1517  	_g_.m.traceback = 0
  1518  	casgstatus(gp, _Gwaiting, _Grunning)
  1519  
  1520  	if trace.enabled {
  1521  		traceGCDone()
  1522  	}
  1523  
  1524  	// all done
  1525  	mp.preemptoff = ""
  1526  
  1527  	if gcphase != _GCoff {
  1528  		throw("gc done but gcphase != _GCoff")
  1529  	}
  1530  
  1531  	// Update GC trigger and pacing for the next cycle.
  1532  	gcSetTriggerRatio(nextTriggerRatio)
  1533  
  1534  	// Update timing memstats
  1535  	now := nanotime()
  1536  	sec, nsec, _ := time_now()
  1537  	unixNow := sec*1e9 + int64(nsec)
  1538  	work.pauseNS += now - work.pauseStart
  1539  	work.tEnd = now
  1540  	atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
  1541  	atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
  1542  	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
  1543  	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
  1544  	memstats.pause_total_ns += uint64(work.pauseNS)
  1545  
  1546  	// Update work.totaltime.
  1547  	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
  1548  	// We report idle marking time below, but omit it from the
  1549  	// overall utilization here since it's "free".
  1550  	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
  1551  	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
  1552  	cycleCpu := sweepTermCpu + markCpu + markTermCpu
  1553  	work.totaltime += cycleCpu
  1554  
  1555  	// Compute overall GC CPU utilization.
  1556  	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
  1557  	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)
  1558  
  1559  	// Reset sweep state.
  1560  	sweep.nbgsweep = 0
  1561  	sweep.npausesweep = 0
  1562  
  1563  	if work.userForced {
  1564  		memstats.numforcedgc++
  1565  	}
  1566  
  1567  	// Bump GC cycle count and wake goroutines waiting on sweep.
  1568  	lock(&work.sweepWaiters.lock)
  1569  	memstats.numgc++
  1570  	injectglist(work.sweepWaiters.head.ptr())
  1571  	work.sweepWaiters.head = 0
  1572  	unlock(&work.sweepWaiters.lock)
  1573  
  1574  	// Finish the current heap profiling cycle and start a new
  1575  	// heap profiling cycle. We do this before starting the world
  1576  	// so events don't leak into the wrong cycle.
  1577  	mProf_NextCycle()
  1578  
  1579  	systemstack(startTheWorldWithSema)
  1580  
  1581  	// Flush the heap profile so we can start a new cycle next GC.
  1582  	// This is relatively expensive, so we don't do it with the
  1583  	// world stopped.
  1584  	mProf_Flush()
  1585  
  1586  	// Prepare workbufs for freeing by the sweeper. We do this
  1587  	// asynchronously because it can take non-trivial time.
  1588  	prepareFreeWorkbufs()
  1589  
  1590  	// Free stack spans. This must be done between GC cycles.
  1591  	systemstack(freeStackSpans)
  1592  
  1593  	// Print gctrace before dropping worldsema. As soon as we drop
  1594  	// worldsema another cycle could start and smash the stats
  1595  	// we're trying to print.
  1596  	if debug.gctrace > 0 {
  1597  		util := int(memstats.gc_cpu_fraction * 100)
  1598  
  1599  		var sbuf [24]byte
  1600  		printlock()
  1601  		print("gc ", memstats.numgc,
  1602  			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
  1603  			util, "%: ")
  1604  		prev := work.tSweepTerm
  1605  		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
  1606  			if i != 0 {
  1607  				print("+")
  1608  			}
  1609  			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
  1610  			prev = ns
  1611  		}
  1612  		print(" ms clock, ")
  1613  		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
  1614  			if i == 2 || i == 3 {
  1615  				// Separate mark time components with /.
  1616  				print("/")
  1617  			} else if i != 0 {
  1618  				print("+")
  1619  			}
  1620  			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
  1621  		}
  1622  		print(" ms cpu, ",
  1623  			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
  1624  			work.heapGoal>>20, " MB goal, ",
  1625  			work.maxprocs, " P")
  1626  		if work.userForced {
  1627  			print(" (forced)")
  1628  		}
  1629  		print("\n")
  1630  		printunlock()
  1631  	}
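
        	// Example of the line printed above (illustrative values only):
        	//
        	//	gc 7 @2.104s 1%: 0.12+1.5+0.28 ms clock, 0.95+0.38/1.2/2.9+2.2 ms cpu, 4->4->2 MB, 5 MB goal, 8 P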
  1632  
  1633  	semrelease(&worldsema)
  1634  	// Careful: another GC cycle may start now.
  1635  
  1636  	releasem(mp)
  1637  	mp = nil
  1638  
  1639  	// now that gc is done, kick off finalizer thread if needed
  1640  	if !concurrentSweep {
  1641  		// give the queued finalizers, if any, a chance to run
  1642  		Gosched()
  1643  	}
  1644  }
  1645  
  1646  // gcBgMarkStartWorkers prepares background mark worker goroutines.
  1647  // These goroutines will not run until the mark phase, but they must
  1648  // be started while the world is not stopped and from a regular G
  1649  // stack. The caller must hold worldsema.
  1650  func gcBgMarkStartWorkers() {
  1651  	// Background marking is performed by per-P G's. Ensure that
  1652  	// each P has a background GC G.
  1653  	for _, p := range &allp {
  1654  		if p == nil || p.status == _Pdead {
  1655  			break
  1656  		}
  1657  		if p.gcBgMarkWorker == 0 {
  1658  			go gcBgMarkWorker(p)
  1659  			notetsleepg(&work.bgMarkReady, -1)
  1660  			noteclear(&work.bgMarkReady)
  1661  		}
  1662  	}
  1663  }
  1664  
  1665  // gcBgMarkPrepare sets up state for background marking.
  1666  // Mutator assists must not yet be enabled.
  1667  func gcBgMarkPrepare() {
  1668  	// Background marking will stop when the work queues are empty
  1669  	// and there are no more workers (note that, since this is
  1670  	// concurrent, this may be a transient state, but mark
  1671  	// termination will clean it up). Between background workers
  1672  	// and assists, we don't really know how many workers there
  1673  	// will be, so we pretend to have an arbitrarily large number
  1674  	// of workers, almost all of which are "waiting". While a
  1675  	// worker is working it decrements nwait. If nproc == nwait,
  1676  	// there are no workers.
  1677  	work.nproc = ^uint32(0)
  1678  	work.nwait = ^uint32(0)
  1679  }
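
        // Worked example (illustrative, not part of the original source):
        // with nproc == nwait == ^uint32(0) == 0xffffffff, each worker
        // entering its drain loop decrements nwait (atomic.Xadd(&work.nwait, -1)),
        // so nwait != nproc while any worker is running; when the last
        // active worker increments nwait back to 0xffffffff and finds no
        // work available, nwait == nproc signals completion and it calls
        // gcMarkDone.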
  1680  
  1681  func gcBgMarkWorker(_p_ *p) {
  1682  	gp := getg()
  1683  
  1684  	type parkInfo struct {
  1685  		m      muintptr // Release this m on park.
  1686  		attach puintptr // If non-nil, attach to this p on park.
  1687  	}
  1688  	// We pass park to a gopark unlock function, so it can't be on
  1689  	// the stack (see gopark). Prevent deadlock from recursively
  1690  	// starting GC by disabling preemption.
  1691  	gp.m.preemptoff = "GC worker init"
  1692  	park := new(parkInfo)
  1693  	gp.m.preemptoff = ""
  1694  
  1695  	park.m.set(acquirem())
  1696  	park.attach.set(_p_)
  1697  	// Inform gcBgMarkStartWorkers that this worker is ready.
  1698  	// After this point, the background mark worker is scheduled
  1699  	// cooperatively by gcController.findRunnable. Hence, it must
  1700  	// never be preempted, as this would put it into _Grunnable
  1701  	// and put it on a run queue. Instead, when the preempt flag
  1702  // is set, it puts itself into _Gwaiting to be woken up by
  1703  	// gcController.findRunnable at the appropriate time.
  1704  	notewakeup(&work.bgMarkReady)
  1705  
  1706  	for {
  1707  		// Go to sleep until woken by gcController.findRunnable.
  1708  		// We can't releasem yet since even the call to gopark
  1709  		// may be preempted.
  1710  		gopark(func(g *g, parkp unsafe.Pointer) bool {
  1711  			park := (*parkInfo)(parkp)
  1712  
  1713  			// The worker G is no longer running, so it's
  1714  			// now safe to allow preemption.
  1715  			releasem(park.m.ptr())
  1716  
  1717  			// If the worker isn't attached to its P,
  1718  			// attach now. During initialization and after
  1719  			// a phase change, the worker may have been
  1720  			// running on a different P. As soon as we
  1721  			// attach, the owner P may schedule the
  1722  			// worker, so this must be done after the G is
  1723  			// stopped.
  1724  			if park.attach != 0 {
  1725  				p := park.attach.ptr()
  1726  				park.attach.set(nil)
  1727  				// cas the worker because we may be
  1728  				// racing with a new worker starting
  1729  				// on this P.
  1730  				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
  1731  					// The P got a new worker.
  1732  					// Exit this worker.
  1733  					return false
  1734  				}
  1735  			}
  1736  			return true
  1737  		}, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)
  1738  
  1739  		// Loop until the P dies and disassociates this
  1740  		// worker (the P may later be reused, in which case
  1741  		// it will get a new worker) or we fail to associate.
  1742  		if _p_.gcBgMarkWorker.ptr() != gp {
  1743  			break
  1744  		}
  1745  
  1746  		// Disable preemption so we can use the gcw. If the
  1747  		// scheduler wants to preempt us, we'll stop draining,
  1748  		// dispose the gcw, and then preempt.
  1749  		park.m.set(acquirem())
  1750  
  1751  		if gcBlackenEnabled == 0 {
  1752  			throw("gcBgMarkWorker: blackening not enabled")
  1753  		}
  1754  
  1755  		startTime := nanotime()
  1756  
  1757  		decnwait := atomic.Xadd(&work.nwait, -1)
  1758  		if decnwait == work.nproc {
  1759  			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
  1760  			throw("work.nwait was > work.nproc")
  1761  		}
  1762  
  1763  		systemstack(func() {
  1764  			// Mark our goroutine preemptible so its stack
  1765  			// can be scanned. This lets two mark workers
  1766  			// scan each other (otherwise, they would
  1767  			// deadlock). We must not modify anything on
  1768  			// the G stack. However, stack shrinking is
  1769  			// disabled for mark workers, so it is safe to
  1770  			// read from the G stack.
  1771  			casgstatus(gp, _Grunning, _Gwaiting)
  1772  			switch _p_.gcMarkWorkerMode {
  1773  			default:
  1774  				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
  1775  			case gcMarkWorkerDedicatedMode:
  1776  				gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
  1777  			case gcMarkWorkerFractionalMode:
  1778  				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
  1779  			case gcMarkWorkerIdleMode:
  1780  				gcDrain(&_p_.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
  1781  			}
  1782  			casgstatus(gp, _Gwaiting, _Grunning)
  1783  		})
  1784  
  1785  		// If we are nearing the end of mark, dispose
  1786  		// of the cache promptly. We must do this
  1787  		// before signaling that we're no longer
  1788  		// working so that other workers can't observe
  1789  		// no workers and no work while we have this
  1790  		// cached, and before we compute done.
  1791  		if gcBlackenPromptly {
  1792  			_p_.gcw.dispose()
  1793  		}
  1794  
  1795  		// Account for time.
  1796  		duration := nanotime() - startTime
  1797  		switch _p_.gcMarkWorkerMode {
  1798  		case gcMarkWorkerDedicatedMode:
  1799  			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
  1800  			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
  1801  		case gcMarkWorkerFractionalMode:
  1802  			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
  1803  			atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
  1804  		case gcMarkWorkerIdleMode:
  1805  			atomic.Xaddint64(&gcController.idleMarkTime, duration)
  1806  		}
  1807  
  1808  		// Was this the last worker and did we run out
  1809  		// of work?
  1810  		incnwait := atomic.Xadd(&work.nwait, +1)
  1811  		if incnwait > work.nproc {
  1812  			println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode,
  1813  				"work.nwait=", incnwait, "work.nproc=", work.nproc)
  1814  			throw("work.nwait > work.nproc")
  1815  		}
  1816  
  1817  		// If this worker reached a background mark completion
  1818  		// point, signal the main GC goroutine.
  1819  		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
  1820  			// Make this G preemptible and disassociate it
  1821  			// as the worker for this P so
  1822  			// findRunnableGCWorker doesn't try to
  1823  			// schedule it.
  1824  			_p_.gcBgMarkWorker.set(nil)
  1825  			releasem(park.m.ptr())
  1826  
  1827  			gcMarkDone()
  1828  
  1829  			// Disable preemption and prepare to reattach
  1830  			// to the P.
  1831  			//
  1832  			// We may be running on a different P at this
  1833  			// point, so we can't reattach until this G is
  1834  			// parked.
  1835  			park.m.set(acquirem())
  1836  			park.attach.set(_p_)
  1837  		}
  1838  	}
  1839  }
  1840  
  1841  // gcMarkWorkAvailable returns true if executing a mark worker
  1842  // on p is potentially useful. p may be nil, in which case it only
  1843  // checks the global sources of work.
  1844  func gcMarkWorkAvailable(p *p) bool {
  1845  	if p != nil && !p.gcw.empty() {
  1846  		return true
  1847  	}
  1848  	if !work.full.empty() {
  1849  		return true // global work available
  1850  	}
  1851  	if work.markrootNext < work.markrootJobs {
  1852  		return true // root scan work available
  1853  	}
  1854  	return false
  1855  }
  1856  
  1857  // gcMark runs the mark (or, for concurrent GC, mark termination).
  1858  // All gcWork caches must be empty.
  1859  // STW is in effect at this point.
  1860  //TODO go:nowritebarrier
  1861  func gcMark(start_time int64) {
  1862  	if debug.allocfreetrace > 0 {
  1863  		tracegc()
  1864  	}
  1865  
  1866  	if gcphase != _GCmarktermination {
  1867  		throw("in gcMark expecting to see gcphase as _GCmarktermination")
  1868  	}
  1869  	work.tstart = start_time
  1870  
  1871  	// Queue root marking jobs.
  1872  	gcMarkRootPrepare()
  1873  
  1874  	work.nwait = 0
  1875  	work.ndone = 0
  1876  	work.nproc = uint32(gcprocs())
  1877  
  1878  	if work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots == 0 {
  1879  		// There's no work on the work queue and no root jobs
  1880  		// that can produce work, so don't bother entering the
  1881  		// getfull() barrier.
  1882  		//
  1883  		// This will be the situation the vast majority of the
  1884  		// time after concurrent mark. However, we still need
  1885  		// a fallback for STW GC and because there are some
  1886  		// known races that occasionally leave work around for
  1887  		// mark termination.
  1888  		//
  1889  		// We're still hedging our bets here: if we do
  1890  		// accidentally produce some work, we'll still process
  1891  		// it, just not necessarily in parallel.
  1892  		//
  1893  		// TODO(austin): Fix the races and remove
  1894  		// work draining from mark termination so we don't
  1895  		// need the fallback path.
  1896  		work.helperDrainBlock = false
  1897  	} else {
  1898  		work.helperDrainBlock = true
  1899  	}
  1900  
  1901  	if trace.enabled {
  1902  		traceGCScanStart()
  1903  	}
  1904  
  1905  	if work.nproc > 1 {
  1906  		noteclear(&work.alldone)
  1907  		helpgc(int32(work.nproc))
  1908  	}
  1909  
  1910  	gchelperstart()
  1911  
  1912  	gcw := &getg().m.p.ptr().gcw
  1913  	if work.helperDrainBlock {
  1914  		gcDrain(gcw, gcDrainBlock)
  1915  	} else {
  1916  		gcDrain(gcw, gcDrainNoBlock)
  1917  	}
  1918  	gcw.dispose()
  1919  
  1920  	if debug.gccheckmark > 0 {
  1921  		// This is expensive when there's a large number of
  1922  		// Gs, so only do it if checkmark is also enabled.
  1923  		gcMarkRootCheck()
  1924  	}
  1925  	if work.full != 0 {
  1926  		throw("work.full != 0")
  1927  	}
  1928  
  1929  	if work.nproc > 1 {
  1930  		notesleep(&work.alldone)
  1931  	}
  1932  
  1933  	// Record that at least one root marking pass has completed.
  1934  	work.markrootDone = true
  1935  
  1936  	// Double-check that all gcWork caches are empty. This should
  1937  	// be ensured by mark 2 before we enter mark termination.
  1938  	for i := 0; i < int(gomaxprocs); i++ {
  1939  		gcw := &allp[i].gcw
  1940  		if !gcw.empty() {
  1941  			throw("P has cached GC work at end of mark termination")
  1942  		}
  1943  		if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
  1944  			throw("P has unflushed stats at end of mark termination")
  1945  		}
  1946  	}
  1947  
  1948  	if trace.enabled {
  1949  		traceGCScanDone()
  1950  	}
  1951  
  1952  	cachestats()
  1953  
  1954  	// Update the marked heap stat.
  1955  	memstats.heap_marked = work.bytesMarked
  1956  
  1957  	// Update other GC heap size stats. This must happen after
  1958  	// cachestats (which flushes local statistics to these) and
  1959  	// flushallmcaches (which modifies heap_live).
  1960  	memstats.heap_live = work.bytesMarked
  1961  	memstats.heap_scan = uint64(gcController.scanWork)
  1962  
  1963  	if trace.enabled {
  1964  		traceHeapAlloc()
  1965  	}
  1966  }
  1967  
  1968  func gcSweep(mode gcMode) {
  1969  	if gcphase != _GCoff {
  1970  		throw("gcSweep being done but phase is not GCoff")
  1971  	}
  1972  
  1973  	lock(&mheap_.lock)
  1974  	mheap_.sweepgen += 2
  1975  	mheap_.sweepdone = 0
  1976  	if mheap_.sweepSpans[mheap_.sweepgen/2%2].index != 0 {
  1977  		// We should have drained this list during the last
  1978  		// sweep phase. We certainly need to start this phase
  1979  		// with an empty swept list.
  1980  		throw("non-empty swept list")
  1981  	}
  1982  	mheap_.pagesSwept = 0
  1983  	unlock(&mheap_.lock)
  1984  
  1985  	if !_ConcurrentSweep || mode == gcForceBlockMode {
  1986  		// Special case synchronous sweep.
  1987  		// Record that no proportional sweeping has to happen.
  1988  		lock(&mheap_.lock)
  1989  		mheap_.sweepPagesPerByte = 0
  1990  		unlock(&mheap_.lock)
  1991  		// Sweep all spans eagerly.
  1992  		for sweepone() != ^uintptr(0) {
  1993  			sweep.npausesweep++
  1994  		}
  1995  		// Free workbufs eagerly.
  1996  		prepareFreeWorkbufs()
  1997  		for freeSomeWbufs(false) {
  1998  		}
  1999  		// All "free" events for this mark/sweep cycle have
  2000  		// now happened, so we can make this profile cycle
  2001  		// available immediately.
  2002  		mProf_NextCycle()
  2003  		mProf_Flush()
  2004  		return
  2005  	}
  2006  
  2007  	// Background sweep.
  2008  	lock(&sweep.lock)
  2009  	if sweep.parked {
  2010  		sweep.parked = false
  2011  		ready(sweep.g, 0, true)
  2012  	}
  2013  	unlock(&sweep.lock)
  2014  }
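
        // Illustrative sketch (not part of the original source) of the
        // proportional-sweep pacing the background path above relies on,
        // as computed elsewhere (gcSetTriggerRatio), roughly:
        //
        //	heapDistance := trigger - heap_live // bytes until the next GC trigger
        //	sweepPagesPerByte = pagesToSweep / heapDistance
        //
        // so allocating n bytes obliges the mutator to sweep about
        // n*sweepPagesPerByte pages, finishing the sweep before the next
        // cycle begins.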
  2015  
  2016  // gcResetMarkState resets global state prior to marking (concurrent
  2017  // or STW) and resets the stack scan state of all Gs.
  2018  //
  2019  // This is safe to do without the world stopped because any Gs created
  2020  // during or after this will start out in the reset state.
  2021  func gcResetMarkState() {
  2022  	// This may be called during a concurrent phase, so make sure
  2023  	// allgs doesn't change.
  2024  	lock(&allglock)
  2025  	for _, gp := range allgs {
  2026  		gp.gcscandone = false  // set to true when the stack scan completes
  2027  		gp.gcscanvalid = false // stack has not been scanned
  2028  		gp.gcAssistBytes = 0
  2029  	}
  2030  	unlock(&allglock)
  2031  
  2032  	work.bytesMarked = 0
  2033  	work.initialHeapLive = atomic.Load64(&memstats.heap_live)
  2034  	work.markrootDone = false
  2035  }
  2036  
  2037  // Hooks for other packages
  2038  
  2039  var poolcleanup func()
  2040  
  2041  //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
  2042  func sync_runtime_registerPoolCleanup(f func()) {
  2043  	poolcleanup = f
  2044  }
  2045  
  2046  func clearpools() {
  2047  	// clear sync.Pools
  2048  	if poolcleanup != nil {
  2049  		poolcleanup()
  2050  	}
  2051  
  2052  	// Clear central sudog cache.
  2053  	// Leave per-P caches alone; they have strictly bounded size.
  2054  	// Disconnect cached list before dropping it on the floor,
  2055  	// so that a dangling ref to one entry does not pin all of them.
  2056  	lock(&sched.sudoglock)
  2057  	var sg, sgnext *sudog
  2058  	for sg = sched.sudogcache; sg != nil; sg = sgnext {
  2059  		sgnext = sg.next
  2060  		sg.next = nil
  2061  	}
  2062  	sched.sudogcache = nil
  2063  	unlock(&sched.sudoglock)
  2064  
  2065  	// Clear central defer pools.
  2066  	// Leave per-P pools alone; they have strictly bounded size.
  2067  	lock(&sched.deferlock)
  2068  	for i := range sched.deferpool {
  2069  		// disconnect cached list before dropping it on the floor,
  2070  		// so that a dangling ref to one entry does not pin all of them.
  2071  		var d, dlink *_defer
  2072  		for d = sched.deferpool[i]; d != nil; d = dlink {
  2073  			dlink = d.link
  2074  			d.link = nil
  2075  		}
  2076  		sched.deferpool[i] = nil
  2077  	}
  2078  	unlock(&sched.deferlock)
  2079  }
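
        // Illustrative user-level consequence (not part of the original
        // source): because poolcleanup runs here, objects cached in a
        // sync.Pool may be dropped at every GC cycle, e.g.
        //
        //	var bufs = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}
        //	bufs.Put(new(bytes.Buffer))
        //	runtime.GC()    // clearpools invokes the registered sync.Pool cleanup
        //	b := bufs.Get() // likely freshly allocated via New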
  2080  
  2081  // Timing
  2082  
  2083  //go:nowritebarrier
  2084  func gchelper() {
  2085  	_g_ := getg()
  2086  	_g_.m.traceback = 2
  2087  	gchelperstart()
  2088  
  2089  	if trace.enabled {
  2090  		traceGCScanStart()
  2091  	}
  2092  
  2093  	// Parallel mark over GC roots and heap
  2094  	if gcphase == _GCmarktermination {
  2095  		gcw := &_g_.m.p.ptr().gcw
  2096  		if work.helperDrainBlock {
  2097  			gcDrain(gcw, gcDrainBlock) // blocks in getfull
  2098  		} else {
  2099  			gcDrain(gcw, gcDrainNoBlock)
  2100  		}
  2101  		gcw.dispose()
  2102  	}
  2103  
  2104  	if trace.enabled {
  2105  		traceGCScanDone()
  2106  	}
  2107  
  2108  	nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone
  2109  	if atomic.Xadd(&work.ndone, +1) == nproc-1 {
  2110  		notewakeup(&work.alldone)
  2111  	}
  2112  	_g_.m.traceback = 0
  2113  }
  2114  
  2115  func gchelperstart() {
  2116  	_g_ := getg()
  2117  
  2118  	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
  2119  		throw("gchelperstart: bad m->helpgc")
  2120  	}
  2121  	if _g_ != _g_.m.g0 {
  2122  		throw("gchelper not running on g0 stack")
  2123  	}
  2124  }
  2125  
  2126  // itoaDiv formats val/(10**dec) into buf.
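        // For example (illustrative, not in the original source):
        // itoaDiv(buf, 12345, 3) yields "12.345", itoaDiv(buf, 7, 1)
        // yields "0.7", and itoaDiv(buf, 42, 0) yields "42".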
  2127  func itoaDiv(buf []byte, val uint64, dec int) []byte {
  2128  	i := len(buf) - 1
  2129  	idec := i - dec
  2130  	for val >= 10 || i >= idec {
  2131  		buf[i] = byte(val%10 + '0')
  2132  		i--
  2133  		if i == idec {
  2134  			buf[i] = '.'
  2135  			i--
  2136  		}
  2137  		val /= 10
  2138  	}
  2139  	buf[i] = byte(val + '0')
  2140  	return buf[i:]
  2141  }
  2142  
  2143  // fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
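        // For example (illustrative, not in the original source):
        // fmtNSAsMS(buf, 123456789) yields "123" (>= 10ms prints whole
        // milliseconds), fmtNSAsMS(buf, 2500000) yields "2.5", and
        // fmtNSAsMS(buf, 1500) yields "0.001".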
  2144  func fmtNSAsMS(buf []byte, ns uint64) []byte {
  2145  	if ns >= 10e6 {
  2146  		// Format as whole milliseconds.
  2147  		return itoaDiv(buf, ns/1e6, 0)
  2148  	}
  2149  	// Format two digits of precision, with at most three decimal places.
  2150  	x := ns / 1e3
  2151  	if x == 0 {
  2152  		buf[0] = '0'
  2153  		return buf[:1]
  2154  	}
  2155  	dec := 3
  2156  	for x >= 100 {
  2157  		x /= 10
  2158  		dec--
  2159  	}
  2160  	return itoaDiv(buf, x, dec)
  2161  }