// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
// It has gotten completely out of control.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows
// multiple GC threads to run in parallel. It is a concurrent mark and sweep that uses a write
// barrier. It is non-generational and non-compacting. Allocation is done using size segregated
// per P allocation areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 0. Set phase = GCscan from GCoff.
// 1. Wait for all P's to acknowledge phase change.
//    At this point all goroutines have passed through a GC safepoint and
//    know we are in the GCscan phase.
// 2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
//    (marking avoids most duplicate enqueuing but races may produce benign duplication).
//    Preempted goroutines are scanned before P schedules next goroutine.
// 3. Set phase = GCmark.
// 4. Wait for all P's to acknowledge phase change.
// 5. Now the write barrier marks and enqueues black, grey, or white to white pointers.
//    Malloc still allocates white (non-marked) objects.
// 6. Meanwhile GC transitively walks the heap marking reachable objects.
// 7. When GC finishes marking heap, it preempts P's one-by-one and
//    retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
//    currently scheduled on the P).
// 8. Once the GC has exhausted all available marking work it sets phase = marktermination.
// 9. Wait for all P's to acknowledge phase change.
// 10. Malloc now allocates black objects, so the number of unmarked reachable objects
//     monotonically decreases.
// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
//     reachable objects.
// 12. When GC completes a full cycle over P's and discovers no new grey
//     objects (which means all reachable objects are marked), set phase = GCoff.
// 13. Wait for all P's to acknowledge phase change.
// 14. Now malloc allocates white (but sweeps spans before use).
//     Write barrier becomes nop.
// 15. GC does background sweeping, see description below.
// 16. When sufficient allocation has taken place, replay the sequence starting at 0 above,
//     see discussion of GC rate below.
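
// As a rough illustration of the write barrier discipline in steps 5-12
// above (a hedged sketch of the tri-color invariant, not the runtime's
// actual barrier, which lives in mbarrier.go): every pointer store made
// while the barrier is enabled shades the new referent, so a reachable
// object can never be hidden from the marker.
//
//	// writePointer stands in for a pointer store *slot = ptr.
//	// shade is a hypothetical helper that greys its argument.
//	func writePointer(slot *unsafe.Pointer, ptr unsafe.Pointer) {
//		if writeBarrierEnabled {
//			shade(ptr) // grey the referent so it cannot be lost
//		}
//		*slot = ptr
//	}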
// Changing phases.
// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
// All phase action must be benign in the presence of a change.
// Starting with GCoff
// GCoff to GCscan
//     GCscan scans stacks and globals, greying them, and never marks an object black.
//     Once all the P's are aware of the new phase they will scan gs on preemption.
//     This means that the scanning of preempted gs can't start until all the Ps
//     have acknowledged.
//     When a stack is scanned, this phase also installs stack barriers to
//     track how much of the stack has been active.
//     This transition enables write barriers because stack barriers
//     assume that writes to higher frames will be tracked by write
//     barriers. Technically this only needs write barriers for writes
//     to stack slots, but we enable write barriers in general.
// GCscan to GCmark
//     In GCmark, work buffers are drained until there are no more
//     pointers to scan.
//     No scanning of objects (making them black) can happen until all
//     Ps have enabled the write barrier, but that already happened in
//     the transition to GCscan.
// GCmark to GCmarktermination
//     The only change here is that we start allocating black so the Ps must acknowledge
//     the change before we begin the termination algorithm.
// GCmarktermination to GCsweep
//     Objects currently on the freelist must be marked black for this to work.
//     Are things on the free lists black or white? How does the sweep phase work?

// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the
// heap, it sweeps spans until it frees at least that many pages into the heap.
// There is one case where this may not suffice: if a goroutine sweeps and frees
// two nonadjacent one-page spans to the heap, it will allocate a new two-page
// span, but there can still be other one-page unswept spans which could be
// combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).

// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment variable
// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
// (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
// (and also the amount of extra memory used).
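
// As a worked example of the rate rule above (illustrative only): with
// GOGC=100 and 4M live, the trigger is 4M + 4M*100/100 = 8M; with
// GOGC=200 it would be 12M. In code form (a sketch, not a runtime
// function):
//
//	func nextGCFor(liveBytes uint64, gogc int32) uint64 {
//		return liveBytes + liveBytes*uint64(gogc)/100
//	}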

package runtime

import "unsafe"

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024
	_RootData        = 0
	_RootBss         = 1
	_RootFinalizers  = 2
	_RootSpans       = 3
	_RootFlushCaches = 4
	_RootCount       = 5

	// firstStackBarrierOffset is the approximate byte offset at
	// which to place the first stack barrier from the current SP.
	// This is a lower bound on how much stack will have to be
	// re-scanned during mark termination. Subsequent barriers are
	// placed at firstStackBarrierOffset * 2^n offsets.
	//
	// For debugging, this can be set to 0, which will install a
	// stack barrier at every frame. If you do this, you may also
	// have to raise _StackMin, since the stack barrier
	// bookkeeping will use a large amount of each stack.
	firstStackBarrierOffset = 1024
	debugStackBarrier       = false

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles. This will be scaled by gcpercent/100.
	sweepMinHeapDistance = 1024 * 1024
)
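
// As an illustration of the barrier spacing above: with
// firstStackBarrierOffset = 1024, barriers land roughly 1KB, 2KB, 4KB,
// 8KB, ... below the SP at scan time, so the number of barriers (and
// hence the barrier bookkeeping) grows only logarithmically with the
// amount of active stack.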

// heapminimum is the minimum heap size at which to trigger GC.
// For small heaps, this overrides the usual GOGC*live set rule.
//
// When there is a very small live set but a lot of allocation, simply
// collecting when the heap reaches GOGC*live results in many GC
// cycles and high total per-GC overhead. This minimum amortizes this
// per-GC overhead while keeping the heap reasonably small.
//
// During initialization this is set to 4MB*GOGC/100. In the case of
// GOGC==0, this will set heapminimum to 0, resulting in constant
// collection even when the heap size is small, which is useful for
// debugging.
var heapminimum uint64 = defaultHeapMinimum

// defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	work.markfor = parforalloc(_MaxGcproc)
	_ = setGCPercent(readgogc())
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		datap.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcdata)), datap.edata-datap.data)
		datap.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcbss)), datap.ebss-datap.bss)
	}
	memstats.next_gc = heapminimum
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "" {
		return 100
	}
	if p == "off" {
		return -1
	}
	return int32(atoi(p))
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
	unlock(&mheap_.lock)
	return out
}

// Garbage collector phase.
// Indicates to the write barrier and synchronization code what task to perform.
var gcphase uint32
var writeBarrierEnabled bool // compiler emits references to this in write barriers

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

// gcBlackenPromptly indicates that optimizations that may
// hide work from the global work queue should be disabled.
//
// If gcBlackenPromptly is true, per-P gcWork caches should
// be flushed immediately and new objects should be allocated black.
//
// There is a tension between allocating objects white and
// allocating them black. If white and the objects die before being
// marked they can be collected during this GC cycle. On the other
// hand allocating them black will reduce _GCmarktermination latency
// since more work is done in the mark phase. This tension is resolved
// by allocating white until the mark phase is approaching its end and
// then allocating black for the remainder of the mark phase.
var gcBlackenPromptly bool

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCstw                    // unused state
	_GCscan                   // GC collecting roots into workbufs, write barrier ENABLED
	_GCmark                   // GC marking from workbufs, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)

//go:nosplit
func setGCPhase(x uint32) {
	atomicstore(&gcphase, x)
	writeBarrierEnabled = gcphase == _GCmark || gcphase == _GCmarktermination || gcphase == _GCscan
}

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption until concurrent mark
	// is done.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the memstats.next_gc
// trigger based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
var gcController = gcControllerState{
	// Initial trigger ratio guess.
	triggerRatio: 7 / 8.0,
}
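
// As a rough illustration of how startCycle below splits the 25%
// utilization goal between whole and fractional workers (illustrative
// arithmetic only):
//
//	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
//	// GOMAXPROCS=6: goal = 1.5, so 1 dedicated worker plus a
//	// fractional worker with fractionalUtilizationGoal = 0.5.
//	// GOMAXPROCS=4: goal = 1.0, so 1 dedicated worker and no
//	// fractional worker.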

type gcControllerState struct {
	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates may be
	// batched arbitrarily, since the value is only read at the
	// end of the cycle.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// bgMarkStartTime is the absolute start time in nanoseconds
	// that the background mark phase started.
	bgMarkStartTime int64

	// assistStartTime is the absolute start time in nanoseconds
	// that mutator assists were enabled.
	assistStartTime int64

	// heapGoal is the goal memstats.heap_live for when this cycle
	// ends. This is computed at the beginning of each cycle.
	heapGoal uint64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistRatio is the ratio of allocated bytes to scan work
	// that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heap_scan is updated.
	assistRatio float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker.
	// For example, if the overall mark utilization goal is 25%
	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
	// and this will be set to 0.5 so that 50% of the time some P
	// is in a fractional mark worker. This is computed at the
	// beginning of each cycle.
	fractionalUtilizationGoal float64

	// triggerRatio is the heap growth ratio at which the garbage
	// collection cycle should start. E.g., if this is 0.6, then
	// GC should start when the live heap has reached 1.6 times
	// the heap size marked by the previous cycle. This is updated
	// at the end of each cycle.
	triggerRatio float64

	_ [_CacheLineSize]byte

	// fractionalMarkWorkersNeeded is the number of fractional
	// mark workers that need to be started. This is either 0 or
	// 1. This is potentially updated atomically at every
	// scheduling point (hence it gets its own cache line).
	fractionalMarkWorkersNeeded int64

	_ [_CacheLineSize]byte
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// If this is the first GC cycle or we're operating on a very
	// small heap, fake heap_marked so it looks like next_gc is
	// the appropriate growth from heap_marked, even though the
	// real heap_marked may not have a meaningful value (on the
	// first cycle) or may be much smaller (resulting in a large
	// error response).
	if memstats.next_gc <= heapminimum {
		memstats.heap_marked = uint64(float64(memstats.next_gc) / (1 + c.triggerRatio))
		memstats.heap_reachable = memstats.heap_marked
	}

	// Compute the heap goal for this cycle.
	c.heapGoal = memstats.heap_reachable + memstats.heap_reachable*uint64(gcpercent)/100

	// Compute the total mark utilization goal and divide it among
	// dedicated and fractional workers.
	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
	if c.fractionalUtilizationGoal > 0 {
		c.fractionalMarkWorkersNeeded = 1
	} else {
		c.fractionalMarkWorkersNeeded = 0
	}

	// Clear per-P state.
	for _, p := range &allp {
		if p == nil {
			break
		}
		p.gcAssistTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		print("pacer: assist ratio=", c.assistRatio,
			" (scan ", memstats.heap_scan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			c.heapGoal>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalMarkWorkersNeeded, "\n")
	}
}
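
// As a worked example of the assist ratio computed in revise below
// (illustrative numbers): if scanWorkExpected is 64MB of scannable
// heap and heapDistance is 4MB of remaining allocation headroom, then
// assistRatio = 64/4 = 16, i.e. for every byte it allocates, a mutator
// must perform (or steal credit for) 16 bytes of scan work.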

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called either under STW or
// whenever memstats.heap_scan is updated (with mheap_.lock held).
func (c *gcControllerState) revise() {
	// Compute the expected scan work. This is a strict upper
	// bound on the possible scan work in the current heap.
	//
	// You might consider dividing this by 2 (or by
	// (100+GOGC)/100) to counter this over-estimation, but
	// benchmarks show that this has almost no effect on mean
	// mutator utilization, heap size, or assist time and it
	// introduces the danger of under-estimating and letting the
	// mutator outpace the garbage collector.
	scanWorkExpected := memstats.heap_scan

	// Compute the mutator assist ratio so that by the time the mutator
	// allocates the remaining heap bytes up to next_gc, it will
	// have done (or stolen) the estimated amount of scan work.
	heapDistance := int64(c.heapGoal) - int64(work.initialHeapLive)
	if heapDistance <= 1024*1024 {
		// heapDistance can be negative if GC start is delayed
		// or if the allocation that pushed heap_live over
		// next_gc is large or if the trigger is really close
		// to GOGC. We don't want to set the assist negative
		// (or divide by zero, or set it really high), so
		// enforce a minimum on the distance.
		heapDistance = 1024 * 1024
	}
	c.assistRatio = float64(scanWorkExpected) / float64(heapDistance)
}
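
// endCycle below is a proportional feedback controller on the trigger
// ratio. A hedged sketch of one update step with illustrative numbers:
// with goal growth 1.0 (GOGC=100), trigger 0.875, and actual growth 1.2
// at exactly the goal utilization (u_a/u_g = 1), the error is
//
//	e := goalGrowthRatio - h_t - (u_a/u_g)*(h_a-h_t) // = 1.0 - 0.875 - (1.2 - 0.875) = -0.2
//
// so the damped update h_t += 0.5*e lowers the trigger to 0.775 and the
// next cycle starts earlier.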

// endCycle updates the GC controller state at the end of the
// concurrent part of the GC cycle.
func (c *gcControllerState) endCycle() {
	h_t := c.triggerRatio // For debugging

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	//
	// TODO(austin): next_gc is based on heap_reachable, not
	// heap_marked, which means the actual growth ratio
	// technically isn't comparable to the trigger ratio.
	goalGrowthRatio := float64(gcpercent) / 100
	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
	assistDuration := nanotime() - c.assistStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcGoalUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	c.triggerRatio += triggerGain * triggerError
	if c.triggerRatio < 0 {
		// This can happen if the mutator is allocating very
		// quickly or the GC is scanning very slowly.
		c.triggerRatio = 0
	} else if c.triggerRatio > goalGrowthRatio*0.95 {
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		c.triggerRatio = goalGrowthRatio * 0.95
	}

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := memstats.heap_marked
		H_T := memstats.next_gc
		h_a := actualGrowthRatio
		H_a := memstats.heap_live
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}
}
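
// findRunnableGCWorker below hands out "worker tokens" by
// optimistically decrementing a counter and undoing the decrement on
// failure. The undo matters: two Ps can both observe *ptr == 1 and
// both decrement, driving the counter to -1; the loser must add the
// token back or a needed worker would never be scheduled. (This is a
// restatement of the decIfPositive closure below.)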

// findRunnableGCWorker returns the background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}
	if _p_.gcBgMarkWorker == nil {
		throw("gcControllerState.findRunnable: no background mark worker")
	}
	if work.bgMark1.done != 0 && work.bgMark2.done != 0 {
		// Background mark is done. Don't schedule background
		// mark worker any more. (This is not just an
		// optimization. Without this we can spin scheduling
		// the background worker and having it return
		// immediately with no work to do.)
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		if *ptr > 0 {
			if xaddint64(ptr, -1) >= 0 {
				return true
			}
			// We lost a race.
			xaddint64(ptr, +1)
		}
		return false
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
		// TODO(austin): This P isn't going to run anything
		// else for a while, so kick everything out of its run
		// queue.
	} else {
		if _p_.gcw.wbuf == 0 && work.full == 0 && work.partial == 0 {
			// No work to be done right now. This can
			// happen at the end of the mark phase when
			// there are still assists tapering off. Don't
			// bother running background mark because
			// it'll just return immediately.
			if work.nwait == work.nproc {
				// There are also no workers, which
				// means we've reached a completion point.
				// There may not be any workers to
				// signal it, so signal it here.
				readied := false
				if gcBlackenPromptly {
					if work.bgMark1.done == 0 {
						throw("completing mark 2, but bgMark1.done == 0")
					}
					readied = work.bgMark2.complete()
				} else {
					readied = work.bgMark1.complete()
				}
				if readied {
					// complete just called ready,
					// but we're inside the
					// scheduler. Let it know that
					// that's okay.
					resetspinning()
				}
			}
			return nil
		}
		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
			// No more workers are needed right now.
			return nil
		}

		// This P has picked the token for the fractional worker.
		// Is the GC currently under or at the utilization goal?
		// If so, do more work.
		//
		// We used to check whether doing one time slice of work
		// would remain under the utilization goal, but that has the
		// effect of delaying work until the mutator has run for
		// enough time slices to pay for the work. During those time
		// slices, write barriers are enabled, so the mutator is running slower.
		// Now instead we do the work whenever we're under or at the
		// utilization goal and pay for it by letting the mutator run later.
		// This doesn't change the overall utilization averages, but it
		// front loads the GC work so that the GC finishes earlier and
		// write barriers can be turned off sooner, effectively giving
		// the mutator a faster machine.
		//
		// The old, slower behavior can be restored by setting
		// gcForcePreemptNS = forcePreemptNS.
		const gcForcePreemptNS = 0

		// TODO(austin): We could fast path this and basically
		// eliminate contention on c.fractionalMarkWorkersNeeded by
		// precomputing the minimum time at which it's worth
		// next scheduling the fractional worker. Then Ps
		// don't have to fight in the window where we've
		// passed that deadline and no one has started the
		// worker yet.
		//
		// TODO(austin): Shorter preemption interval for mark
		// worker to improve fairness and give this
		// finer-grained control over schedule?
		now := nanotime() - gcController.bgMarkStartTime
		then := now + gcForcePreemptNS
		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
			// Nope, we'd overshoot the utilization goal.
			xaddint64(&c.fractionalMarkWorkersNeeded, +1)
			return nil
		}
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := _p_.gcBgMarkWorker
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}
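
// As a worked example of the fractional utilization check above
// (illustrative numbers): with fractionalUtilizationGoal = 0.5, if
// 30ms have elapsed since bgMarkStartTime and the fractional worker
// has accumulated 20ms of run time, then 20/30 ≈ 0.67 > 0.5, so the P
// returns the token and declines to run the worker until the ratio
// drops back to the goal.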

// gcGoalUtilization is the goal CPU utilization for background
// marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.25

// gcBgCreditSlack is the amount of scan work credit background
// scanning can accumulate locally before updating
// gcController.bgScanCredit. Lower values give mutator assists more
// accurate accounting of background scanning. Higher values reduce
// memory contention.
const gcBgCreditSlack = 2000

// gcAssistTimeSlack is the nanoseconds of mutator assist time that
// can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000

// Determine whether to initiate a GC.
// If the GC is already working, there is no need to trigger another one.
// This should establish a feedback loop where if the GC does not
// have sufficient time to complete then more memory will be
// requested from the OS, increasing heap size and thus allowing future
// GCs more time to complete.
// The memstats.heap_live read has a benign race.
// A false negative simply does not start a GC, a false positive
// will start a GC needlessly. Neither has correctness issues.
func shouldtriggergc() bool {
	return memstats.heap_live >= memstats.next_gc && atomicloaduint(&bggc.working) == 0
}
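
// bgMarkSignal below implements a one-shot completion handshake: many
// workers may call complete, exactly one wins the CAS and wakes the
// coordinator, which blocks in wait. A hedged sketch of the intended
// use, mirroring the calls made from gc below:
//
//	work.bgMark1.clear() // arm the signal
//	...                  // workers call work.bgMark1.complete()
//	work.bgMark1.wait()  // coordinator blocks until the first complete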

// bgMarkSignal synchronizes the GC coordinator and background mark workers.
type bgMarkSignal struct {
	// Workers race to cas to 1. Winner signals coordinator.
	done uint32
	// Coordinator to wake up.
	lock mutex
	g    *g
	wake bool
}

func (s *bgMarkSignal) wait() {
	lock(&s.lock)
	if s.wake {
		// Wakeup already happened.
		unlock(&s.lock)
	} else {
		s.g = getg()
		goparkunlock(&s.lock, "mark wait (idle)", traceEvGoBlock, 1)
	}
	s.wake = false
	s.g = nil
}

// complete signals the completion of this phase of marking. This can
// be called multiple times during a cycle; only the first call has
// any effect.
//
// The caller should arrange to deschedule itself as soon as possible
// after calling complete in order to let the coordinator goroutine
// run.
func (s *bgMarkSignal) complete() bool {
	if cas(&s.done, 0, 1) {
		// This is the first worker to reach this completion point.
		// Signal the main GC goroutine.
		lock(&s.lock)
		if s.g == nil {
			// It hasn't parked yet.
			s.wake = true
		} else {
			ready(s.g, 0)
		}
		unlock(&s.lock)
		return true
	}
	return false
}

func (s *bgMarkSignal) clear() {
	s.done = 0
}

var work struct {
	full  uint64 // lock-free list of full blocks workbuf
	empty uint64 // lock-free list of empty blocks workbuf
	// TODO(rlh): partial no longer used, remove. (issue #11922)
	partial uint64                // lock-free list of partially filled blocks workbuf
	pad0    [_CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait
	nproc   uint32
	tstart  int64
	nwait   uint32
	ndone   uint32
	alldone note
	markfor *parfor

	// Background mark completion signaling
	bgMarkReady note   // signal background mark worker has started
	bgMarkDone  uint32 // cas to 1 when at a background mark completion point

	// Coordination for the 2 parts of the mark phase.
	bgMark1 bgMarkSignal
	bgMark2 bgMarkSignal

	// Copy of mheap.allspans for marker or sweeper.
	spans []*mspan

	// totaltime is the CPU nanoseconds spent in GC since the
	// program started if debug.gctrace > 0.
	totaltime int64

	// bytesMarked is the number of bytes marked this cycle. This
	// includes bytes blackened in scanned objects, noscan objects
	// that go straight to black, and permagrey objects scanned by
	// markroot during the concurrent scan phase. This is updated
	// atomically during the cycle. Updates may be batched
	// arbitrarily, since the value is only read at the end of the
	// cycle.
	//
	// Because of benign races during marking, this number may not
	// be the exact number of marked bytes, but it should be very
	// close.
	bytesMarked uint64

	// initialHeapLive is the value of memstats.heap_live at the
	// beginning of this GC cycle.
	initialHeapLive uint64
}

// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
// program.
func GC() {
	startGC(gcForceBlockMode, false)
}

const (
	gcBackgroundMode = iota // concurrent GC
	gcForceMode             // stop-the-world GC now
	gcForceBlockMode        // stop-the-world GC now and wait for sweep
)
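
// The three modes above map onto entry points roughly as follows
// (illustrative): the allocator's pacing check starts a
// gcBackgroundMode cycle, runtime.GC uses gcForceBlockMode, and the
// GODEBUG settings gcstoptheworld=1/2 downgrade a background cycle to
// gcForceMode/gcForceBlockMode, as startGC below shows. A hedged
// sketch of caller-side code:
//
//	runtime.GC() // force a blocking collection and wait for sweep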

// startGC starts a GC cycle. If mode is gcBackgroundMode, this will
// start GC in the background and return. Otherwise, this will block
// until the new GC cycle is started and finishes. If forceTrigger is
// true, it indicates that GC should be started regardless of the
// current heap size.
func startGC(mode int, forceTrigger bool) {
	// The gc is turned off (via enablegc) until the bootstrap has completed.
	// Also, malloc gets called in the guts of a number of libraries that might be
	// holding locks. To avoid deadlocks during stop-the-world, don't bother
	// trying to run gc while holding a lock. The next mallocgc without a lock
	// will do the gc instead.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	if debug.gcstoptheworld == 1 {
		mode = gcForceMode
	} else if debug.gcstoptheworld == 2 {
		mode = gcForceBlockMode
	}

	if mode != gcBackgroundMode {
		// special synchronous cases
		gc(mode)
		return
	}

	// trigger concurrent GC
	readied := false
	lock(&bggc.lock)
	// The trigger was originally checked speculatively, so
	// recheck that this really should trigger GC. (For example,
	// we may have gone through a whole GC cycle since the
	// speculative check.)
	if !(forceTrigger || shouldtriggergc()) {
		unlock(&bggc.lock)
		return
	}
	if !bggc.started {
		bggc.working = 1
		bggc.started = true
		readied = true
		go backgroundgc()
	} else if bggc.working == 0 {
		bggc.working = 1
		readied = true
		ready(bggc.g, 0)
	}
	unlock(&bggc.lock)
	if readied {
		// This G just started or ready()d the GC goroutine.
		// Switch directly to it by yielding.
		Gosched()
	}
}

// State of the background concurrent GC goroutine.
var bggc struct {
	lock    mutex
	g       *g
	working uint
	started bool
}

// backgroundgc runs in a goroutine and does the concurrent GC work.
// bggc holds the state of the backgroundgc.
func backgroundgc() {
	bggc.g = getg()
	for {
		gc(gcBackgroundMode)
		lock(&bggc.lock)
		bggc.working = 0
		goparkunlock(&bggc.lock, "Concurrent GC wait", traceEvGoBlock, 1)
	}
}

func gc(mode int) {
	// Timing/utilization tracking
	var stwprocs, maxprocs int32
	var tSweepTerm, tScan, tInstallWB, tMark, tMarkTerm int64

	// debug.gctrace variables
	var heap0, heap1, heap2, heapGoal uint64

	// memstats statistics
	var now, pauseStart, pauseNS int64
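
	// The t* timestamps above delimit the phases of the cycle:
	// tSweepTerm..tScan is sweep termination (STW), tScan..tInstallWB
	// is concurrent scan, tInstallWB..tMark is write barrier
	// installation, tMark..tMarkTerm is concurrent mark, and
	// tMarkTerm onward is mark termination (STW). These feed the
	// gctrace output printed at the end of this function.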

	// Ok, we're doing it! Stop everybody else
	semacquire(&worldsema, false)

	// Pick up the remaining unswept/not being swept spans concurrently
	//
	// This shouldn't happen if we're being invoked in background
	// mode since proportional sweep should have just finished
	// sweeping everything, but rounding errors, etc, may leave a
	// few spans unswept. In forced mode, this is necessary since
	// GC can be forced at any point in the sweeping cycle.
	for gosweepone() != ^uintptr(0) {
		sweep.nbgsweep++
	}

	if trace.enabled {
		traceGCStart()
	}

	if mode == gcBackgroundMode {
		gcBgMarkStartWorkers()
	}
	now = nanotime()
	stwprocs, maxprocs = gcprocs(), gomaxprocs
	tSweepTerm = now
	heap0 = memstats.heap_live

	pauseStart = now
	systemstack(stopTheWorldWithSema)
	systemstack(finishsweep_m) // finish sweep before we start concurrent scan.
	// clearpools before we start the GC. If we wait, the memory will not be
	// reclaimed until the next GC cycle.
	clearpools()

	gcResetMarkState()

	if mode == gcBackgroundMode { // Do as much work concurrently as possible
		gcController.startCycle()
		heapGoal = gcController.heapGoal

		systemstack(func() {
			// Enter scan phase. This enables write
			// barriers to track changes to stack frames
			// above the stack barrier.
			//
			// TODO: This has evolved to the point where
			// we carefully ensure invariants we no longer
			// depend on. Either:
			//
			// 1) Enable full write barriers for the scan,
			// but eliminate the ragged barrier below
			// (since the start the world ensures all Ps
			// have observed the write barrier enable) and
			// consider draining during the scan.
			//
			// 2) Only enable write barriers for writes to
			// the stack at this point, and then enable
			// write barriers for heap writes when we
			// enter the mark phase. This means we cannot
			// drain in the scan phase and must perform a
			// ragged barrier to ensure all Ps have
			// enabled heap write barriers before we drain
			// or enable assists.
			//
			// 3) Don't install stack barriers over frame
			// boundaries where there are up-pointers.
			setGCPhase(_GCscan)

			gcBgMarkPrepare() // Must happen before assist enable.

			// At this point all Ps have enabled the write
			// barrier, thus maintaining the no white to
			// black invariant. Enable mutator assists to
			// put back-pressure on fast allocating
			// mutators.
			atomicstore(&gcBlackenEnabled, 1)

			// Concurrent scan.
			startTheWorldWithSema()
			now = nanotime()
			pauseNS += now - pauseStart
			tScan = now
			gcController.assistStartTime = now
			gcscan_m()

			// Enter mark phase.
			tInstallWB = nanotime()
			setGCPhase(_GCmark)
			// Ensure all Ps have observed the phase
			// change and have write barriers enabled
			// before any blackening occurs.
			forEachP(func(*p) {})
		})
		// Concurrent mark.
		tMark = nanotime()

		// Enable background mark workers and wait for
		// background mark completion.
		gcController.bgMarkStartTime = nanotime()
		work.bgMark1.clear()
		work.bgMark1.wait()

		// The global work list is empty, but there can still be work
		// sitting in the per-P work caches and there can be more
		// objects reachable from global roots since they don't have write
		// barriers. Rescan some roots and flush work caches.
		systemstack(func() {
			// rescan global data and bss.
			markroot(nil, _RootData)
			markroot(nil, _RootBss)

			// Disallow caching workbufs.
			gcBlackenPromptly = true

			// Flush all currently cached workbufs. This
			// also forces any remaining background
			// workers out of their loop.
			forEachP(func(_p_ *p) {
				_p_.gcw.dispose()
			})
		})

		// Wait for this more aggressive background mark to complete.
		work.bgMark2.clear()
		work.bgMark2.wait()

		// Begin mark termination.
		now = nanotime()
		tMarkTerm = now
		pauseStart = now
		systemstack(stopTheWorldWithSema)
		// The gcphase is _GCmark; it will transition to _GCmarktermination
		// below. The important thing is that the wb remains active until
		// all marking is complete. This includes writes made by the GC.

		// Flush the gcWork caches. This must be done before
		// endCycle since endCycle depends on statistics kept
		// in these caches.
		gcFlushGCWork()

		gcController.endCycle()
	} else {
		// For non-concurrent GC (mode != gcBackgroundMode),
		// the g stacks have not been scanned, so clear g state
		// such that mark termination scans all stacks.
		gcResetGState()

		t := nanotime()
		tScan, tInstallWB, tMark, tMarkTerm = t, t, t, t
		heapGoal = heap0
	}

	// World is stopped.
	// Start marktermination which includes enabling the write barrier.
	atomicstore(&gcBlackenEnabled, 0)
	gcBlackenPromptly = false
	setGCPhase(_GCmarktermination)

	heap1 = memstats.heap_live
	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	_g_ := getg()
	_g_.m.traceback = 2
	gp := _g_.m.curg
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "garbage collection"

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		// Must return immediately.
		// The outer function's stack may have moved
		// during gcMark (it shrinks stacks, including the
		// outer function's stack), so we must not refer
		// to any of its variables. Return back to the
		// non-system stack to pick up the new addresses
		// before continuing.
	})

	systemstack(func() {
		heap2 = work.bytesMarked
		if debug.gccheckmark > 0 {
			// Run a full stop-the-world mark using checkmark bits,
			// to check that we didn't forget to mark anything during
			// the concurrent mark process.
			gcResetGState() // Rescan stacks
			gcResetMarkState()
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetGState()
			gcResetMarkState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Update timing memstats
	now, unixNow := nanotime(), unixnanotime()
	pauseNS += now - pauseStart
	atomicstore64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(pauseNS)

	// Update work.totaltime.
	sweepTermCpu := int64(stwprocs) * (tScan - tSweepTerm)
	scanCpu := tInstallWB - tScan
	installWBCpu := int64(0)
	// We report idle marking time below, but omit it from the
	// overall utilization here since it's "free".
	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
	markTermCpu := int64(stwprocs) * (now - tMarkTerm)
	cycleCpu := sweepTermCpu + scanCpu + installWBCpu + markCpu + markTermCpu
	work.totaltime += cycleCpu

	// Compute overall GC CPU utilization.
	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

	memstats.numgc++

	systemstack(startTheWorldWithSema)
	semrelease(&worldsema)

	releasem(mp)
	mp = nil
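
	// The gctrace line printed below has the shape (illustrative
	// values, not real output):
	//
	//	gc 1 @0.021s 4%: 0.1+1.2+0.1+2.3+0.4 ms clock, 0.4+1.2+0+2.3/3.4/4.6+1.6 ms cpu, 4->5->2 MB, 5 MB goal, 4 P
	//
	// i.e. sweep termination, scan, install write barrier, mark, and
	// mark termination wall-clock times; then the corresponding CPU
	// times, with the mark CPU time split as assist/background/idle;
	// then heap sizes at the phase boundaries, the goal, and the
	// number of P's.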
	if debug.gctrace > 0 {
		tEnd := now
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ")
		prev := tSweepTerm
		for i, ns := range []int64{tScan, tInstallWB, tMark, tMarkTerm, tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		for i, ns := range []int64{sweepTermCpu, scanCpu, installWBCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
			if i == 4 || i == 5 {
				// Separate mark time components with /.
				print("/")
			} else if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
		}
		print(" ms cpu, ",
			heap0>>20, "->", heap1>>20, "->", heap2>>20, " MB, ",
			heapGoal>>20, " MB goal, ",
			maxprocs, " P")
		if mode != gcBackgroundMode {
			print(" (forced)")
		}
		print("\n")
		printunlock()
	}
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// gcBgMarkStartWorkers prepares background mark worker goroutines.
// These goroutines will not run until the mark phase, but they must
// be started while the world is not stopped and from a regular G
// stack. The caller must hold worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that
	// each P has a background GC G.
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		if p.gcBgMarkWorker == nil {
			go gcBgMarkWorker(p)
			notetsleepg(&work.bgMarkReady, -1)
			noteclear(&work.bgMarkReady)
		}
	}
}
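
// As a concrete trace of the accounting set up below (illustrative):
// with nproc = nwait = ^uint32(0), one worker starts draining and
// nwait becomes nproc-1; a second does the same, nwait = nproc-2.
// When both finish and increment nwait back, nwait == nproc again,
// and the last incrementer checks work.full/work.partial to decide
// whether it has reached a completion point.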

// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking will stop when the work queues are empty
	// and there are no more workers (note that, since this is
	// concurrent, this may be a transient state, but mark
	// termination will clean it up). Between background workers
	// and assists, we don't really know how many workers there
	// will be, so we pretend to have an arbitrarily large number
	// of workers, almost all of which are "waiting". While a
	// worker is working it decrements nwait. If nproc == nwait,
	// there are no workers.
	work.nproc = ^uint32(0)
	work.nwait = ^uint32(0)

	// Reset background mark completion points.
	work.bgMark1.done = 1
	work.bgMark2.done = 1
}

func gcBgMarkWorker(p *p) {
	// Register this G as the background mark worker for p.
	if p.gcBgMarkWorker != nil {
		throw("P already has a background mark worker")
	}
	gp := getg()

	mp := acquirem()
	p.gcBgMarkWorker = gp
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)
	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, mp unsafe.Pointer) bool {
			releasem((*m)(mp))
			return true
		}, unsafe.Pointer(mp), "mark worker (idle)", traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker. (The P may later be reused, in which case
		// it will get a new worker.)
		if p.gcBgMarkWorker != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		mp = acquirem()

		if gcBlackenEnabled == 0 {
			throw("gcBgMarkWorker: blackening not enabled")
		}

		startTime := nanotime()

		decnwait := xadd(&work.nwait, -1)
		if decnwait == work.nproc {
			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
			throw("work.nwait was > work.nproc")
		}

		done := false
		switch p.gcMarkWorkerMode {
		default:
			throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
		case gcMarkWorkerDedicatedMode:
			gcDrain(&p.gcw, gcBgCreditSlack)
			// gcDrain did the xadd(&work.nwait +1) to
			// match the decrement above. It only returns
			// at a mark completion point.
			done = true
			if !p.gcw.empty() {
				throw("gcDrain returned with buffer")
			}
		case gcMarkWorkerFractionalMode, gcMarkWorkerIdleMode:
			gcDrainUntilPreempt(&p.gcw, gcBgCreditSlack)

			// If we are nearing the end of mark, dispose
			// of the cache promptly. We must do this
			// before signaling that we're no longer
			// working so that other workers can't observe
			// no workers and no work while we have this
			// cached, and before we compute done.
			if gcBlackenPromptly {
				p.gcw.dispose()
			}

			// Was this the last worker and did we run out
			// of work?
			incnwait := xadd(&work.nwait, +1)
			if incnwait > work.nproc {
				println("runtime: p.gcMarkWorkerMode=", p.gcMarkWorkerMode,
					"work.nwait=", incnwait, "work.nproc=", work.nproc)
				throw("work.nwait > work.nproc")
			}
			done = incnwait == work.nproc && work.full == 0 && work.partial == 0
		}

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if done {
			if gcBlackenPromptly {
				if work.bgMark1.done == 0 {
					throw("completing mark 2, but bgMark1.done == 0")
				}
				work.bgMark2.complete()
			} else {
				work.bgMark1.complete()
			}
		}

		duration := nanotime() - startTime
		switch p.gcMarkWorkerMode {
		case gcMarkWorkerDedicatedMode:
			xaddint64(&gcController.dedicatedMarkTime, duration)
			xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
		case gcMarkWorkerFractionalMode:
			xaddint64(&gcController.fractionalMarkTime, duration)
			xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
		case gcMarkWorkerIdleMode:
			xaddint64(&gcController.idleMarkTime, duration)
		}
	}
}

// gcMarkWorkAvailable returns true if executing a mark worker
// on p is potentially useful.
func gcMarkWorkAvailable(p *p) bool {
	if !p.gcw.empty() {
		return true
	}
	if atomicload64(&work.full) != 0 || atomicload64(&work.partial) != 0 {
		return true // global work available
	}
	return false
}

// gcFlushGCWork disposes the gcWork caches of all Ps. The world must
// be stopped.
//go:nowritebarrier
func gcFlushGCWork() {
	// Gather all cached GC work. All other Ps are stopped, so
	// it's safe to manipulate their GC work caches.
	for i := 0; i < int(gomaxprocs); i++ {
		allp[i].gcw.dispose()
	}
}

// gcMark runs the mark (or, for concurrent GC, mark termination).
// STW is in effect at this point.
//TODO go:nowritebarrier
func gcMark(start_time int64) {
	if debug.allocfreetrace > 0 {
		tracegc()
	}

	if gcphase != _GCmarktermination {
		throw("in gcMark expecting to see gcphase as _GCmarktermination")
	}
	work.tstart = start_time

	gcCopySpans() // TODO(rlh): should this be hoisted and done only once? Right now it is done for normal marking and also for checkmarking.

	// Make sure the per-P gcWork caches are empty. During mark
	// termination, these caches can still be used temporarily,
	// but must be disposed to the global lists immediately.
	gcFlushGCWork()

	work.nwait = 0
	work.ndone = 0
	work.nproc = uint32(gcprocs())

	if trace.enabled {
		traceGCScanStart()
	}

	parforsetup(work.markfor, work.nproc, uint32(_RootCount+allglen), false, markroot)
	if work.nproc > 1 {
		noteclear(&work.alldone)
		helpgc(int32(work.nproc))
	}

	gchelperstart()
	parfordo(work.markfor)

	var gcw gcWork
	gcDrain(&gcw, -1)
	gcw.dispose()

	if work.full != 0 {
		throw("work.full != 0")
	}
	if work.partial != 0 {
		throw("work.partial != 0")
	}

	if work.nproc > 1 {
		notesleep(&work.alldone)
	}

	for i := 0; i < int(gomaxprocs); i++ {
		if allp[i].gcw.wbuf != 0 {
			throw("P has cached GC work at end of mark termination")
		}
	}

	if trace.enabled {
		traceGCScanDone()
	}

	// TODO(austin): This doesn't have to be done during STW, as
	// long as we block the next GC cycle until this is done. Move
	// it after we start the world, but before dropping worldsema.
	// (See issue #11465.)
	freeStackSpans()

	cachestats()

	// Compute the reachable heap size at the beginning of the
	// cycle. This is approximately the marked heap size at the
	// end (which we know) minus the amount of marked heap that
	// was allocated after marking began (which we don't know, but
	// is approximately the amount of heap that was allocated
	// since marking began).
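	//
	// As a worked example with illustrative numbers: if 50MB were
	// marked and heap_live grew from 40MB to 60MB during the cycle
	// (20MB allocated), then heap_reachable ≈ 50-20 = 30MB, and with
	// triggerRatio = 0.875 the next cycle would trigger near
	// 30 * 1.875 ≈ 56MB.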
	allocatedDuringCycle := memstats.heap_live - work.initialHeapLive
	if work.bytesMarked >= allocatedDuringCycle {
		memstats.heap_reachable = work.bytesMarked - allocatedDuringCycle
	} else {
		// This can happen if most of the allocation during
		// the cycle never became reachable from the heap.
		// Just set the reachable heap approximation to 0 and
		// let the heapminimum kick in below.
		memstats.heap_reachable = 0
	}

	// Trigger the next GC cycle when the allocated heap has grown
	// by triggerRatio over the reachable heap size. Assume that
	// we're in steady state, so the reachable heap size is the
	// same now as it was at the beginning of the GC cycle.
	memstats.next_gc = uint64(float64(memstats.heap_reachable) * (1 + gcController.triggerRatio))
	if memstats.next_gc < heapminimum {
		memstats.next_gc = heapminimum
	}
	if int64(memstats.next_gc) < 0 {
		print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
		throw("next_gc underflow")
	}

	// Update other GC heap size stats.
	memstats.heap_live = work.bytesMarked
	memstats.heap_marked = work.bytesMarked
	memstats.heap_scan = uint64(gcController.scanWork)

	minNextGC := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
	if memstats.next_gc < minNextGC {
		// The allocated heap is already past the trigger.
		// This can happen if the triggerRatio is very low and
		// the reachable heap estimate is less than the live
		// heap size.
		//
		// Concurrent sweep happens in the heap growth from
		// heap_live to next_gc, so bump next_gc up to ensure
		// that concurrent sweep has some heap growth in which
		// to perform sweeping before we start the next GC
		// cycle.
		memstats.next_gc = minNextGC
	}

	if trace.enabled {
		traceHeapAlloc()
		traceNextGC()
	}
}

func gcSweep(mode int) {
	if gcphase != _GCoff {
		throw("gcSweep being done but phase is not GCoff")
	}
	gcCopySpans()

	lock(&mheap_.lock)
	mheap_.sweepgen += 2
	mheap_.sweepdone = 0
	sweep.spanidx = 0
	unlock(&mheap_.lock)

	if !_ConcurrentSweep || mode == gcForceBlockMode {
		// Special case synchronous sweep.
		// Record that no proportional sweeping has to happen.
		lock(&mheap_.lock)
		mheap_.sweepPagesPerByte = 0
		mheap_.pagesSwept = 0
		unlock(&mheap_.lock)
		// Sweep all spans eagerly.
		for sweepone() != ^uintptr(0) {
			sweep.npausesweep++
		}
		// Do an additional mProf_GC, because all 'free' events are now real as well.
		mProf_GC()
		mProf_GC()
		return
	}

	// Account how much sweeping needs to be done before the next
	// GC cycle and set up proportional sweep statistics.
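	//
	// As a worked example with illustrative numbers: with 2000 in-use
	// pages, next_gc = 64MB and heap_live = 56MB, the distance is 8MB
	// minus the 1MB margin = 7MB, so sweepPagesPerByte ≈ 2000/7e6:
	// each allocated byte obliges roughly 0.0003 pages of sweeping.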
func gcCopySpans() {
	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
	// resizing/freeing allspans.
	// New spans can be created while GC progresses, but they are not garbage for
	// this round:
	// - new stack spans can be created even while the world is stopped.
	// - new malloc spans can be created during the concurrent sweep.
	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from the heap.
	lock(&mheap_.lock)
	// Free the old cached mark array if necessary.
	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
	}
	// Cache the current array for sweeping.
	mheap_.gcspans = mheap_.allspans
	work.spans = h_allspans
	unlock(&mheap_.lock)
}

// gcResetGState resets the GC state of all G's and returns the length
// of allgs.
func gcResetGState() (numgs int) {
	// This may be called during a concurrent phase, so make sure
	// allgs doesn't change.
	lock(&allglock)
	for _, gp := range allgs {
		gp.gcscandone = false  // set to true in gcphasework
		gp.gcscanvalid = false // stack has not been scanned
		gp.gcalloc = 0
		gp.gcscanwork = 0
	}
	numgs = len(allgs)
	unlock(&allglock)
	return
}

// gcResetMarkState resets state prior to marking (concurrent or STW).
//
// TODO(austin): Merge with gcResetGState. See issue #11427.
func gcResetMarkState() {
	work.bytesMarked = 0
	work.initialHeapLive = memstats.heap_live
}

// Hooks for other packages

var poolcleanup func()

//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
	poolcleanup = f
}

func clearpools() {
	// Clear sync.Pools.
	if poolcleanup != nil {
		poolcleanup()
	}

	// Clear central sudog cache.
	// Leave per-P caches alone, they have strictly bounded size.
	// Disconnect cached list before dropping it on the floor,
	// so that a dangling ref to one entry does not pin all of them.
	lock(&sched.sudoglock)
	var sg, sgnext *sudog
	for sg = sched.sudogcache; sg != nil; sg = sgnext {
		sgnext = sg.next
		sg.next = nil
	}
	sched.sudogcache = nil
	unlock(&sched.sudoglock)

	// Clear central defer pools.
	// Leave per-P pools alone, they have strictly bounded size.
	lock(&sched.deferlock)
	for i := range sched.deferpool {
		// Disconnect cached list before dropping it on the floor,
		// so that a dangling ref to one entry does not pin all of them.
		var d, dlink *_defer
		for d = sched.deferpool[i]; d != nil; d = dlink {
			dlink = d.link
			d.link = nil
		}
		sched.deferpool[i] = nil
	}
	unlock(&sched.deferlock)

	for _, p := range &allp {
		if p == nil {
			break
		}
		// Clear the tinyalloc pool.
		if c := p.mcache; c != nil {
			c.tiny = nil
			c.tinyoffset = 0
		}
	}
}
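// The "disconnect before dropping" idiom used twice in clearpools
// above is worth spelling out: if a cached list head were simply
// overwritten with nil while the links stayed intact, one dangling
// reference to any element would keep every element after it alive.
// A minimal sketch of the idiom, using a hypothetical node type:
type listNode struct {
	next *listNode
}

func dropList(head *listNode) {
	var next *listNode
	for n := head; n != nil; n = next {
		next = n.next
		n.next = nil // break the chain: a surviving ref now pins only its own node
	}
}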
// Timing

//go:nowritebarrier
func gchelper() {
	_g_ := getg()
	_g_.m.traceback = 2
	gchelperstart()

	if trace.enabled {
		traceGCScanStart()
	}

	// Parallel mark over GC roots.
	parfordo(work.markfor)
	if gcphase != _GCscan {
		var gcw gcWork
		gcDrain(&gcw, -1) // blocks in getfull
		gcw.dispose()
	}

	if trace.enabled {
		traceGCScanDone()
	}

	nproc := work.nproc // work.nproc can change right after we increment work.ndone
	if xadd(&work.ndone, +1) == nproc-1 {
		notewakeup(&work.alldone)
	}
	_g_.m.traceback = 0
}

func gchelperstart() {
	_g_ := getg()

	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
		throw("gchelperstart: bad m->helpgc")
	}
	if _g_ != _g_.m.g0 {
		throw("gchelper not running on g0 stack")
	}
}

// itoaDiv formats val/(10**dec) into buf.
func itoaDiv(buf []byte, val uint64, dec int) []byte {
	i := len(buf) - 1
	idec := i - dec
	for val >= 10 || i >= idec {
		buf[i] = byte(val%10 + '0')
		i--
		if i == idec {
			buf[i] = '.'
			i--
		}
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte {
	if ns >= 10e6 {
		// Format as whole milliseconds.
		return itoaDiv(buf, ns/1e6, 0)
	}
	// Format two digits of precision, with at most three decimal places.
	x := ns / 1e3
	if x == 0 {
		buf[0] = '0'
		return buf[:1]
	}
	dec := 3
	for x >= 100 {
		x /= 10
		dec--
	}
	return itoaDiv(buf, x, dec)
}
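// A few worked examples for the two formatting helpers above, shown
// as an illustrative sketch (exampleFmtNSAsMS is hypothetical and not
// part of the runtime):
func exampleFmtNSAsMS() {
	buf := make([]byte, 8)
	print(string(fmtNSAsMS(buf, 12000000)), "\n") // "12":    >= 10ms, whole milliseconds
	print(string(fmtNSAsMS(buf, 1234567)), "\n")  // "1.2":   two digits of precision
	print(string(fmtNSAsMS(buf, 45000)), "\n")    // "0.045": at most three decimal places
}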