// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 1. GC performs sweep termination.
//
//    a. Stop the world. This causes all Ps to reach a GC safe-point.
//
//    b. Sweep any unswept spans. There will only be unswept spans if
//    this GC cycle was forced before the expected time.
//
// 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are
// allowed to locally cache parts of the work queue.
//
//    a. Prepare for the mark phase by setting gcphase to _GCmark
//    (from _GCoff), enabling the write barrier, enabling mutator
//    assists, and enqueueing root mark jobs. No objects may be
//    scanned until all Ps have enabled the write barrier, which is
//    accomplished using STW.
//
//    b. Start the world. From this point, GC work is done by mark
//    workers started by the scheduler and by assists performed as
//    part of allocation. The write barrier shades both the
//    overwritten pointer and the new pointer value for any pointer
//    writes (see mbarrier.go for details). Newly allocated objects
//    are immediately marked black.
//
//    c. GC performs root marking jobs. This includes scanning all
//    stacks, shading all globals, and shading any heap pointers in
//    off-heap runtime data structures. Scanning a stack stops a
//    goroutine, shades any pointers found on its stack, and then
//    resumes the goroutine.
//
//    d. GC drains the work queue of grey objects, scanning each grey
//    object to black and shading all pointers found in the object
//    (which in turn may add those pointers to the work queue).
//
// 3. Once the global work queue is empty (but local work queue caches
// may still contain work), GC performs the "mark 2" sub-phase.
//
//    a. GC stops all workers, disables local work queue caches,
//    flushes each P's local work queue cache to the global work queue
//    cache, and reenables workers.
//
//    b. GC again drains the work queue, as in 2d above.
//
// 4. Once the work queue is empty, GC performs mark termination.
//
//    a. Stop the world.
//
//    b. Set gcphase to _GCmarktermination, and disable workers and
//    assists.
//
//    c. Drain any remaining work from the work queue (typically there
//    will be none).
//
//    d. Perform other housekeeping like flushing mcaches.
//
// 5. GC performs the sweep phase.
//
//    a. Prepare for the sweep phase by setting gcphase to _GCoff,
//    setting up sweep state and disabling the write barrier.
//
//    b. Start the world. From this point on, newly allocated objects
//    are white, and allocating sweeps spans before use if necessary.
//
//    c. GC does concurrent sweeping in the background and in response
//    to allocation. See description below.
//
// 6. When sufficient allocation has taken place, replay the sequence
// starting with 1 above. See discussion of GC rate below.

// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the
// heap, it sweeps spans until it frees at least that many pages into the heap.
// There is one case where this may not suffice: if a goroutine sweeps and frees
// two nonadjacent one-page spans to the heap, it will allocate a new two-page
// span, but there can still be other one-page unswept spans which could be
// combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).

// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment
// variable (100 by default). If GOGC=100 and we're using 4M, we'll GC again when
// we get to 8M (this mark is tracked in the next_gc variable). This keeps the GC
// cost in linear proportion to the allocation cost. Adjusting GOGC just changes
// the linear constant (and also the amount of extra memory used).
// (A small illustrative sketch of this arithmetic appears below, after setGCPercent.)

// Oblets
//
// In order to prevent long pauses while scanning large objects and to
// improve parallelism, the garbage collector breaks up scan jobs for
// objects larger than maxObletBytes into "oblets" of at most
// maxObletBytes. When scanning encounters the beginning of a large
// object, it scans only the first oblet and enqueues the remaining
// oblets as new scan jobs.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles. This will be scaled by gcpercent/100.
	sweepMinHeapDistance = 1024 * 1024
)

// heapminimum is the minimum heap size at which to trigger GC.
// For small heaps, this overrides the usual GOGC*live set rule.
//
// When there is a very small live set but a lot of allocation, simply
// collecting when the heap reaches GOGC*live results in many GC
// cycles and high total per-GC overhead. This minimum amortizes this
// per-GC overhead while keeping the heap reasonably small.
//
// During initialization this is set to 4MB*GOGC/100. In the case of
// GOGC==0, this will set heapminimum to 0, resulting in constant
// collection even when the heap size is small, which is useful for
// debugging.
var heapminimum uint64 = defaultHeapMinimum

// defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	// No sweep on the first cycle.
	mheap_.sweepdone = 1

	// Set a reasonable initial GC trigger.
	memstats.triggerRatio = 7 / 8.0

	// Fake a heap_marked value so it looks like a trigger at
	// heapminimum is the appropriate growth from heap_marked.
	// This will go into computing the initial GC goal.
	memstats.heap_marked = uint64(float64(heapminimum) / (1 + memstats.triggerRatio))

	// Set gcpercent from the environment. This will also compute
	// and set the GC trigger and goal.
	_ = setGCPercent(readgogc())

	work.startSema = 1
	work.markDoneSema = 1
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
	// Update pacing in response to gcpercent change.
	gcSetTriggerRatio(memstats.triggerRatio)
	unlock(&mheap_.lock)
	return out
}

// Garbage collector phase.
// Indicates the write barrier and synchronization tasks to perform.
var gcphase uint32
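// The pacing arithmetic described in the "GC rate" comment at the top of this
// file can be sketched in isolation. The function below is an illustrative,
// self-contained sketch and is not part of the runtime: the parameter names
// are hypothetical stand-ins for memstats.heap_marked, gcpercent, and
// memstats.triggerRatio, and the formulas mirror the goal and trigger
// computations in gcSetTriggerRatio further down in this file (the
// heapminimum and sweep-distance bounds are omitted here).
func gcPacingSketch(heapMarked uint64, gogc int32, triggerRatio float64) (trigger, goal uint64) {
	if gogc < 0 {
		// GOGC=off: no heap-growth trigger or goal.
		return ^uint64(0), ^uint64(0)
	}
	// Goal: the heap marked by the previous cycle, grown by GOGC/100.
	// With gogc=100 and a 4MB marked heap this gives an 8MB goal,
	// matching the example in the "GC rate" comment.
	goal = heapMarked + heapMarked*uint64(gogc)/100
	// Trigger: start the next cycle early enough that concurrent marking
	// can finish before the heap reaches the goal.
	trigger = uint64(float64(heapMarked) * (1 + triggerRatio))
	if goal < trigger {
		// Other bounds may have pushed the trigger past the GOGC-based
		// goal; keep the goal at or above the trigger, as
		// gcSetTriggerRatio does.
		goal = trigger
	}
	return trigger, goal
}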
// The compiler knows about this variable.
// If you change it, you must change builtin/runtime.go, too.
// If you change the first four bytes, you must also change the write
// barrier insertion code.
var writeBarrier struct {
	enabled bool    // compiler emits a check of this before calling write barrier
	pad     [3]byte // compiler uses 32-bit load for "enabled" field
	needed  bool    // whether we need a write barrier for current GC phase
	cgo     bool    // whether we need a write barrier for a cgo check
	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

// gcBlackenPromptly indicates that optimizations that may
// hide work from the global work queue should be disabled.
//
// If gcBlackenPromptly is true, per-P gcWork caches should
// be flushed immediately and new objects should be allocated black.
//
// There is a tension between allocating objects white and
// allocating them black. If white and the objects die before being
// marked they can be collected during this GC cycle. On the other
// hand allocating them black will reduce _GCmarktermination latency
// since more work is done in the mark phase. This tension is resolved
// by allocating white until the mark phase is approaching its end and
// then allocating black for the remainder of the mark phase.
var gcBlackenPromptly bool

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)

//go:nosplit
func setGCPhase(x uint32) {
	atomic.Store(&gcphase, x)
	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization. (A small illustrative
	// sketch of this split appears below.)
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

// gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
// to use in execution traces.
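// The split of the mark utilization goal between dedicated and fractional
// workers, referred to in the gcMarkWorkerFractionalMode comment above, can be
// sketched as follows. This is an illustrative, self-contained sketch and is
// not part of the runtime; it mirrors the computation in
// gcControllerState.startCycle further down in this file.
func markWorkerSplitSketch(gomaxprocs int, goalUtilization float64) (dedicated int64, fractionalGoal float64, fractionalWorkers int64) {
	// Total worker-P budget, e.g. 6 * 0.25 = 1.5.
	total := float64(gomaxprocs) * goalUtilization
	// Whole Ps become dedicated mark workers.
	dedicated = int64(total)
	// The remainder is covered part-time by at most one fractional
	// worker, e.g. 0.5 means some P should be marking 50% of the time.
	fractionalGoal = total - float64(dedicated)
	if fractionalGoal > 0 {
		fractionalWorkers = 1
	}
	return dedicated, fractionalGoal, fractionalWorkers
}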
317 var gcMarkWorkerModeStrings = [...]string{ 318 "GC (dedicated)", 319 "GC (fractional)", 320 "GC (idle)", 321 } 322 323 // gcController implements the GC pacing controller that determines 324 // when to trigger concurrent garbage collection and how much marking 325 // work to do in mutator assists and background marking. 326 // 327 // It uses a feedback control algorithm to adjust the memstats.gc_trigger 328 // trigger based on the heap growth and GC CPU utilization each cycle. 329 // This algorithm optimizes for heap growth to match GOGC and for CPU 330 // utilization between assist and background marking to be 25% of 331 // GOMAXPROCS. The high-level design of this algorithm is documented 332 // at https://golang.org/s/go15gcpacing. 333 // 334 // All fields of gcController are used only during a single mark 335 // cycle. 336 var gcController gcControllerState 337 338 type gcControllerState struct { 339 // scanWork is the total scan work performed this cycle. This 340 // is updated atomically during the cycle. Updates occur in 341 // bounded batches, since it is both written and read 342 // throughout the cycle. At the end of the cycle, this is how 343 // much of the retained heap is scannable. 344 // 345 // Currently this is the bytes of heap scanned. For most uses, 346 // this is an opaque unit of work, but for estimation the 347 // definition is important. 348 scanWork int64 349 350 // bgScanCredit is the scan work credit accumulated by the 351 // concurrent background scan. This credit is accumulated by 352 // the background scan and stolen by mutator assists. This is 353 // updated atomically. Updates occur in bounded batches, since 354 // it is both written and read throughout the cycle. 355 bgScanCredit int64 356 357 // assistTime is the nanoseconds spent in mutator assists 358 // during this cycle. This is updated atomically. Updates 359 // occur in bounded batches, since it is both written and read 360 // throughout the cycle. 361 assistTime int64 362 363 // dedicatedMarkTime is the nanoseconds spent in dedicated 364 // mark workers during this cycle. This is updated atomically 365 // at the end of the concurrent mark phase. 366 dedicatedMarkTime int64 367 368 // fractionalMarkTime is the nanoseconds spent in the 369 // fractional mark worker during this cycle. This is updated 370 // atomically throughout the cycle and will be up-to-date if 371 // the fractional mark worker is not currently running. 372 fractionalMarkTime int64 373 374 // idleMarkTime is the nanoseconds spent in idle marking 375 // during this cycle. This is updated atomically throughout 376 // the cycle. 377 idleMarkTime int64 378 379 // markStartTime is the absolute start time in nanoseconds 380 // that assists and background mark workers started. 381 markStartTime int64 382 383 // dedicatedMarkWorkersNeeded is the number of dedicated mark 384 // workers that need to be started. This is computed at the 385 // beginning of each cycle and decremented atomically as 386 // dedicated mark workers get started. 387 dedicatedMarkWorkersNeeded int64 388 389 // assistWorkPerByte is the ratio of scan work to allocated 390 // bytes that should be performed by mutator assists. This is 391 // computed at the beginning of each cycle and updated every 392 // time heap_scan is updated. 393 assistWorkPerByte float64 394 395 // assistBytesPerWork is 1/assistWorkPerByte. 
396 assistBytesPerWork float64 397 398 // fractionalUtilizationGoal is the fraction of wall clock 399 // time that should be spent in the fractional mark worker. 400 // For example, if the overall mark utilization goal is 25% 401 // and GOMAXPROCS is 6, one P will be a dedicated mark worker 402 // and this will be set to 0.5 so that 50% of the time some P 403 // is in a fractional mark worker. This is computed at the 404 // beginning of each cycle. 405 fractionalUtilizationGoal float64 406 407 _ [sys.CacheLineSize]byte 408 409 // fractionalMarkWorkersNeeded is the number of fractional 410 // mark workers that need to be started. This is either 0 or 411 // 1. This is potentially updated atomically at every 412 // scheduling point (hence it gets its own cache line). 413 fractionalMarkWorkersNeeded int64 414 415 _ [sys.CacheLineSize]byte 416 } 417 418 // startCycle resets the GC controller's state and computes estimates 419 // for a new GC cycle. The caller must hold worldsema. 420 func (c *gcControllerState) startCycle() { 421 c.scanWork = 0 422 c.bgScanCredit = 0 423 c.assistTime = 0 424 c.dedicatedMarkTime = 0 425 c.fractionalMarkTime = 0 426 c.idleMarkTime = 0 427 428 // If this is the first GC cycle or we're operating on a very 429 // small heap, fake heap_marked so it looks like gc_trigger is 430 // the appropriate growth from heap_marked, even though the 431 // real heap_marked may not have a meaningful value (on the 432 // first cycle) or may be much smaller (resulting in a large 433 // error response). 434 if memstats.gc_trigger <= heapminimum { 435 memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + memstats.triggerRatio)) 436 } 437 438 // Re-compute the heap goal for this cycle in case something 439 // changed. This is the same calculation we use elsewhere. 440 memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100 441 if gcpercent < 0 { 442 memstats.next_gc = ^uint64(0) 443 } 444 445 // Ensure that the heap goal is at least a little larger than 446 // the current live heap size. This may not be the case if GC 447 // start is delayed or if the allocation that pushed heap_live 448 // over gc_trigger is large or if the trigger is really close to 449 // GOGC. Assist is proportional to this distance, so enforce a 450 // minimum distance, even if it means going over the GOGC goal 451 // by a tiny bit. 452 if memstats.next_gc < memstats.heap_live+1024*1024 { 453 memstats.next_gc = memstats.heap_live + 1024*1024 454 } 455 456 // Compute the total mark utilization goal and divide it among 457 // dedicated and fractional workers. 458 totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization 459 c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal) 460 c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded) 461 if c.fractionalUtilizationGoal > 0 { 462 c.fractionalMarkWorkersNeeded = 1 463 } else { 464 c.fractionalMarkWorkersNeeded = 0 465 } 466 467 // Clear per-P state 468 for _, p := range &allp { 469 if p == nil { 470 break 471 } 472 p.gcAssistTime = 0 473 } 474 475 // Compute initial values for controls that are updated 476 // throughout the cycle. 
477 c.revise() 478 479 if debug.gcpacertrace > 0 { 480 print("pacer: assist ratio=", c.assistWorkPerByte, 481 " (scan ", memstats.heap_scan>>20, " MB in ", 482 work.initialHeapLive>>20, "->", 483 memstats.next_gc>>20, " MB)", 484 " workers=", c.dedicatedMarkWorkersNeeded, 485 "+", c.fractionalMarkWorkersNeeded, "\n") 486 } 487 } 488 489 // revise updates the assist ratio during the GC cycle to account for 490 // improved estimates. This should be called either under STW or 491 // whenever memstats.heap_scan, memstats.heap_live, or 492 // memstats.next_gc is updated (with mheap_.lock held). 493 // 494 // It should only be called when gcBlackenEnabled != 0 (because this 495 // is when assists are enabled and the necessary statistics are 496 // available). 497 func (c *gcControllerState) revise() { 498 // Compute the expected scan work remaining. 499 // 500 // Note that we currently count allocations during GC as both 501 // scannable heap (heap_scan) and scan work completed 502 // (scanWork), so this difference won't be changed by 503 // allocations during GC. 504 // 505 // This particular estimate is a strict upper bound on the 506 // possible remaining scan work for the current heap. 507 // You might consider dividing this by 2 (or by 508 // (100+GOGC)/100) to counter this over-estimation, but 509 // benchmarks show that this has almost no effect on mean 510 // mutator utilization, heap size, or assist time and it 511 // introduces the danger of under-estimating and letting the 512 // mutator outpace the garbage collector. 513 scanWorkExpected := int64(memstats.heap_scan) - c.scanWork 514 if scanWorkExpected < 1000 { 515 // We set a somewhat arbitrary lower bound on 516 // remaining scan work since if we aim a little high, 517 // we can miss by a little. 518 // 519 // We *do* need to enforce that this is at least 1, 520 // since marking is racy and double-scanning objects 521 // may legitimately make the expected scan work 522 // negative. 523 scanWorkExpected = 1000 524 } 525 526 // Compute the heap distance remaining. 527 heapDistance := int64(memstats.next_gc) - int64(atomic.Load64(&memstats.heap_live)) 528 if heapDistance <= 0 { 529 // This shouldn't happen, but if it does, avoid 530 // dividing by zero or setting the assist negative. 531 heapDistance = 1 532 } 533 534 // Compute the mutator assist ratio so by the time the mutator 535 // allocates the remaining heap bytes up to next_gc, it will 536 // have done (or stolen) the remaining amount of scan work. 537 c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance) 538 c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected) 539 } 540 541 // endCycle computes the trigger ratio for the next cycle. 542 func (c *gcControllerState) endCycle() float64 { 543 if work.userForced { 544 // Forced GC means this cycle didn't start at the 545 // trigger, so where it finished isn't good 546 // information about how to adjust the trigger. 547 // Just leave it where it is. 548 return memstats.triggerRatio 549 } 550 551 // Proportional response gain for the trigger controller. Must 552 // be in [0, 1]. Lower values smooth out transient effects but 553 // take longer to respond to phase changes. Higher values 554 // react to phase changes quickly, but are more affected by 555 // transient changes. Values near 1 may be unstable. 556 const triggerGain = 0.5 557 558 // Compute next cycle trigger ratio. 
First, this computes the 559 // "error" for this cycle; that is, how far off the trigger 560 // was from what it should have been, accounting for both heap 561 // growth and GC CPU utilization. We compute the actual heap 562 // growth during this cycle and scale that by how far off from 563 // the goal CPU utilization we were (to estimate the heap 564 // growth if we had the desired CPU utilization). The 565 // difference between this estimate and the GOGC-based goal 566 // heap growth is the error. 567 goalGrowthRatio := float64(gcpercent) / 100 568 actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1 569 assistDuration := nanotime() - c.markStartTime 570 571 // Assume background mark hit its utilization goal. 572 utilization := gcGoalUtilization 573 // Add assist utilization; avoid divide by zero. 574 if assistDuration > 0 { 575 utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs)) 576 } 577 578 triggerError := goalGrowthRatio - memstats.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-memstats.triggerRatio) 579 580 // Finally, we adjust the trigger for next time by this error, 581 // damped by the proportional gain. 582 triggerRatio := memstats.triggerRatio + triggerGain*triggerError 583 584 if debug.gcpacertrace > 0 { 585 // Print controller state in terms of the design 586 // document. 587 H_m_prev := memstats.heap_marked 588 h_t := memstats.triggerRatio 589 H_T := memstats.gc_trigger 590 h_a := actualGrowthRatio 591 H_a := memstats.heap_live 592 h_g := goalGrowthRatio 593 H_g := int64(float64(H_m_prev) * (1 + h_g)) 594 u_a := utilization 595 u_g := gcGoalUtilization 596 W_a := c.scanWork 597 print("pacer: H_m_prev=", H_m_prev, 598 " h_t=", h_t, " H_T=", H_T, 599 " h_a=", h_a, " H_a=", H_a, 600 " h_g=", h_g, " H_g=", H_g, 601 " u_a=", u_a, " u_g=", u_g, 602 " W_a=", W_a, 603 " goalΔ=", goalGrowthRatio-h_t, 604 " actualΔ=", h_a-h_t, 605 " u_a/u_g=", u_a/u_g, 606 "\n") 607 } 608 609 return triggerRatio 610 } 611 612 // enlistWorker encourages another dedicated mark worker to start on 613 // another P if there are spare worker slots. It is used by putfull 614 // when more work is made available. 615 // 616 //go:nowritebarrier 617 func (c *gcControllerState) enlistWorker() { 618 // If there are idle Ps, wake one so it will run an idle worker. 619 // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112. 620 // 621 // if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { 622 // wakep() 623 // return 624 // } 625 626 // There are no idle Ps. If we need more dedicated workers, 627 // try to preempt a running P so it will switch to a worker. 628 if c.dedicatedMarkWorkersNeeded <= 0 { 629 return 630 } 631 // Pick a random other P to preempt. 632 if gomaxprocs <= 1 { 633 return 634 } 635 gp := getg() 636 if gp == nil || gp.m == nil || gp.m.p == 0 { 637 return 638 } 639 myID := gp.m.p.ptr().id 640 for tries := 0; tries < 5; tries++ { 641 id := int32(fastrandn(uint32(gomaxprocs - 1))) 642 if id >= myID { 643 id++ 644 } 645 p := allp[id] 646 if p.status != _Prunning { 647 continue 648 } 649 if preemptone(p) { 650 return 651 } 652 } 653 } 654 655 // findRunnableGCWorker returns the background mark worker for _p_ if it 656 // should be run. This must only be called when gcBlackenEnabled != 0. 
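// The proportional controller step described in the endCycle comments above
// can be sketched in isolation. This is an illustrative, self-contained
// sketch and is not part of the runtime; it mirrors the triggerError and
// triggerRatio computation in gcControllerState.endCycle, where utilization is
// the observed assist-plus-background mark CPU fraction and goalUtilization
// corresponds to gcGoalUtilization.
func triggerFeedbackSketch(triggerRatio, goalGrowthRatio, actualGrowthRatio, utilization, goalUtilization float64) float64 {
	// Proportional gain; must be in [0, 1]. 0.5 matches triggerGain above.
	const triggerGain = 0.5
	// How far off the trigger was this cycle: the difference between the
	// GOGC-based goal growth and the utilization-scaled actual growth,
	// both measured relative to the current trigger ratio.
	triggerError := goalGrowthRatio - triggerRatio -
		utilization/goalUtilization*(actualGrowthRatio-triggerRatio)
	// Nudge the trigger ratio by the error, damped by the gain.
	return triggerRatio + triggerGain*triggerError
}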
657 func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g { 658 if gcBlackenEnabled == 0 { 659 throw("gcControllerState.findRunnable: blackening not enabled") 660 } 661 if _p_.gcBgMarkWorker == 0 { 662 // The mark worker associated with this P is blocked 663 // performing a mark transition. We can't run it 664 // because it may be on some other run or wait queue. 665 return nil 666 } 667 668 if !gcMarkWorkAvailable(_p_) { 669 // No work to be done right now. This can happen at 670 // the end of the mark phase when there are still 671 // assists tapering off. Don't bother running a worker 672 // now because it'll just return immediately. 673 return nil 674 } 675 676 decIfPositive := func(ptr *int64) bool { 677 if *ptr > 0 { 678 if atomic.Xaddint64(ptr, -1) >= 0 { 679 return true 680 } 681 // We lost a race 682 atomic.Xaddint64(ptr, +1) 683 } 684 return false 685 } 686 687 if decIfPositive(&c.dedicatedMarkWorkersNeeded) { 688 // This P is now dedicated to marking until the end of 689 // the concurrent mark phase. 690 _p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode 691 } else { 692 if !decIfPositive(&c.fractionalMarkWorkersNeeded) { 693 // No more workers are need right now. 694 return nil 695 } 696 697 // This P has picked the token for the fractional worker. 698 // Is the GC currently under or at the utilization goal? 699 // If so, do more work. 700 // 701 // We used to check whether doing one time slice of work 702 // would remain under the utilization goal, but that has the 703 // effect of delaying work until the mutator has run for 704 // enough time slices to pay for the work. During those time 705 // slices, write barriers are enabled, so the mutator is running slower. 706 // Now instead we do the work whenever we're under or at the 707 // utilization work and pay for it by letting the mutator run later. 708 // This doesn't change the overall utilization averages, but it 709 // front loads the GC work so that the GC finishes earlier and 710 // write barriers can be turned off sooner, effectively giving 711 // the mutator a faster machine. 712 // 713 // The old, slower behavior can be restored by setting 714 // gcForcePreemptNS = forcePreemptNS. 715 const gcForcePreemptNS = 0 716 717 // TODO(austin): We could fast path this and basically 718 // eliminate contention on c.fractionalMarkWorkersNeeded by 719 // precomputing the minimum time at which it's worth 720 // next scheduling the fractional worker. Then Ps 721 // don't have to fight in the window where we've 722 // passed that deadline and no one has started the 723 // worker yet. 724 // 725 // TODO(austin): Shorter preemption interval for mark 726 // worker to improve fairness and give this 727 // finer-grained control over schedule? 728 now := nanotime() - gcController.markStartTime 729 then := now + gcForcePreemptNS 730 timeUsed := c.fractionalMarkTime + gcForcePreemptNS 731 if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal { 732 // Nope, we'd overshoot the utilization goal 733 atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1) 734 return nil 735 } 736 _p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode 737 } 738 739 // Run the background mark worker 740 gp := _p_.gcBgMarkWorker.ptr() 741 casgstatus(gp, _Gwaiting, _Grunnable) 742 if trace.enabled { 743 traceGoUnpark(gp, 0) 744 } 745 return gp 746 } 747 748 // gcSetTriggerRatio sets the trigger ratio and updates everything 749 // derived from it: the absolute trigger, the heap goal, mark pacing, 750 // and sweep pacing. 
751 // 752 // This can be called any time. If GC is the in the middle of a 753 // concurrent phase, it will adjust the pacing of that phase. 754 // 755 // This depends on gcpercent, memstats.heap_marked, and 756 // memstats.heap_live. These must be up to date. 757 // 758 // mheap_.lock must be held or the world must be stopped. 759 func gcSetTriggerRatio(triggerRatio float64) { 760 // Set the trigger ratio, capped to reasonable bounds. 761 if triggerRatio < 0 { 762 // This can happen if the mutator is allocating very 763 // quickly or the GC is scanning very slowly. 764 triggerRatio = 0 765 } else if gcpercent >= 0 { 766 // Ensure there's always a little margin so that the 767 // mutator assist ratio isn't infinity. 768 maxTriggerRatio := 0.95 * float64(gcpercent) / 100 769 if triggerRatio > maxTriggerRatio { 770 triggerRatio = maxTriggerRatio 771 } 772 } 773 memstats.triggerRatio = triggerRatio 774 775 // Compute the absolute GC trigger from the trigger ratio. 776 // 777 // We trigger the next GC cycle when the allocated heap has 778 // grown by the trigger ratio over the marked heap size. 779 trigger := ^uint64(0) 780 if gcpercent >= 0 { 781 trigger = uint64(float64(memstats.heap_marked) * (1 + triggerRatio)) 782 // Don't trigger below the minimum heap size. 783 minTrigger := heapminimum 784 if !gosweepdone() { 785 // Concurrent sweep happens in the heap growth 786 // from heap_live to gc_trigger, so ensure 787 // that concurrent sweep has some heap growth 788 // in which to perform sweeping before we 789 // start the next GC cycle. 790 sweepMin := atomic.Load64(&memstats.heap_live) + sweepMinHeapDistance*uint64(gcpercent)/100 791 if sweepMin > minTrigger { 792 minTrigger = sweepMin 793 } 794 } 795 if trigger < minTrigger { 796 trigger = minTrigger 797 } 798 if int64(trigger) < 0 { 799 print("runtime: next_gc=", memstats.next_gc, " heap_marked=", memstats.heap_marked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "triggerRatio=", triggerRatio, " minTrigger=", minTrigger, "\n") 800 throw("gc_trigger underflow") 801 } 802 } 803 memstats.gc_trigger = trigger 804 805 // Compute the next GC goal, which is when the allocated heap 806 // has grown by GOGC/100 over the heap marked by the last 807 // cycle. 808 goal := ^uint64(0) 809 if gcpercent >= 0 { 810 goal = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100 811 if goal < trigger { 812 // The trigger ratio is always less than GOGC/100, but 813 // other bounds on the trigger may have raised it. 814 // Push up the goal, too. 815 goal = trigger 816 } 817 } 818 memstats.next_gc = goal 819 if trace.enabled { 820 traceNextGC() 821 } 822 823 // Update mark pacing. 824 if gcphase != _GCoff { 825 gcController.revise() 826 } 827 828 // Update sweep pacing. 829 if gosweepdone() { 830 mheap_.sweepPagesPerByte = 0 831 } else { 832 // Concurrent sweep needs to sweep all of the in-use 833 // pages by the time the allocated heap reaches the GC 834 // trigger. Compute the ratio of in-use pages to sweep 835 // per byte allocated, accounting for the fact that 836 // some might already be swept. 837 heapLiveBasis := atomic.Load64(&memstats.heap_live) 838 heapDistance := int64(trigger) - int64(heapLiveBasis) 839 // Add a little margin so rounding errors and 840 // concurrent sweep are less likely to leave pages 841 // unswept when GC starts. 
842 heapDistance -= 1024 * 1024 843 if heapDistance < _PageSize { 844 // Avoid setting the sweep ratio extremely high 845 heapDistance = _PageSize 846 } 847 pagesSwept := atomic.Load64(&mheap_.pagesSwept) 848 sweepDistancePages := int64(mheap_.pagesInUse) - int64(pagesSwept) 849 if sweepDistancePages <= 0 { 850 mheap_.sweepPagesPerByte = 0 851 } else { 852 mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance) 853 mheap_.sweepHeapLiveBasis = heapLiveBasis 854 // Write pagesSweptBasis last, since this 855 // signals concurrent sweeps to recompute 856 // their debt. 857 atomic.Store64(&mheap_.pagesSweptBasis, pagesSwept) 858 } 859 } 860 } 861 862 // gcGoalUtilization is the goal CPU utilization for background 863 // marking as a fraction of GOMAXPROCS. 864 const gcGoalUtilization = 0.25 865 866 // gcCreditSlack is the amount of scan work credit that can can 867 // accumulate locally before updating gcController.scanWork and, 868 // optionally, gcController.bgScanCredit. Lower values give a more 869 // accurate assist ratio and make it more likely that assists will 870 // successfully steal background credit. Higher values reduce memory 871 // contention. 872 const gcCreditSlack = 2000 873 874 // gcAssistTimeSlack is the nanoseconds of mutator assist time that 875 // can accumulate on a P before updating gcController.assistTime. 876 const gcAssistTimeSlack = 5000 877 878 // gcOverAssistWork determines how many extra units of scan work a GC 879 // assist does when an assist happens. This amortizes the cost of an 880 // assist by pre-paying for this many bytes of future allocations. 881 const gcOverAssistWork = 64 << 10 882 883 var work struct { 884 full lfstack // lock-free list of full blocks workbuf 885 empty lfstack // lock-free list of empty blocks workbuf 886 pad0 [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait 887 888 wbufSpans struct { 889 lock mutex 890 // free is a list of spans dedicated to workbufs, but 891 // that don't currently contain any workbufs. 892 free mSpanList 893 // busy is a list of all spans containing workbufs on 894 // one of the workbuf lists. 895 busy mSpanList 896 } 897 898 // Restore 64-bit alignment on 32-bit. 899 _ uint32 900 901 // bytesMarked is the number of bytes marked this cycle. This 902 // includes bytes blackened in scanned objects, noscan objects 903 // that go straight to black, and permagrey objects scanned by 904 // markroot during the concurrent scan phase. This is updated 905 // atomically during the cycle. Updates may be batched 906 // arbitrarily, since the value is only read at the end of the 907 // cycle. 908 // 909 // Because of benign races during marking, this number may not 910 // be the exact number of marked bytes, but it should be very 911 // close. 912 // 913 // Put this field here because it needs 64-bit atomic access 914 // (and thus 8-byte alignment even on 32-bit architectures). 915 bytesMarked uint64 916 917 markrootNext uint32 // next markroot job 918 markrootJobs uint32 // number of markroot jobs 919 920 nproc uint32 921 tstart int64 922 nwait uint32 923 ndone uint32 924 alldone note 925 926 // helperDrainBlock indicates that GC mark termination helpers 927 // should pass gcDrainBlock to gcDrain to block in the 928 // getfull() barrier. Otherwise, they should pass gcDrainNoBlock. 929 // 930 // TODO: This is a temporary fallback to work around races 931 // that cause early mark termination. 932 helperDrainBlock bool 933 934 // Number of roots of various root types. 
Set by gcMarkRootPrepare. 935 nFlushCacheRoots int 936 nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int 937 938 // markrootDone indicates that roots have been marked at least 939 // once during the current GC cycle. This is checked by root 940 // marking operations that have to happen only during the 941 // first root marking pass, whether that's during the 942 // concurrent mark phase in current GC or mark termination in 943 // STW GC. 944 markrootDone bool 945 946 // Each type of GC state transition is protected by a lock. 947 // Since multiple threads can simultaneously detect the state 948 // transition condition, any thread that detects a transition 949 // condition must acquire the appropriate transition lock, 950 // re-check the transition condition and return if it no 951 // longer holds or perform the transition if it does. 952 // Likewise, any transition must invalidate the transition 953 // condition before releasing the lock. This ensures that each 954 // transition is performed by exactly one thread and threads 955 // that need the transition to happen block until it has 956 // happened. 957 // 958 // startSema protects the transition from "off" to mark or 959 // mark termination. 960 startSema uint32 961 // markDoneSema protects transitions from mark 1 to mark 2 and 962 // from mark 2 to mark termination. 963 markDoneSema uint32 964 965 bgMarkReady note // signal background mark worker has started 966 bgMarkDone uint32 // cas to 1 when at a background mark completion point 967 // Background mark completion signaling 968 969 // mode is the concurrency mode of the current GC cycle. 970 mode gcMode 971 972 // userForced indicates the current GC cycle was forced by an 973 // explicit user call. 974 userForced bool 975 976 // totaltime is the CPU nanoseconds spent in GC since the 977 // program started if debug.gctrace > 0. 978 totaltime int64 979 980 // initialHeapLive is the value of memstats.heap_live at the 981 // beginning of this GC cycle. 982 initialHeapLive uint64 983 984 // assistQueue is a queue of assists that are blocked because 985 // there was neither enough credit to steal or enough work to 986 // do. 987 assistQueue struct { 988 lock mutex 989 head, tail guintptr 990 } 991 992 // sweepWaiters is a list of blocked goroutines to wake when 993 // we transition from mark termination to sweep. 994 sweepWaiters struct { 995 lock mutex 996 head guintptr 997 } 998 999 // cycles is the number of completed GC cycles, where a GC 1000 // cycle is sweep termination, mark, mark termination, and 1001 // sweep. This differs from memstats.numgc, which is 1002 // incremented at mark termination. 1003 cycles uint32 1004 1005 // Timing/utilization stats for this cycle. 1006 stwprocs, maxprocs int32 1007 tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start 1008 1009 pauseNS int64 // total STW time this cycle 1010 pauseStart int64 // nanotime() of last STW 1011 1012 // debug.gctrace heap sizes for this cycle. 1013 heap0, heap1, heap2, heapGoal uint64 1014 } 1015 1016 // GC runs a garbage collection and blocks the caller until the 1017 // garbage collection is complete. It may also block the entire 1018 // program. 1019 func GC() { 1020 // We consider a cycle to be: sweep termination, mark, mark 1021 // termination, and sweep. This function shouldn't return 1022 // until a full cycle has been completed, from beginning to 1023 // end. Hence, we always want to finish up the current cycle 1024 // and start a new one. That means: 1025 // 1026 // 1. 
In sweep termination, mark, or mark termination of cycle 1027 // N, wait until mark termination N completes and transitions 1028 // to sweep N. 1029 // 1030 // 2. In sweep N, help with sweep N. 1031 // 1032 // At this point we can begin a full cycle N+1. 1033 // 1034 // 3. Trigger cycle N+1 by starting sweep termination N+1. 1035 // 1036 // 4. Wait for mark termination N+1 to complete. 1037 // 1038 // 5. Help with sweep N+1 until it's done. 1039 // 1040 // This all has to be written to deal with the fact that the 1041 // GC may move ahead on its own. For example, when we block 1042 // until mark termination N, we may wake up in cycle N+2. 1043 1044 gp := getg() 1045 1046 // Prevent the GC phase or cycle count from changing. 1047 lock(&work.sweepWaiters.lock) 1048 n := atomic.Load(&work.cycles) 1049 if gcphase == _GCmark { 1050 // Wait until sweep termination, mark, and mark 1051 // termination of cycle N complete. 1052 gp.schedlink = work.sweepWaiters.head 1053 work.sweepWaiters.head.set(gp) 1054 goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1) 1055 } else { 1056 // We're in sweep N already. 1057 unlock(&work.sweepWaiters.lock) 1058 } 1059 1060 // We're now in sweep N or later. Trigger GC cycle N+1, which 1061 // will first finish sweep N if necessary and then enter sweep 1062 // termination N+1. 1063 gcStart(gcBackgroundMode, gcTrigger{kind: gcTriggerCycle, n: n + 1}) 1064 1065 // Wait for mark termination N+1 to complete. 1066 lock(&work.sweepWaiters.lock) 1067 if gcphase == _GCmark && atomic.Load(&work.cycles) == n+1 { 1068 gp.schedlink = work.sweepWaiters.head 1069 work.sweepWaiters.head.set(gp) 1070 goparkunlock(&work.sweepWaiters.lock, "wait for GC cycle", traceEvGoBlock, 1) 1071 } else { 1072 unlock(&work.sweepWaiters.lock) 1073 } 1074 1075 // Finish sweep N+1 before returning. We do this both to 1076 // complete the cycle and because runtime.GC() is often used 1077 // as part of tests and benchmarks to get the system into a 1078 // relatively stable and isolated state. 1079 for atomic.Load(&work.cycles) == n+1 && gosweepone() != ^uintptr(0) { 1080 sweep.nbgsweep++ 1081 Gosched() 1082 } 1083 1084 // Callers may assume that the heap profile reflects the 1085 // just-completed cycle when this returns (historically this 1086 // happened because this was a STW GC), but right now the 1087 // profile still reflects mark termination N, not N+1. 1088 // 1089 // As soon as all of the sweep frees from cycle N+1 are done, 1090 // we can go ahead and publish the heap profile. 1091 // 1092 // First, wait for sweeping to finish. (We know there are no 1093 // more spans on the sweep queue, but we may be concurrently 1094 // sweeping spans, so we have to wait.) 1095 for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 { 1096 Gosched() 1097 } 1098 1099 // Now we're really done with sweeping, so we can publish the 1100 // stable heap profile. Only do this if we haven't already hit 1101 // another mark termination. 1102 mp := acquirem() 1103 cycle := atomic.Load(&work.cycles) 1104 if cycle == n+1 || (gcphase == _GCmark && cycle == n+2) { 1105 mProf_PostSweep() 1106 } 1107 releasem(mp) 1108 } 1109 1110 // gcMode indicates how concurrent a GC cycle should be. 
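// The cycle-completion guarantee documented in GC above can be observed from a
// user program. The following is an illustrative snippet, not part of this
// file (it belongs in user code), and it assumes only the public runtime API
// (runtime.GC, runtime.ReadMemStats, MemStats.NumGC):
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	func main() {
//		var before, after runtime.MemStats
//		runtime.ReadMemStats(&before)
//		// Blocks until sweep termination, mark, mark termination, and
//		// sweep of a full cycle have completed.
//		runtime.GC()
//		runtime.ReadMemStats(&after)
//		fmt.Printf("completed GC cycles: %d -> %d\n", before.NumGC, after.NumGC)
//	}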
1111 type gcMode int 1112 1113 const ( 1114 gcBackgroundMode gcMode = iota // concurrent GC and sweep 1115 gcForceMode // stop-the-world GC now, concurrent sweep 1116 gcForceBlockMode // stop-the-world GC now and STW sweep (forced by user) 1117 ) 1118 1119 // A gcTrigger is a predicate for starting a GC cycle. Specifically, 1120 // it is an exit condition for the _GCoff phase. 1121 type gcTrigger struct { 1122 kind gcTriggerKind 1123 now int64 // gcTriggerTime: current time 1124 n uint32 // gcTriggerCycle: cycle number to start 1125 } 1126 1127 type gcTriggerKind int 1128 1129 const ( 1130 // gcTriggerAlways indicates that a cycle should be started 1131 // unconditionally, even if GOGC is off or we're in a cycle 1132 // right now. This cannot be consolidated with other cycles. 1133 gcTriggerAlways gcTriggerKind = iota 1134 1135 // gcTriggerHeap indicates that a cycle should be started when 1136 // the heap size reaches the trigger heap size computed by the 1137 // controller. 1138 gcTriggerHeap 1139 1140 // gcTriggerTime indicates that a cycle should be started when 1141 // it's been more than forcegcperiod nanoseconds since the 1142 // previous GC cycle. 1143 gcTriggerTime 1144 1145 // gcTriggerCycle indicates that a cycle should be started if 1146 // we have not yet started cycle number gcTrigger.n (relative 1147 // to work.cycles). 1148 gcTriggerCycle 1149 ) 1150 1151 // test returns true if the trigger condition is satisfied, meaning 1152 // that the exit condition for the _GCoff phase has been met. The exit 1153 // condition should be tested when allocating. 1154 func (t gcTrigger) test() bool { 1155 if !memstats.enablegc || panicking != 0 { 1156 return false 1157 } 1158 if t.kind == gcTriggerAlways { 1159 return true 1160 } 1161 if gcphase != _GCoff || gcpercent < 0 { 1162 return false 1163 } 1164 switch t.kind { 1165 case gcTriggerHeap: 1166 // Non-atomic access to heap_live for performance. If 1167 // we are going to trigger on this, this thread just 1168 // atomically wrote heap_live anyway and we'll see our 1169 // own write. 1170 return memstats.heap_live >= memstats.gc_trigger 1171 case gcTriggerTime: 1172 lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) 1173 return lastgc != 0 && t.now-lastgc > forcegcperiod 1174 case gcTriggerCycle: 1175 // t.n > work.cycles, but accounting for wraparound. 1176 return int32(t.n-work.cycles) > 0 1177 } 1178 return true 1179 } 1180 1181 // gcStart transitions the GC from _GCoff to _GCmark (if 1182 // !mode.stwMark) or _GCmarktermination (if mode.stwMark) by 1183 // performing sweep termination and GC initialization. 1184 // 1185 // This may return without performing this transition in some cases, 1186 // such as when called on a system stack or with locks held. 1187 func gcStart(mode gcMode, trigger gcTrigger) { 1188 // Since this is called from malloc and malloc is called in 1189 // the guts of a number of libraries that might be holding 1190 // locks, don't attempt to start GC in non-preemptible or 1191 // potentially unstable situations. 1192 mp := acquirem() 1193 if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" { 1194 releasem(mp) 1195 return 1196 } 1197 releasem(mp) 1198 mp = nil 1199 1200 // Pick up the remaining unswept/not being swept spans concurrently 1201 // 1202 // This shouldn't happen if we're being invoked in background 1203 // mode since proportional sweep should have just finished 1204 // sweeping everything, but rounding errors, etc, may leave a 1205 // few spans unswept. 
In forced mode, this is necessary since 1206 // GC can be forced at any point in the sweeping cycle. 1207 // 1208 // We check the transition condition continuously here in case 1209 // this G gets delayed in to the next GC cycle. 1210 for trigger.test() && gosweepone() != ^uintptr(0) { 1211 sweep.nbgsweep++ 1212 } 1213 1214 // Perform GC initialization and the sweep termination 1215 // transition. 1216 semacquire(&work.startSema) 1217 // Re-check transition condition under transition lock. 1218 if !trigger.test() { 1219 semrelease(&work.startSema) 1220 return 1221 } 1222 1223 // For stats, check if this GC was forced by the user. 1224 work.userForced = trigger.kind == gcTriggerAlways || trigger.kind == gcTriggerCycle 1225 1226 // In gcstoptheworld debug mode, upgrade the mode accordingly. 1227 // We do this after re-checking the transition condition so 1228 // that multiple goroutines that detect the heap trigger don't 1229 // start multiple STW GCs. 1230 if mode == gcBackgroundMode { 1231 if debug.gcstoptheworld == 1 { 1232 mode = gcForceMode 1233 } else if debug.gcstoptheworld == 2 { 1234 mode = gcForceBlockMode 1235 } 1236 } 1237 1238 // Ok, we're doing it! Stop everybody else 1239 semacquire(&worldsema) 1240 1241 if trace.enabled { 1242 traceGCStart() 1243 } 1244 1245 if mode == gcBackgroundMode { 1246 gcBgMarkStartWorkers() 1247 } 1248 1249 gcResetMarkState() 1250 1251 work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs 1252 work.heap0 = atomic.Load64(&memstats.heap_live) 1253 work.pauseNS = 0 1254 work.mode = mode 1255 1256 now := nanotime() 1257 work.tSweepTerm = now 1258 work.pauseStart = now 1259 if trace.enabled { 1260 traceGCSTWStart(1) 1261 } 1262 systemstack(stopTheWorldWithSema) 1263 // Finish sweep before we start concurrent scan. 1264 systemstack(func() { 1265 finishsweep_m() 1266 }) 1267 // clearpools before we start the GC. If we wait they memory will not be 1268 // reclaimed until the next GC cycle. 1269 clearpools() 1270 1271 work.cycles++ 1272 if mode == gcBackgroundMode { // Do as much work concurrently as possible 1273 gcController.startCycle() 1274 work.heapGoal = memstats.next_gc 1275 1276 // Enter concurrent mark phase and enable 1277 // write barriers. 1278 // 1279 // Because the world is stopped, all Ps will 1280 // observe that write barriers are enabled by 1281 // the time we start the world and begin 1282 // scanning. 1283 // 1284 // Write barriers must be enabled before assists are 1285 // enabled because they must be enabled before 1286 // any non-leaf heap objects are marked. Since 1287 // allocations are blocked until assists can 1288 // happen, we want enable assists as early as 1289 // possible. 1290 setGCPhase(_GCmark) 1291 1292 gcBgMarkPrepare() // Must happen before assist enable. 1293 gcMarkRootPrepare() 1294 1295 // Mark all active tinyalloc blocks. Since we're 1296 // allocating from these, they need to be black like 1297 // other allocations. The alternative is to blacken 1298 // the tiny block on every allocation from it, which 1299 // would slow down the tiny allocator. 1300 gcMarkTinyAllocs() 1301 1302 // At this point all Ps have enabled the write 1303 // barrier, thus maintaining the no white to 1304 // black invariant. Enable mutator assists to 1305 // put back-pressure on fast allocating 1306 // mutators. 1307 atomic.Store(&gcBlackenEnabled, 1) 1308 1309 // Assists and workers can start the moment we start 1310 // the world. 1311 gcController.markStartTime = now 1312 1313 // Concurrent mark. 
1314 systemstack(func() { 1315 now = startTheWorldWithSema(trace.enabled) 1316 }) 1317 work.pauseNS += now - work.pauseStart 1318 work.tMark = now 1319 } else { 1320 if trace.enabled { 1321 // Switch to mark termination STW. 1322 traceGCSTWDone() 1323 traceGCSTWStart(0) 1324 } 1325 t := nanotime() 1326 work.tMark, work.tMarkTerm = t, t 1327 work.heapGoal = work.heap0 1328 1329 // Perform mark termination. This will restart the world. 1330 gcMarkTermination(memstats.triggerRatio) 1331 } 1332 1333 semrelease(&work.startSema) 1334 } 1335 1336 // gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2 1337 // to mark termination. 1338 // 1339 // This should be called when all mark work has been drained. In mark 1340 // 1, this includes all root marking jobs, global work buffers, and 1341 // active work buffers in assists and background workers; however, 1342 // work may still be cached in per-P work buffers. In mark 2, per-P 1343 // caches are disabled. 1344 // 1345 // The calling context must be preemptible. 1346 // 1347 // Note that it is explicitly okay to have write barriers in this 1348 // function because completion of concurrent mark is best-effort 1349 // anyway. Any work created by write barriers here will be cleaned up 1350 // by mark termination. 1351 func gcMarkDone() { 1352 top: 1353 semacquire(&work.markDoneSema) 1354 1355 // Re-check transition condition under transition lock. 1356 if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) { 1357 semrelease(&work.markDoneSema) 1358 return 1359 } 1360 1361 // Disallow starting new workers so that any remaining workers 1362 // in the current mark phase will drain out. 1363 // 1364 // TODO(austin): Should dedicated workers keep an eye on this 1365 // and exit gcDrain promptly? 1366 atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff) 1367 atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff) 1368 1369 if !gcBlackenPromptly { 1370 // Transition from mark 1 to mark 2. 1371 // 1372 // The global work list is empty, but there can still be work 1373 // sitting in the per-P work caches. 1374 // Flush and disable work caches. 1375 1376 // Disallow caching workbufs and indicate that we're in mark 2. 1377 gcBlackenPromptly = true 1378 1379 // Prevent completion of mark 2 until we've flushed 1380 // cached workbufs. 1381 atomic.Xadd(&work.nwait, -1) 1382 1383 // GC is set up for mark 2. Let Gs blocked on the 1384 // transition lock go while we flush caches. 1385 semrelease(&work.markDoneSema) 1386 1387 systemstack(func() { 1388 // Flush all currently cached workbufs and 1389 // ensure all Ps see gcBlackenPromptly. This 1390 // also blocks until any remaining mark 1 1391 // workers have exited their loop so we can 1392 // start new mark 2 workers. 1393 forEachP(func(_p_ *p) { 1394 _p_.gcw.dispose() 1395 }) 1396 }) 1397 1398 // Check that roots are marked. We should be able to 1399 // do this before the forEachP, but based on issue 1400 // #16083 there may be a (harmless) race where we can 1401 // enter mark 2 while some workers are still scanning 1402 // stacks. The forEachP ensures these scans are done. 1403 // 1404 // TODO(austin): Figure out the race and fix this 1405 // properly. 1406 gcMarkRootCheck() 1407 1408 // Now we can start up mark 2 workers. 
1409 atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff) 1410 atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff) 1411 1412 incnwait := atomic.Xadd(&work.nwait, +1) 1413 if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { 1414 // This loop will make progress because 1415 // gcBlackenPromptly is now true, so it won't 1416 // take this same "if" branch. 1417 goto top 1418 } 1419 } else { 1420 // Transition to mark termination. 1421 now := nanotime() 1422 work.tMarkTerm = now 1423 work.pauseStart = now 1424 getg().m.preemptoff = "gcing" 1425 if trace.enabled { 1426 traceGCSTWStart(0) 1427 } 1428 systemstack(stopTheWorldWithSema) 1429 // The gcphase is _GCmark, it will transition to _GCmarktermination 1430 // below. The important thing is that the wb remains active until 1431 // all marking is complete. This includes writes made by the GC. 1432 1433 // Record that one root marking pass has completed. 1434 work.markrootDone = true 1435 1436 // Disable assists and background workers. We must do 1437 // this before waking blocked assists. 1438 atomic.Store(&gcBlackenEnabled, 0) 1439 1440 // Wake all blocked assists. These will run when we 1441 // start the world again. 1442 gcWakeAllAssists() 1443 1444 // Likewise, release the transition lock. Blocked 1445 // workers and assists will run when we start the 1446 // world again. 1447 semrelease(&work.markDoneSema) 1448 1449 // endCycle depends on all gcWork cache stats being 1450 // flushed. This is ensured by mark 2. 1451 nextTriggerRatio := gcController.endCycle() 1452 1453 // Perform mark termination. This will restart the world. 1454 gcMarkTermination(nextTriggerRatio) 1455 } 1456 } 1457 1458 func gcMarkTermination(nextTriggerRatio float64) { 1459 // World is stopped. 1460 // Start marktermination which includes enabling the write barrier. 1461 atomic.Store(&gcBlackenEnabled, 0) 1462 gcBlackenPromptly = false 1463 setGCPhase(_GCmarktermination) 1464 1465 work.heap1 = memstats.heap_live 1466 startTime := nanotime() 1467 1468 mp := acquirem() 1469 mp.preemptoff = "gcing" 1470 _g_ := getg() 1471 _g_.m.traceback = 2 1472 gp := _g_.m.curg 1473 casgstatus(gp, _Grunning, _Gwaiting) 1474 gp.waitreason = "garbage collection" 1475 1476 // Run gc on the g0 stack. We do this so that the g stack 1477 // we're currently running on will no longer change. Cuts 1478 // the root set down a bit (g0 stacks are not scanned, and 1479 // we don't need to scan gc's internal state). We also 1480 // need to switch to g0 so we can shrink the stack. 1481 systemstack(func() { 1482 gcMark(startTime) 1483 // Must return immediately. 1484 // The outer function's stack may have moved 1485 // during gcMark (it shrinks stacks, including the 1486 // outer function's stack), so we must not refer 1487 // to any of its variables. Return back to the 1488 // non-system stack to pick up the new addresses 1489 // before continuing. 1490 }) 1491 1492 systemstack(func() { 1493 work.heap2 = work.bytesMarked 1494 if debug.gccheckmark > 0 { 1495 // Run a full stop-the-world mark using checkmark bits, 1496 // to check that we didn't forget to mark anything during 1497 // the concurrent mark process. 
			gcResetMarkState()
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(work.mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetMarkState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(work.mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Update GC trigger and pacing for the next cycle.
	gcSetTriggerRatio(nextTriggerRatio)

	// Update timing memstats
	now := nanotime()
	sec, nsec, _ := time_now()
	unixNow := sec*1e9 + int64(nsec)
	work.pauseNS += now - work.pauseStart
	work.tEnd = now
	atomic.Store64(&memstats.last_gc_unix, uint64(unixNow)) // must be Unix time to make sense to user
	atomic.Store64(&memstats.last_gc_nanotime, uint64(now)) // monotonic time for us
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(work.pauseNS)

	// Update work.totaltime.
	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
	// We report idle marking time below, but omit it from the
	// overall utilization here since it's "free".
	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
	cycleCpu := sweepTermCpu + markCpu + markTermCpu
	work.totaltime += cycleCpu

	// Compute overall GC CPU utilization.
	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

	// Reset sweep state.
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	if work.userForced {
		memstats.numforcedgc++
	}

	// Bump GC cycle count and wake goroutines waiting on sweep.
	lock(&work.sweepWaiters.lock)
	memstats.numgc++
	injectglist(work.sweepWaiters.head.ptr())
	work.sweepWaiters.head = 0
	unlock(&work.sweepWaiters.lock)

	// Finish the current heap profiling cycle and start a new
	// heap profiling cycle. We do this before starting the world
	// so events don't leak into the wrong cycle.
	mProf_NextCycle()

	systemstack(func() { startTheWorldWithSema(true) })

	// Flush the heap profile so we can start a new cycle next GC.
	// This is relatively expensive, so we don't do it with the
	// world stopped.
	mProf_Flush()

	// Prepare workbufs for freeing by the sweeper. We do this
	// asynchronously because it can take non-trivial time.
	prepareFreeWorkbufs()

	// Free stack spans. This must be done between GC cycles.
	systemstack(freeStackSpans)

	// Print gctrace before dropping worldsema. As soon as we drop
	// worldsema another cycle could start and smash the stats
	// we're trying to print.
	if debug.gctrace > 0 {
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ")
		prev := work.tSweepTerm
		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
			if i == 2 || i == 3 {
				// Separate mark time components with /.
				print("/")
			} else if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
		}
		print(" ms cpu, ",
			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
			work.heapGoal>>20, " MB goal, ",
			work.maxprocs, " P")
		if work.userForced {
			print(" (forced)")
		}
		print("\n")
		printunlock()
	}

	semrelease(&worldsema)
	// Careful: another GC cycle may start now.

	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// gcBgMarkStartWorkers prepares background mark worker goroutines.
// These goroutines will not run until the mark phase, but they must
// be started while the world is not stopped and from a regular G
// stack. The caller must hold worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that
	// each P has a background GC G.
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		if p.gcBgMarkWorker == 0 {
			go gcBgMarkWorker(p)
			notetsleepg(&work.bgMarkReady, -1)
			noteclear(&work.bgMarkReady)
		}
	}
}

// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking will stop when the work queues are empty
	// and there are no more workers (note that, since this is
	// concurrent, this may be a transient state, but mark
	// termination will clean it up). Between background workers
	// and assists, we don't really know how many workers there
	// will be, so we pretend to have an arbitrarily large number
	// of workers, almost all of which are "waiting". While a
	// worker is working it decrements nwait. If nproc == nwait,
	// there are no workers.
	work.nproc = ^uint32(0)
	work.nwait = ^uint32(0)
}

func gcBgMarkWorker(_p_ *p) {
	gp := getg()

	type parkInfo struct {
		m      muintptr // Release this m on park.
		attach puintptr // If non-nil, attach to this p on park.
	}
	// We pass park to a gopark unlock function, so it can't be on
	// the stack (see gopark). Prevent deadlock from recursively
	// starting GC by disabling preemption.
	gp.m.preemptoff = "GC worker init"
	park := new(parkInfo)
	gp.m.preemptoff = ""

	park.m.set(acquirem())
	park.attach.set(_p_)
	// Inform gcBgMarkStartWorkers that this worker is ready.
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)

	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, parkp unsafe.Pointer) bool {
			park := (*parkInfo)(parkp)

			// The worker G is no longer running, so it's
			// now safe to allow preemption.
			releasem(park.m.ptr())

			// If the worker isn't attached to its P,
			// attach now. During initialization and after
			// a phase change, the worker may have been
			// running on a different P. As soon as we
			// attach, the owner P may schedule the
			// worker, so this must be done after the G is
			// stopped.
			if park.attach != 0 {
				p := park.attach.ptr()
				park.attach.set(nil)
				// cas the worker because we may be
				// racing with a new worker starting
				// on this P.
				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
					// The P got a new worker.
					// Exit this worker.
					return false
				}
			}
			return true
		}, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker (the P may later be reused, in which case
		// it will get a new worker) or we failed to associate.
		if _p_.gcBgMarkWorker.ptr() != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		park.m.set(acquirem())

		if gcBlackenEnabled == 0 {
			throw("gcBgMarkWorker: blackening not enabled")
		}

		startTime := nanotime()

		decnwait := atomic.Xadd(&work.nwait, -1)
		if decnwait == work.nproc {
			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
			throw("work.nwait was > work.nproc")
		}

		systemstack(func() {
			// Mark our goroutine preemptible so its stack
			// can be scanned. This lets two mark workers
			// scan each other (otherwise, they would
			// deadlock). We must not modify anything on
			// the G stack. However, stack shrinking is
			// disabled for mark workers, so it is safe to
			// read from the G stack.
			casgstatus(gp, _Grunning, _Gwaiting)
			switch _p_.gcMarkWorkerMode {
			default:
				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
			case gcMarkWorkerDedicatedMode:
				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
				if gp.preempt {
					// We were preempted. This is
					// a useful signal to kick
					// everything out of the run
					// queue so it can run
					// somewhere else.
					lock(&sched.lock)
					for {
						gp, _ := runqget(_p_)
						if gp == nil {
							break
						}
						globrunqput(gp)
					}
					unlock(&sched.lock)
				}
				// Go back to draining, this time
				// without preemption.
				gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
			case gcMarkWorkerFractionalMode:
				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
			case gcMarkWorkerIdleMode:
				gcDrain(&_p_.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit)
			}
			casgstatus(gp, _Gwaiting, _Grunning)
		})

		// If we are nearing the end of mark, dispose
		// of the cache promptly. We must do this
		// before signaling that we're no longer
		// working so that other workers can't observe
		// no workers and no work while we have this
		// cached, and before we compute done.
		if gcBlackenPromptly {
			_p_.gcw.dispose()
		}

		// Account for time.
		duration := nanotime() - startTime
		switch _p_.gcMarkWorkerMode {
		case gcMarkWorkerDedicatedMode:
			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
		case gcMarkWorkerFractionalMode:
			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
			atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
		case gcMarkWorkerIdleMode:
			atomic.Xaddint64(&gcController.idleMarkTime, duration)
		}

		// Was this the last worker and did we run out
		// of work?
		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait > work.nproc {
			println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode,
				"work.nwait=", incnwait, "work.nproc=", work.nproc)
			throw("work.nwait > work.nproc")
		}

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// Make this G preemptible and disassociate it
			// as the worker for this P so
			// findRunnableGCWorker doesn't try to
			// schedule it.
			_p_.gcBgMarkWorker.set(nil)
			releasem(park.m.ptr())

			gcMarkDone()

			// Disable preemption and prepare to reattach
			// to the P.
			//
			// We may be running on a different P at this
			// point, so we can't reattach until this G is
			// parked.
			park.m.set(acquirem())
			park.attach.set(_p_)
		}
	}
}

// gcMarkWorkAvailable returns true if executing a mark worker
// on p is potentially useful. p may be nil, in which case it only
// checks the global sources of work.
func gcMarkWorkAvailable(p *p) bool {
	if p != nil && !p.gcw.empty() {
		return true
	}
	if !work.full.empty() {
		return true // global work available
	}
	if work.markrootNext < work.markrootJobs {
		return true // root scan work available
	}
	return false
}

// gcMark runs the mark (or, for concurrent GC, mark termination).
// All gcWork caches must be empty.
// STW is in effect at this point.
//TODO go:nowritebarrier
func gcMark(start_time int64) {
	if debug.allocfreetrace > 0 {
		tracegc()
	}

	if gcphase != _GCmarktermination {
		throw("in gcMark expecting to see gcphase as _GCmarktermination")
	}
	work.tstart = start_time

	// Queue root marking jobs.
	gcMarkRootPrepare()

	work.nwait = 0
	work.ndone = 0
	work.nproc = uint32(gcprocs())

	if work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots == 0 {
		// There's no work on the work queue and no root jobs
		// that can produce work, so don't bother entering the
		// getfull() barrier.
		//
		// This will be the situation the vast majority of the
		// time after concurrent mark. However, we still need
		// a fallback for STW GC and because there are some
		// known races that occasionally leave work around for
		// mark termination.
		//
		// We're still hedging our bets here: if we do
		// accidentally produce some work, we'll still process
		// it, just not necessarily in parallel.
		//
		// TODO(austin): Fix the races and remove
		// work draining from mark termination so we don't
		// need the fallback path.
		work.helperDrainBlock = false
	} else {
		work.helperDrainBlock = true
	}

	if work.nproc > 1 {
		noteclear(&work.alldone)
		helpgc(int32(work.nproc))
	}

	gchelperstart()

	gcw := &getg().m.p.ptr().gcw
	if work.helperDrainBlock {
		gcDrain(gcw, gcDrainBlock)
	} else {
		gcDrain(gcw, gcDrainNoBlock)
	}
	gcw.dispose()

	if debug.gccheckmark > 0 {
		// This is expensive when there's a large number of
		// Gs, so only do it if checkmark is also enabled.
		gcMarkRootCheck()
	}
	if work.full != 0 {
		throw("work.full != 0")
	}

	if work.nproc > 1 {
		notesleep(&work.alldone)
	}

	// Record that at least one root marking pass has completed.
	work.markrootDone = true

	// Double-check that all gcWork caches are empty. This should
	// be ensured by mark 2 before we enter mark termination.
	for i := 0; i < int(gomaxprocs); i++ {
		gcw := &allp[i].gcw
		if !gcw.empty() {
			throw("P has cached GC work at end of mark termination")
		}
		if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
			throw("P has unflushed stats at end of mark termination")
		}
	}

	cachestats()

	// Update the marked heap stat.
	memstats.heap_marked = work.bytesMarked

	// Update other GC heap size stats. This must happen after
	// cachestats (which flushes local statistics to these) and
	// flushallmcaches (which modifies heap_live).
	memstats.heap_live = work.bytesMarked
	memstats.heap_scan = uint64(gcController.scanWork)

	if trace.enabled {
		traceHeapAlloc()
	}
}

func gcSweep(mode gcMode) {
	if gcphase != _GCoff {
		throw("gcSweep being done but phase is not GCoff")
	}

	lock(&mheap_.lock)
	mheap_.sweepgen += 2
	mheap_.sweepdone = 0
	if mheap_.sweepSpans[mheap_.sweepgen/2%2].index != 0 {
		// We should have drained this list during the last
		// sweep phase. We certainly need to start this phase
		// with an empty swept list.
		throw("non-empty swept list")
	}
	mheap_.pagesSwept = 0
	unlock(&mheap_.lock)

	if !_ConcurrentSweep || mode == gcForceBlockMode {
		// Special case synchronous sweep.
		// Record that no proportional sweeping has to happen.
		lock(&mheap_.lock)
		mheap_.sweepPagesPerByte = 0
		unlock(&mheap_.lock)
		// Sweep all spans eagerly.
		for sweepone() != ^uintptr(0) {
			sweep.npausesweep++
		}
		// Free workbufs eagerly.
		prepareFreeWorkbufs()
		for freeSomeWbufs(false) {
		}
		// All "free" events for this mark/sweep cycle have
		// now happened, so we can make this profile cycle
		// available immediately.
		mProf_NextCycle()
		mProf_Flush()
		return
	}

	// Background sweep.
	lock(&sweep.lock)
	if sweep.parked {
		sweep.parked = false
		ready(sweep.g, 0, true)
	}
	unlock(&sweep.lock)
}

// gcResetMarkState resets global state prior to marking (concurrent
// or STW) and resets the stack scan state of all Gs.
//
// This is safe to do without the world stopped because any Gs created
// during or after this will start out in the reset state.
func gcResetMarkState() {
	// This may be called during a concurrent phase, so make sure
	// allgs doesn't change.
	lock(&allglock)
	for _, gp := range allgs {
		gp.gcscandone = false  // set to true in gcphasework
		gp.gcscanvalid = false // stack has not been scanned
		gp.gcAssistBytes = 0
	}
	unlock(&allglock)

	work.bytesMarked = 0
	work.initialHeapLive = atomic.Load64(&memstats.heap_live)
	work.markrootDone = false
}

// Hooks for other packages

var poolcleanup func()

//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
	poolcleanup = f
}

func clearpools() {
	// clear sync.Pools
	if poolcleanup != nil {
		poolcleanup()
	}

	// Clear central sudog cache.
	// Leave per-P caches alone, they have strictly bounded size.
	// Disconnect cached list before dropping it on the floor,
	// so that a dangling ref to one entry does not pin all of them.
	lock(&sched.sudoglock)
	var sg, sgnext *sudog
	for sg = sched.sudogcache; sg != nil; sg = sgnext {
		sgnext = sg.next
		sg.next = nil
	}
	sched.sudogcache = nil
	unlock(&sched.sudoglock)

	// Clear central defer pools.
	// Leave per-P pools alone, they have strictly bounded size.
	lock(&sched.deferlock)
	for i := range sched.deferpool {
		// disconnect cached list before dropping it on the floor,
		// so that a dangling ref to one entry does not pin all of them.
		var d, dlink *_defer
		for d = sched.deferpool[i]; d != nil; d = dlink {
			dlink = d.link
			d.link = nil
		}
		sched.deferpool[i] = nil
	}
	unlock(&sched.deferlock)
}

// Timing

//go:nowritebarrier
func gchelper() {
	_g_ := getg()
	_g_.m.traceback = 2
	gchelperstart()

	// Parallel mark over GC roots and heap
	if gcphase == _GCmarktermination {
		gcw := &_g_.m.p.ptr().gcw
		if work.helperDrainBlock {
			gcDrain(gcw, gcDrainBlock) // blocks in getfull
		} else {
			gcDrain(gcw, gcDrainNoBlock)
		}
		gcw.dispose()
	}

	nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone
	if atomic.Xadd(&work.ndone, +1) == nproc-1 {
		notewakeup(&work.alldone)
	}
	_g_.m.traceback = 0
}

func gchelperstart() {
	_g_ := getg()

	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
		throw("gchelperstart: bad m->helpgc")
	}
	if _g_ != _g_.m.g0 {
		throw("gchelper not running on g0 stack")
	}
}

// itoaDiv formats val/(10**dec) into buf.
func itoaDiv(buf []byte, val uint64, dec int) []byte {
	i := len(buf) - 1
	idec := i - dec
	for val >= 10 || i >= idec {
		buf[i] = byte(val%10 + '0')
		i--
		if i == idec {
			buf[i] = '.'
			i--
		}
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte {
	if ns >= 10e6 {
		// Format as whole milliseconds.
		return itoaDiv(buf, ns/1e6, 0)
	}
	// Format two digits of precision, with at most three decimal places.
	x := ns / 1e3
	if x == 0 {
		buf[0] = '0'
		return buf[:1]
	}
	dec := 3
	for x >= 100 {
		x /= 10
		dec--
	}
	return itoaDiv(buf, x, dec)
}
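
// The two helpers above produce the numbers in the gctrace line printed
// by gcMarkTermination. The following is an editor's sketch (not part of
// the original file; the name gcTraceFmtSketch is invented purely for
// illustration) showing worked values: itoaDiv renders val/10**dec with
// an inserted decimal point, while fmtNSAsMS prints whole milliseconds
// at or above 10ms and keeps roughly two significant digits below that.
func gcTraceFmtSketch() {
	var buf [24]byte
	// itoaDiv(buf, 12345, 3) renders 12345/10^3.
	print(string(itoaDiv(buf[:], 12345, 3)), "\n") // prints "12.345"
	// 250000000ns = 250ms >= 10ms, so it prints as whole milliseconds.
	print(string(fmtNSAsMS(buf[:], 250000000)), "\n") // prints "250"
	// 2500000ns = 2.5ms < 10ms, so it keeps two significant digits.
	print(string(fmtNSAsMS(buf[:], 2500000)), "\n") // prints "2.5"
}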