github.com/dannin/go@v0.0.0-20161031215817-d35dfd405eaa/src/runtime/mgc.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO(rsc): The code having to do with the heap bitmap needs very serious cleanup.
// It has gotten completely out of control.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), and allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// TODO(austin): The rest of this comment is woefully out of date and
// needs to be rewritten. There is no distinct scan phase any more and
// we allocate black during GC.
//
//  0. Set phase = GCscan from GCoff.
//  1. Wait for all P's to acknowledge phase change.
//     At this point all goroutines have passed through a GC safepoint and
//     know we are in the GCscan phase.
//  2. GC scans all goroutine stacks, marks and enqueues all encountered pointers
//     (marking avoids most duplicate enqueuing but races may produce benign duplication).
//     Preempted goroutines are scanned before P schedules next goroutine.
//  3. Set phase = GCmark.
//  4. Wait for all P's to acknowledge phase change.
//  5. Now write barrier marks and enqueues black, grey, or white to white pointers.
//     Malloc still allocates white (non-marked) objects.
//  6. Meanwhile GC transitively walks the heap marking reachable objects.
//  7. When GC finishes marking heap, it preempts P's one-by-one and
//     retakes partial wbufs (filled by write barrier or during a stack scan of the goroutine
//     currently scheduled on the P).
//  8. Once the GC has exhausted all available marking work it sets phase = marktermination.
//  9. Wait for all P's to acknowledge phase change.
// 10. Malloc now allocates black objects, so number of unmarked reachable objects
//     monotonically decreases.
// 11. GC preempts P's one-by-one taking partial wbufs and marks all unmarked yet
//     reachable objects.
// 12. When GC completes a full cycle over P's and discovers no new grey
//     objects, (which means all reachable objects are marked) set phase = GCoff.
// 13. Wait for all P's to acknowledge phase change.
// 14. Now malloc allocates white (but sweeps spans before use).
//     Write barrier becomes nop.
// 15. GC does background sweeping, see description below.
// 16. When sufficient allocation has taken place replay the sequence starting at 0 above,
//     see discussion of GC rate below.

// Changing phases.
// Phases are changed by setting the gcphase to the next phase and possibly calling ackgcphase.
// All phase action must be benign in the presence of a change.
// Starting with GCoff
// GCoff to GCscan
//     GCscan scans stacks and globals greying them and never marks an object black.
//     Once all the P's are aware of the new phase they will scan gs on preemption.
//     This means that the scanning of preempted gs can't start until all the Ps
//     have acknowledged.
//     When a stack is scanned, this phase also installs stack barriers to
//     track how much of the stack has been active.
//     This transition enables write barriers because stack barriers
//     assume that writes to higher frames will be tracked by write
//     barriers. Technically this only needs write barriers for writes
//     to stack slots, but we enable write barriers in general.
// GCscan to GCmark
//     In GCmark, work buffers are drained until there are no more
//     pointers to scan.
//     No scanning of objects (making them black) can happen until all
//     Ps have enabled the write barrier, but that already happened in
//     the transition to GCscan.
// GCmark to GCmarktermination
//     The only change here is that we start allocating black so the Ps must acknowledge
//     the change before we begin the termination algorithm.
// GCmarktermination to GCsweep
//     Objects currently on the freelist must be marked black for this to work.
//     Are things on the free lists black or white? How does the sweep phase work?

// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the
// heap, it sweeps spans until it frees at least that many pages into the heap.
// There is one case where this may not suffice: if a goroutine sweeps and frees two
// nonadjacent one-page spans to the heap, it will allocate a new two-page
// span, but there can still be other one-page unswept spans which could be
// combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).

// GC rate.
// Next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment variable
// (100 by default). If GOGC=100 and we're using 4M, we'll GC again when we get to 8M
// (this mark is tracked in the next_gc variable). This keeps the GC cost in linear
// proportion to the allocation cost. Adjusting GOGC just changes the linear constant
// (and also the amount of extra memory used).

// Oblets
//
// In order to prevent long pauses while scanning large objects and to
// improve parallelism, the garbage collector breaks up scan jobs for
// objects larger than maxObletBytes into "oblets" of at most
// maxObletBytes. When scanning encounters the beginning of a large
// object, it scans only the first oblet and enqueues the remaining
// oblets as new scan jobs.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles. This will be scaled by gcpercent/100.
	sweepMinHeapDistance = 1024 * 1024
)

// heapminimum is the minimum heap size at which to trigger GC.
// For small heaps, this overrides the usual GOGC*live set rule.
//
// When there is a very small live set but a lot of allocation, simply
// collecting when the heap reaches GOGC*live results in many GC
// cycles and high total per-GC overhead. This minimum amortizes this
// per-GC overhead while keeping the heap reasonably small.
//
// During initialization this is set to 4MB*GOGC/100. In the case of
// GOGC==0, this will set heapminimum to 0, resulting in constant
// collection even when the heap size is small, which is useful for
// debugging.
var heapminimum uint64 = defaultHeapMinimum

// defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	_ = setGCPercent(readgogc())
	for datap := &firstmoduledata; datap != nil; datap = datap.next {
		datap.gcdatamask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcdata)), datap.edata-datap.data)
		datap.gcbssmask = progToPointerMask((*byte)(unsafe.Pointer(datap.gcbss)), datap.ebss-datap.bss)
	}
	memstats.gc_trigger = heapminimum
	// Compute the goal heap size based on the trigger:
	//   trigger = marked * (1 + triggerRatio)
	//   marked = trigger / (1 + triggerRatio)
	//   goal = marked * (1 + GOGC/100)
	//        = trigger / (1 + triggerRatio) * (1 + GOGC/100)
	memstats.next_gc = uint64(float64(memstats.gc_trigger) / (1 + gcController.triggerRatio) * (1 + float64(gcpercent)/100))
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}
	work.startSema = 1
	work.markDoneSema = 1
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "" {
		return 100
	}
	if p == "off" {
		return -1
	}
	return int32(atoi(p))
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
	if gcController.triggerRatio > float64(gcpercent)/100 {
		gcController.triggerRatio = float64(gcpercent) / 100
	}
	// This is either in gcinit or followed by a STW GC, both of
	// which will reset other stats like memstats.gc_trigger and
	// memstats.next_gc to appropriate values.
	unlock(&mheap_.lock)
	return out
}

// Garbage collector phase.
// Indicates to the write barrier and synchronization task what to perform.
var gcphase uint32

// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
	enabled bool    // compiler emits a check of this before calling write barrier
	pad     [3]byte // compiler uses 32-bit load for "enabled" field
	needed  bool    // whether we need a write barrier for current GC phase
	cgo     bool    // whether we need a write barrier for a cgo check
	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

// gcBlackenPromptly indicates that optimizations that may
// hide work from the global work queue should be disabled.
//
// If gcBlackenPromptly is true, per-P gcWork caches should
// be flushed immediately and new objects should be allocated black.
//
// There is a tension between allocating objects white and
// allocating them black. If white and the objects die before being
// marked they can be collected during this GC cycle. On the other
// hand allocating them black will reduce _GCmarktermination latency
// since more work is done in the mark phase. This tension is resolved
// by allocating white until the mark phase is approaching its end and
// then allocating black for the remainder of the mark phase.
var gcBlackenPromptly bool

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)

//go:nosplit
func setGCPhase(x uint32) {
	atomic.Store(&gcphase, x)
	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
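//
// For example, if the overall mark utilization goal is 25% and GOMAXPROCS is
// 6, 1.5 Ps' worth of marking is wanted: one P runs a dedicated worker and a
// fractional worker covers the remaining half P of time (see
// fractionalUtilizationGoal below).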
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

// gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
// to use in execution traces.
var gcMarkWorkerModeStrings = [...]string{
	"GC (dedicated)",
	"GC (fractional)",
	"GC (idle)",
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the memstats.gc_trigger
// trigger based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
var gcController = gcControllerState{
	// Initial trigger ratio guess.
	triggerRatio: 7 / 8.0,
}

type gcControllerState struct {
	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates occur in
	// bounded batches, since it is both written and read
	// throughout the cycle. At the end of the cycle, this is how
	// much of the retained heap is scannable.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heap_scan is updated.
	assistWorkPerByte float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	assistBytesPerWork float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker.
	// For example, if the overall mark utilization goal is 25%
	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
	// and this will be set to 0.5 so that 50% of the time some P
	// is in a fractional mark worker. This is computed at the
	// beginning of each cycle.
	fractionalUtilizationGoal float64

	// triggerRatio is the heap growth ratio at which the garbage
	// collection cycle should start. E.g., if this is 0.6, then
	// GC should start when the live heap has reached 1.6 times
	// the heap size marked by the previous cycle. This should be
	// ≤ GOGC/100 so the trigger heap size is less than the goal
	// heap size. This is updated at the end of each cycle.
	triggerRatio float64

	_ [sys.CacheLineSize]byte

	// fractionalMarkWorkersNeeded is the number of fractional
	// mark workers that need to be started. This is either 0 or
	// 1. This is potentially updated atomically at every
	// scheduling point (hence it gets its own cache line).
	fractionalMarkWorkersNeeded int64

	_ [sys.CacheLineSize]byte
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// If this is the first GC cycle or we're operating on a very
	// small heap, fake heap_marked so it looks like gc_trigger is
	// the appropriate growth from heap_marked, even though the
	// real heap_marked may not have a meaningful value (on the
	// first cycle) or may be much smaller (resulting in a large
	// error response).
	if memstats.gc_trigger <= heapminimum {
		memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + c.triggerRatio))
	}

	// Re-compute the heap goal for this cycle in case something
	// changed. This is the same calculation we use elsewhere.
	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed heap_live
	// over gc_trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if memstats.next_gc < memstats.heap_live+1024*1024 {
		memstats.next_gc = memstats.heap_live + 1024*1024
	}

	// Compute the total mark utilization goal and divide it among
	// dedicated and fractional workers.
	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
	if c.fractionalUtilizationGoal > 0 {
		c.fractionalMarkWorkersNeeded = 1
	} else {
		c.fractionalMarkWorkersNeeded = 0
	}

	// Clear per-P state.
	for _, p := range &allp {
		if p == nil {
			break
		}
		p.gcAssistTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		print("pacer: assist ratio=", c.assistWorkPerByte,
			" (scan ", memstats.heap_scan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			memstats.next_gc>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalMarkWorkersNeeded, "\n")
	}
}

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called either under STW or
// whenever memstats.heap_scan or memstats.heap_live is updated (with
// mheap_.lock held).
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
//
// TODO: Consider removing the periodic controller update altogether.
// Since we switched to allocating black, in theory we shouldn't have
// to change the assist ratio. However, this is still a useful hook
// that we've found many uses for when experimenting.
func (c *gcControllerState) revise() {
	// Compute the expected scan work remaining.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heap_scan) and scan work completed
	// (scanWork), so this difference won't be changed by
	// allocations during GC.
	//
	// This particular estimate is a strict upper bound on the
	// possible remaining scan work for the current heap.
	// You might consider dividing this by 2 (or by
	// (100+GOGC)/100) to counter this over-estimation, but
	// benchmarks show that this has almost no effect on mean
	// mutator utilization, heap size, or assist time and it
	// introduces the danger of under-estimating and letting the
	// mutator outpace the garbage collector.
	scanWorkExpected := int64(memstats.heap_scan) - c.scanWork
	if scanWorkExpected < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the expected scan work
		// negative.
		scanWorkExpected = 1000
	}

	// Compute the heap distance remaining.
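	// As an illustrative example (numbers chosen for clarity, not taken
	// from a real trace): if scanWorkExpected is about 16 MB and 8 MB of
	// allocation headroom remains before next_gc, the assignments below
	// yield assistWorkPerByte ≈ 2 and assistBytesPerWork ≈ 0.5, i.e. each
	// allocated byte must pay for roughly two bytes of scan work.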
	heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
	if heapDistance <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapDistance = 1
	}

	// Compute the mutator assist ratio so that by the time the mutator
	// allocates the remaining heap bytes up to next_gc, it will
	// have done (or stolen) the remaining amount of scan work.
	c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance)
	c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
}

// endCycle updates the GC controller state at the end of the
// concurrent part of the GC cycle.
func (c *gcControllerState) endCycle() {
	h_t := c.triggerRatio // For debugging

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := float64(gcpercent) / 100
	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
	assistDuration := nanotime() - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcGoalUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	c.triggerRatio += triggerGain * triggerError
	if c.triggerRatio < 0 {
		// This can happen if the mutator is allocating very
		// quickly or the GC is scanning very slowly.
		c.triggerRatio = 0
	} else if c.triggerRatio > goalGrowthRatio*0.95 {
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		c.triggerRatio = goalGrowthRatio * 0.95
	}

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
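		// Legend for the single-letter names below: H_m_prev is the
		// heap marked by the previous cycle, h_t/H_T the trigger ratio
		// and trigger, h_a/H_a the actual growth ratio and live heap,
		// h_g/H_g the goal growth ratio and goal heap, u_a/u_g the
		// actual and goal CPU utilization, and W_a the scan work done.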
		H_m_prev := memstats.heap_marked
		H_T := memstats.gc_trigger
		h_a := actualGrowthRatio
		H_a := memstats.heap_live
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}
}

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrand() % uint32(gomaxprocs-1))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns the background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}
	if _p_.gcBgMarkWorker == 0 {
		// The mark worker associated with this P is blocked
		// performing a mark transition. We can't run it
		// because it may be on some other run or wait queue.
		return nil
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		if *ptr > 0 {
			if atomic.Xaddint64(ptr, -1) >= 0 {
				return true
			}
			// We lost a race.
			atomic.Xaddint64(ptr, +1)
		}
		return false
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
		// TODO(austin): This P isn't going to run anything
		// else for a while, so kick everything out of its run
		// queue.
	} else {
		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
			// No more workers are needed right now.
			return nil
		}

		// This P has picked up the token for the fractional worker.
		// Is the GC currently under or at the utilization goal?
		// If so, do more work.
		//
		// We used to check whether doing one time slice of work
		// would remain under the utilization goal, but that has the
		// effect of delaying work until the mutator has run for
		// enough time slices to pay for the work. During those time
		// slices, write barriers are enabled, so the mutator is running slower.
		// Now instead we do the work whenever we're under or at the
		// utilization goal and pay for it by letting the mutator run later.
		// This doesn't change the overall utilization averages, but it
		// front loads the GC work so that the GC finishes earlier and
		// write barriers can be turned off sooner, effectively giving
		// the mutator a faster machine.
		//
		// The old, slower behavior can be restored by setting
		//	gcForcePreemptNS = forcePreemptNS.
		const gcForcePreemptNS = 0

		// TODO(austin): We could fast path this and basically
		// eliminate contention on c.fractionalMarkWorkersNeeded by
		// precomputing the minimum time at which it's worth
		// next scheduling the fractional worker. Then Ps
		// don't have to fight in the window where we've
		// passed that deadline and no one has started the
		// worker yet.
		//
		// TODO(austin): Shorter preemption interval for mark
		// worker to improve fairness and give this
		// finer-grained control over schedule?
		now := nanotime() - gcController.markStartTime
		then := now + gcForcePreemptNS
		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
			// Nope, we'd overshoot the utilization goal.
			atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
			return nil
		}
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := _p_.gcBgMarkWorker.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// gcGoalUtilization is the goal CPU utilization for background
// marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.25

// gcCreditSlack is the amount of scan work credit that can
// accumulate locally before updating gcController.scanWork and,
// optionally, gcController.bgScanCredit. Lower values give a more
// accurate assist ratio and make it more likely that assists will
// successfully steal background credit. Higher values reduce memory
// contention.
const gcCreditSlack = 2000

// gcAssistTimeSlack is the nanoseconds of mutator assist time that
// can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000

// gcOverAssistWork determines how many extra units of scan work a GC
// assist does when an assist happens. This amortizes the cost of an
// assist by pre-paying for this many bytes of future allocations.
const gcOverAssistWork = 64 << 10

var work struct {
	full  uint64                   // lock-free list of full blocks workbuf
	empty uint64                   // lock-free list of empty blocks workbuf
	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait

	markrootNext uint32 // next markroot job
	markrootJobs uint32 // number of markroot jobs

	nproc   uint32
	tstart  int64
	nwait   uint32
	ndone   uint32
	alldone note

	// helperDrainBlock indicates that GC mark termination helpers
	// should pass gcDrainBlock to gcDrain to block in the
	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
	//
	// TODO: This is a temporary fallback to support
	// debug.gcrescanstacks > 0 and to work around some known
	// races. Remove this when we remove the debug option and fix
	// the races.
	helperDrainBlock bool

	// Number of roots of various root types. Set by gcMarkRootPrepare.
	nFlushCacheRoots                                             int
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nRescanRoots int

	// markrootDone indicates that roots have been marked at least
	// once during the current GC cycle. This is checked by root
	// marking operations that have to happen only during the
	// first root marking pass, whether that's during the
	// concurrent mark phase in current GC or mark termination in
	// STW GC.
	markrootDone bool

	// Each type of GC state transition is protected by a lock.
	// Since multiple threads can simultaneously detect the state
	// transition condition, any thread that detects a transition
	// condition must acquire the appropriate transition lock,
	// re-check the transition condition and return if it no
	// longer holds or perform the transition if it does.
	// Likewise, any transition must invalidate the transition
	// condition before releasing the lock. This ensures that each
	// transition is performed by exactly one thread and threads
	// that need the transition to happen block until it has
	// happened.
	//
	// startSema protects the transition from "off" to mark or
	// mark termination.
	startSema uint32
	// markDoneSema protects transitions from mark 1 to mark 2 and
	// from mark 2 to mark termination.
	markDoneSema uint32

	bgMarkReady note   // signal background mark worker has started
	bgMarkDone  uint32 // cas to 1 when at a background mark completion point
	// Background mark completion signaling

	// mode is the concurrency mode of the current GC cycle.
	mode gcMode

	// totaltime is the CPU nanoseconds spent in GC since the
	// program started if debug.gctrace > 0.
	totaltime int64

	// bytesMarked is the number of bytes marked this cycle. This
	// includes bytes blackened in scanned objects, noscan objects
	// that go straight to black, and permagrey objects scanned by
	// markroot during the concurrent scan phase. This is updated
	// atomically during the cycle. Updates may be batched
	// arbitrarily, since the value is only read at the end of the
	// cycle.
	//
	// Because of benign races during marking, this number may not
	// be the exact number of marked bytes, but it should be very
	// close.
	bytesMarked uint64

	// initialHeapLive is the value of memstats.heap_live at the
	// beginning of this GC cycle.
	initialHeapLive uint64

	// assistQueue is a queue of assists that are blocked because
	// there was neither enough credit to steal nor enough work to
	// do.
	assistQueue struct {
		lock       mutex
		head, tail guintptr
	}

	// rescan is a list of G's that need to be rescanned during
	// mark termination. A G adds itself to this list when it
	// first invalidates its stack scan.
	rescan struct {
		lock mutex
		list []guintptr
	}

	// Timing/utilization stats for this cycle.
	stwprocs, maxprocs                 int32
	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start

	pauseNS    int64 // total STW time this cycle
	pauseStart int64 // nanotime() of last STW

	// debug.gctrace heap sizes for this cycle.
	heap0, heap1, heap2, heapGoal uint64
}

// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
// program.
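//
// A typical use (a sketch of user code, not part of the runtime) is to force a
// collection so that a subsequent heap profile reflects only live data:
//
//	runtime.GC()
//	pprof.WriteHeapProfile(f) // f is an io.Writer such as an os.File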
func GC() {
	gcStart(gcForceBlockMode, false)
}

// gcMode indicates how concurrent a GC cycle should be.
type gcMode int

const (
	gcBackgroundMode gcMode = iota // concurrent GC and sweep
	gcForceMode                    // stop-the-world GC now, concurrent sweep
	gcForceBlockMode               // stop-the-world GC now and STW sweep
)

// gcShouldStart returns true if the exit condition for the _GCoff
// phase has been met. The exit condition should be tested when
// allocating.
//
// If forceTrigger is true, it ignores the current heap size, but
// checks all other conditions. In general this should be false.
func gcShouldStart(forceTrigger bool) bool {
	return gcphase == _GCoff && (forceTrigger || memstats.heap_live >= memstats.gc_trigger) && memstats.enablegc && panicking == 0 && gcpercent >= 0
}

// gcStart transitions the GC from _GCoff to _GCmark (if mode ==
// gcBackgroundMode) or _GCmarktermination (if mode !=
// gcBackgroundMode) by performing sweep termination and GC
// initialization.
//
// This may return without performing this transition in some cases,
// such as when called on a system stack or with locks held.
func gcStart(mode gcMode, forceTrigger bool) {
	// Since this is called from malloc and malloc is called in
	// the guts of a number of libraries that might be holding
	// locks, don't attempt to start GC in non-preemptible or
	// potentially unstable situations.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	// Pick up the remaining unswept/not being swept spans concurrently.
	//
	// This shouldn't happen if we're being invoked in background
	// mode since proportional sweep should have just finished
	// sweeping everything, but rounding errors, etc, may leave a
	// few spans unswept. In forced mode, this is necessary since
	// GC can be forced at any point in the sweeping cycle.
	//
	// We check the transition condition continuously here in case
	// this G gets delayed into the next GC cycle.
	for (mode != gcBackgroundMode || gcShouldStart(forceTrigger)) && gosweepone() != ^uintptr(0) {
		sweep.nbgsweep++
	}

	// Perform GC initialization and the sweep termination
	// transition.
	//
	// If this is a forced GC, don't acquire the transition lock
	// or re-check the transition condition because we
	// specifically *don't* want to share the transition with
	// another thread.
	useStartSema := mode == gcBackgroundMode
	if useStartSema {
		semacquire(&work.startSema, 0)
		// Re-check transition condition under transition lock.
		if !gcShouldStart(forceTrigger) {
			semrelease(&work.startSema)
			return
		}
	}

	// In gcstoptheworld debug mode, upgrade the mode accordingly.
	// We do this after re-checking the transition condition so
	// that multiple goroutines that detect the heap trigger don't
	// start multiple STW GCs.
	if mode == gcBackgroundMode {
		if debug.gcstoptheworld == 1 {
			mode = gcForceMode
		} else if debug.gcstoptheworld == 2 {
			mode = gcForceBlockMode
		}
	}

	// Ok, we're doing it! Stop everybody else.
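	// worldsema serializes GC cycles; it is held for the rest of this cycle
	// and released by gcMarkTermination after the world is restarted.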
	semacquire(&worldsema, 0)

	if trace.enabled {
		traceGCStart()
	}

	if mode == gcBackgroundMode {
		gcBgMarkStartWorkers()
	}

	gcResetMarkState()

	now := nanotime()
	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
	work.tSweepTerm = now
	work.heap0 = memstats.heap_live
	work.pauseNS = 0
	work.mode = mode

	work.pauseStart = now
	systemstack(stopTheWorldWithSema)
	// Finish sweep before we start concurrent scan.
	systemstack(func() {
		finishsweep_m()
	})
	// clearpools before we start the GC. If we wait, the memory will not be
	// reclaimed until the next GC cycle.
	clearpools()

	if mode == gcBackgroundMode { // Do as much work concurrently as possible
		gcController.startCycle()
		work.heapGoal = memstats.next_gc

		// Enter concurrent mark phase and enable
		// write barriers.
		//
		// Because the world is stopped, all Ps will
		// observe that write barriers are enabled by
		// the time we start the world and begin
		// scanning.
		//
		// It's necessary to enable write barriers
		// during the scan phase for several reasons:
		//
		// They must be enabled for writes to higher
		// stack frames before we scan stacks and
		// install stack barriers because this is how
		// we track writes to inactive stack frames.
		// (Alternatively, we could not install stack
		// barriers over frame boundaries with
		// up-pointers).
		//
		// They must be enabled before assists are
		// enabled because they must be enabled before
		// any non-leaf heap objects are marked. Since
		// allocations are blocked until assists can
		// happen, we want to enable assists as early as
		// possible.
		setGCPhase(_GCmark)

		gcBgMarkPrepare() // Must happen before assist enable.
		gcMarkRootPrepare()

		// Mark all active tinyalloc blocks. Since we're
		// allocating from these, they need to be black like
		// other allocations. The alternative is to blacken
		// the tiny block on every allocation from it, which
		// would slow down the tiny allocator.
		gcMarkTinyAllocs()

		// At this point all Ps have enabled the write
		// barrier, thus maintaining the no white to
		// black invariant. Enable mutator assists to
		// put back-pressure on fast allocating
		// mutators.
		atomic.Store(&gcBlackenEnabled, 1)

		// Assists and workers can start the moment we start
		// the world.
		gcController.markStartTime = now

		// Concurrent mark.
		systemstack(startTheWorldWithSema)
		now = nanotime()
		work.pauseNS += now - work.pauseStart
		work.tMark = now
	} else {
		t := nanotime()
		work.tMark, work.tMarkTerm = t, t
		work.heapGoal = work.heap0

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}

	if useStartSema {
		semrelease(&work.startSema)
	}
}

// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
// to mark termination.
//
// This should be called when all mark work has been drained. In mark
// 1, this includes all root marking jobs, global work buffers, and
// active work buffers in assists and background workers; however,
// work may still be cached in per-P work buffers. In mark 2, per-P
// caches are disabled.
//
// The calling context must be preemptible.
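//
// To keep the transition single-threaded, gcMarkDone temporarily drives
// dedicatedMarkWorkersNeeded and fractionalMarkWorkersNeeded far negative so
// that no new workers start while the current phase drains; the counts are
// restored once mark 2 is set up.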
//
// Note that it is explicitly okay to have write barriers in this
// function because completion of concurrent mark is best-effort
// anyway. Any work created by write barriers here will be cleaned up
// by mark termination.
func gcMarkDone() {
top:
	semacquire(&work.markDoneSema, 0)

	// Re-check transition condition under transition lock.
	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
		semrelease(&work.markDoneSema)
		return
	}

	// Disallow starting new workers so that any remaining workers
	// in the current mark phase will drain out.
	//
	// TODO(austin): Should dedicated workers keep an eye on this
	// and exit gcDrain promptly?
	atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
	atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)

	if !gcBlackenPromptly {
		// Transition from mark 1 to mark 2.
		//
		// The global work list is empty, but there can still be work
		// sitting in the per-P work caches.
		// Flush and disable work caches.

		gcMarkRootCheck()

		// Disallow caching workbufs and indicate that we're in mark 2.
		gcBlackenPromptly = true

		// Prevent completion of mark 2 until we've flushed
		// cached workbufs.
		atomic.Xadd(&work.nwait, -1)

		// GC is set up for mark 2. Let Gs blocked on the
		// transition lock go while we flush caches.
		semrelease(&work.markDoneSema)

		systemstack(func() {
			// Flush all currently cached workbufs and
			// ensure all Ps see gcBlackenPromptly. This
			// also blocks until any remaining mark 1
			// workers have exited their loop so we can
			// start new mark 2 workers.
			forEachP(func(_p_ *p) {
				_p_.gcw.dispose()
			})
		})

		// Now we can start up mark 2 workers.
		atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
		atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)

		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// This loop will make progress because
			// gcBlackenPromptly is now true, so it won't
			// take this same "if" branch.
			goto top
		}
	} else {
		// Transition to mark termination.
		now := nanotime()
		work.tMarkTerm = now
		work.pauseStart = now
		getg().m.preemptoff = "gcing"
		systemstack(stopTheWorldWithSema)
		// The gcphase is _GCmark; it will transition to _GCmarktermination
		// below. The important thing is that the wb remains active until
		// all marking is complete. This includes writes made by the GC.

		// Record that one root marking pass has completed.
		work.markrootDone = true

		// Disable assists and background workers. We must do
		// this before waking blocked assists.
		atomic.Store(&gcBlackenEnabled, 0)

		// Wake all blocked assists. These will run when we
		// start the world again.
		gcWakeAllAssists()

		// Likewise, release the transition lock. Blocked
		// workers and assists will run when we start the
		// world again.
		semrelease(&work.markDoneSema)

		// endCycle depends on all gcWork cache stats being
		// flushed. This is ensured by mark 2.
		gcController.endCycle()

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}
}

func gcMarkTermination() {
	// World is stopped.
	// Start marktermination which includes enabling the write barrier.
	atomic.Store(&gcBlackenEnabled, 0)
	gcBlackenPromptly = false
	setGCPhase(_GCmarktermination)

	work.heap1 = memstats.heap_live
	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	_g_ := getg()
	_g_.m.traceback = 2
	gp := _g_.m.curg
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "garbage collection"

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		// Must return immediately.
		// The outer function's stack may have moved
		// during gcMark (it shrinks stacks, including the
		// outer function's stack), so we must not refer
		// to any of its variables. Return back to the
		// non-system stack to pick up the new addresses
		// before continuing.
	})

	systemstack(func() {
		work.heap2 = work.bytesMarked
		if debug.gccheckmark > 0 {
			// Run a full stop-the-world mark using checkmark bits,
			// to check that we didn't forget to mark anything during
			// the concurrent mark process.
			gcResetMarkState()
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(work.mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetMarkState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(work.mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Update timing memstats
	now, unixNow := nanotime(), unixnanotime()
	work.pauseNS += now - work.pauseStart
	work.tEnd = now
	atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(work.pauseNS)

	// Update work.totaltime.
	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
	// We report idle marking time below, but omit it from the
	// overall utilization here since it's "free".
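	// markCpu therefore excludes gcController.idleMarkTime.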
	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
	cycleCpu := sweepTermCpu + markCpu + markTermCpu
	work.totaltime += cycleCpu

	// Compute overall GC CPU utilization.
	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

	memstats.numgc++

	// Reset sweep state.
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	systemstack(startTheWorldWithSema)

	// Update heap profile stats if gcSweep didn't do it. This is
	// relatively expensive, so we don't want to do it while the
	// world is stopped, but it needs to happen ASAP after
	// starting the world to prevent too many allocations from the
	// next cycle leaking in. It must happen before releasing
	// worldsema since there are applications that do a
	// runtime.GC() to update the heap profile and then
	// immediately collect the profile.
	if _ConcurrentSweep && work.mode != gcForceBlockMode {
		mProf_GC()
	}

	// Free stack spans. This must be done between GC cycles.
	systemstack(freeStackSpans)

	// Best-effort remove stack barriers so they don't get in the
	// way of things like GDB and perf.
	lock(&allglock)
	myallgs := allgs
	unlock(&allglock)
	gcTryRemoveAllStackBarriers(myallgs)

	// Print gctrace before dropping worldsema. As soon as we drop
	// worldsema another cycle could start and smash the stats
	// we're trying to print.
	if debug.gctrace > 0 {
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ")
		prev := work.tSweepTerm
		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
			if i == 2 || i == 3 {
				// Separate mark time components with /.
				print("/")
			} else if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
		}
		print(" ms cpu, ",
			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
			work.heapGoal>>20, " MB goal, ",
			work.maxprocs, " P")
		if work.mode != gcBackgroundMode {
			print(" (forced)")
		}
		print("\n")
		printunlock()
	}

	semrelease(&worldsema)
	// Careful: another GC cycle may start now.

	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}

// gcBgMarkStartWorkers prepares background mark worker goroutines.
// These goroutines will not run until the mark phase, but they must
// be started while the world is not stopped and from a regular G
// stack. The caller must hold worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that
	// each P has a background GC G.
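	//
	// The notetsleepg/noteclear pair below waits for each newly started
	// worker to register itself (via notewakeup in gcBgMarkWorker) before
	// this loop moves on to the next P.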
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		if p.gcBgMarkWorker == 0 {
			go gcBgMarkWorker(p)
			notetsleepg(&work.bgMarkReady, -1)
			noteclear(&work.bgMarkReady)
		}
	}
}

// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking will stop when the work queues are empty
	// and there are no more workers (note that, since this is
	// concurrent, this may be a transient state, but mark
	// termination will clean it up). Between background workers
	// and assists, we don't really know how many workers there
	// will be, so we pretend to have an arbitrarily large number
	// of workers, almost all of which are "waiting". While a
	// worker is working it decrements nwait. If nproc == nwait,
	// there are no workers.
	work.nproc = ^uint32(0)
	work.nwait = ^uint32(0)
}

func gcBgMarkWorker(_p_ *p) {
	gp := getg()

	type parkInfo struct {
		m      muintptr // Release this m on park.
		attach puintptr // If non-nil, attach to this p on park.
	}
	// We pass park to a gopark unlock function, so it can't be on
	// the stack (see gopark). Prevent deadlock from recursively
	// starting GC by disabling preemption.
	gp.m.preemptoff = "GC worker init"
	park := new(parkInfo)
	gp.m.preemptoff = ""

	park.m.set(acquirem())
	park.attach.set(_p_)
	// Inform gcBgMarkStartWorkers that this worker is ready.
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)

	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, parkp unsafe.Pointer) bool {
			park := (*parkInfo)(parkp)

			// The worker G is no longer running, so it's
			// now safe to allow preemption.
			releasem(park.m.ptr())

			// If the worker isn't attached to its P,
			// attach now. During initialization and after
			// a phase change, the worker may have been
			// running on a different P. As soon as we
			// attach, the owner P may schedule the
			// worker, so this must be done after the G is
			// stopped.
			if park.attach != 0 {
				p := park.attach.ptr()
				park.attach.set(nil)
				// cas the worker because we may be
				// racing with a new worker starting
				// on this P.
				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
					// The P got a new worker.
					// Exit this worker.
					return false
				}
			}
			return true
		}, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker (the P may later be reused, in which case
		// it will get a new worker) or we failed to associate.
		if _p_.gcBgMarkWorker.ptr() != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
		park.m.set(acquirem())

		if gcBlackenEnabled == 0 {
			throw("gcBgMarkWorker: blackening not enabled")
		}

		startTime := nanotime()

		decnwait := atomic.Xadd(&work.nwait, -1)
		if decnwait == work.nproc {
			println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
			throw("work.nwait was > work.nproc")
		}

		systemstack(func() {
			// Mark our goroutine preemptible so its stack
			// can be scanned. This lets two mark workers
			// scan each other (otherwise, they would
			// deadlock). We must not modify anything on
			// the G stack. However, stack shrinking is
			// disabled for mark workers, so it is safe to
			// read from the G stack.
			casgstatus(gp, _Grunning, _Gwaiting)
			switch _p_.gcMarkWorkerMode {
			default:
				throw("gcBgMarkWorker: unexpected gcMarkWorkerMode")
			case gcMarkWorkerDedicatedMode:
				gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
			case gcMarkWorkerFractionalMode, gcMarkWorkerIdleMode:
				gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit)
			}
			casgstatus(gp, _Gwaiting, _Grunning)
		})

		// If we are nearing the end of mark, dispose
		// of the cache promptly. We must do this
		// before signaling that we're no longer
		// working so that other workers can't observe
		// no workers and no work while we have this
		// cached, and before we compute done.
		if gcBlackenPromptly {
			_p_.gcw.dispose()
		}

		// Account for time.
		duration := nanotime() - startTime
		switch _p_.gcMarkWorkerMode {
		case gcMarkWorkerDedicatedMode:
			atomic.Xaddint64(&gcController.dedicatedMarkTime, duration)
			atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1)
		case gcMarkWorkerFractionalMode:
			atomic.Xaddint64(&gcController.fractionalMarkTime, duration)
			atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1)
		case gcMarkWorkerIdleMode:
			atomic.Xaddint64(&gcController.idleMarkTime, duration)
		}

		// Was this the last worker and did we run out
		// of work?
		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait > work.nproc {
			println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode,
				"work.nwait=", incnwait, "work.nproc=", work.nproc)
			throw("work.nwait > work.nproc")
		}

		// If this worker reached a background mark completion
		// point, signal the main GC goroutine.
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// Make this G preemptible and disassociate it
			// as the worker for this P so
			// findRunnableGCWorker doesn't try to
			// schedule it.
			_p_.gcBgMarkWorker.set(nil)
			releasem(park.m.ptr())

			gcMarkDone()

			// Disable preemption and prepare to reattach
			// to the P.
			//
			// We may be running on a different P at this
			// point, so we can't reattach until this G is
			// parked.
			park.m.set(acquirem())
			park.attach.set(_p_)
		}
	}
}

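// The completion check above follows a common "last worker out" pattern. A
// minimal, self-contained sketch of that pattern (illustrative only; the
// names and values below are invented and this is not runtime code):
//
//	package main
//
//	import (
//		"fmt"
//		"sync"
//		"sync/atomic"
//	)
//
//	func main() {
//		const nproc = 4
//		nwait := int32(nproc) // all workers start out "waiting"
//		var wg sync.WaitGroup
//		for i := 0; i < nproc; i++ {
//			wg.Add(1)
//			go func(id int) {
//				defer wg.Done()
//				atomic.AddInt32(&nwait, -1) // start working
//				// ... drain some work ...
//				if atomic.AddInt32(&nwait, +1) == nproc {
//					// Possibly the last worker. As the comment in
//					// gcBgMarkPrepare notes, this state can be
//					// transient, so a real implementation must
//					// re-verify that no work remains before
//					// declaring completion.
//					fmt.Println("worker", id, "sees everyone waiting")
//				}
//			}(i)
//		}
//		wg.Wait()
//	}
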
// gcMarkWorkAvailable returns true if executing a mark worker
// on p is potentially useful. p may be nil, in which case it only
// checks the global sources of work.
func gcMarkWorkAvailable(p *p) bool {
	if p != nil && !p.gcw.empty() {
		return true
	}
	if atomic.Load64(&work.full) != 0 {
		return true // global work available
	}
	if work.markrootNext < work.markrootJobs {
		return true // root scan work available
	}
	return false
}

// gcMark runs the mark (or, for concurrent GC, mark termination).
// All gcWork caches must be empty.
// STW is in effect at this point.
//TODO go:nowritebarrier
func gcMark(start_time int64) {
	if debug.allocfreetrace > 0 {
		tracegc()
	}

	if gcphase != _GCmarktermination {
		throw("in gcMark expecting to see gcphase as _GCmarktermination")
	}
	work.tstart = start_time

	// Queue root marking jobs.
	gcMarkRootPrepare()

	work.nwait = 0
	work.ndone = 0
	work.nproc = uint32(gcprocs())

	if debug.gcrescanstacks == 0 && work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots+work.nRescanRoots == 0 {
		// There's no work on the work queue and no root jobs
		// that can produce work, so don't bother entering the
		// getfull() barrier.
		//
		// With the hybrid barrier enabled, this will be the
		// situation the vast majority of the time after
		// concurrent mark. However, we still need a fallback
		// for STW GC and because there are some known races
		// that occasionally leave work around for mark
		// termination.
		//
		// We're still hedging our bets here: if we do
		// accidentally produce some work, we'll still process
		// it, just not necessarily in parallel.
		//
		// TODO(austin): When we eliminate
		// debug.gcrescanstacks, fix the races and remove
		// work draining from mark termination so we don't
		// need the fallback path.
		work.helperDrainBlock = false
	} else {
		work.helperDrainBlock = true
	}

	if trace.enabled {
		traceGCScanStart()
	}

	if work.nproc > 1 {
		noteclear(&work.alldone)
		helpgc(int32(work.nproc))
	}

	gchelperstart()

	gcw := &getg().m.p.ptr().gcw
	if work.helperDrainBlock {
		gcDrain(gcw, gcDrainBlock)
	} else {
		gcDrain(gcw, gcDrainNoBlock)
	}
	gcw.dispose()

	if debug.gccheckmark > 0 {
		// This is expensive when there's a large number of
		// Gs, so only do it if checkmark is also enabled.
		gcMarkRootCheck()
	}
	if work.full != 0 {
		throw("work.full != 0")
	}

	if work.nproc > 1 {
		notesleep(&work.alldone)
	}

	// Record that at least one root marking pass has completed.
	work.markrootDone = true

	// Double-check that all gcWork caches are empty. This should
	// be ensured by mark 2 before we enter mark termination.
	for i := 0; i < int(gomaxprocs); i++ {
		gcw := &allp[i].gcw
		if !gcw.empty() {
			throw("P has cached GC work at end of mark termination")
		}
		if gcw.scanWork != 0 || gcw.bytesMarked != 0 {
			throw("P has unflushed stats at end of mark termination")
		}
	}

	if trace.enabled {
		traceGCScanDone()
	}

	cachestats()

	// Update the marked heap stat.
	memstats.heap_marked = work.bytesMarked

	// Trigger the next GC cycle when the allocated heap has grown
	// by triggerRatio over the marked heap size. Assume that
	// we're in steady state, so the marked heap size is the
	// same now as it was at the beginning of the GC cycle.
	memstats.gc_trigger = uint64(float64(memstats.heap_marked) * (1 + gcController.triggerRatio))
	if memstats.gc_trigger < heapminimum {
		memstats.gc_trigger = heapminimum
	}
	if int64(memstats.gc_trigger) < 0 {
		print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n")
		throw("gc_trigger underflow")
	}

	// Update other GC heap size stats. This must happen after
	// cachestats (which flushes local statistics to these) and
	// flushallmcaches (which modifies heap_live).
	memstats.heap_live = work.bytesMarked
	memstats.heap_scan = uint64(gcController.scanWork)

	minTrigger := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100
	if memstats.gc_trigger < minTrigger {
		// The allocated heap is already past the trigger.
		// This can happen if the triggerRatio is very low and
		// the marked heap is less than the live heap size.
		//
		// Concurrent sweep happens in the heap growth from
		// heap_live to gc_trigger, so bump gc_trigger up to ensure
		// that concurrent sweep has some heap growth in which
		// to perform sweeping before we start the next GC
		// cycle.
		memstats.gc_trigger = minTrigger
	}

	// The next GC cycle should finish before the allocated heap
	// has grown by GOGC/100.
	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}
	if memstats.next_gc < memstats.gc_trigger {
		memstats.next_gc = memstats.gc_trigger
	}

	if trace.enabled {
		traceHeapAlloc()
		traceNextGC()
	}
}

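// To make the trigger arithmetic above concrete (all values invented for
// illustration): with heap_marked = 100 MB, gcpercent = 100 (GOGC=100), and
// an assumed triggerRatio of 0.55, gcMark would set
//
//	gc_trigger = 100 MB * (1 + 0.55)       = 155 MB
//	next_gc    = 100 MB + 100 MB * 100/100 = 200 MB
//
// so the next concurrent mark starts once the live heap grows to roughly
// 155 MB, with the aim of finishing before it reaches the 200 MB goal.
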
func gcSweep(mode gcMode) {
	if gcphase != _GCoff {
		throw("gcSweep being done but phase is not GCoff")
	}

	lock(&mheap_.lock)
	mheap_.sweepgen += 2
	mheap_.sweepdone = 0
	if mheap_.sweepSpans[mheap_.sweepgen/2%2].index != 0 {
		// We should have drained this list during the last
		// sweep phase. We certainly need to start this phase
		// with an empty swept list.
		throw("non-empty swept list")
	}
	unlock(&mheap_.lock)

	if !_ConcurrentSweep || mode == gcForceBlockMode {
		// Special case synchronous sweep.
		// Record that no proportional sweeping has to happen.
		lock(&mheap_.lock)
		mheap_.sweepPagesPerByte = 0
		mheap_.pagesSwept = 0
		unlock(&mheap_.lock)
		// Sweep all spans eagerly.
		for sweepone() != ^uintptr(0) {
			sweep.npausesweep++
		}
		// Do an additional mProf_GC, because all 'free' events are now real as well.
		mProf_GC()
		mProf_GC()
		return
	}

	// Concurrent sweep needs to sweep all of the in-use pages by
	// the time the allocated heap reaches the GC trigger. Compute
	// the ratio of in-use pages to sweep per byte allocated.
	heapDistance := int64(memstats.gc_trigger) - int64(memstats.heap_live)
	// Add a little margin so rounding errors and concurrent
	// sweep are less likely to leave pages unswept when GC starts.
	heapDistance -= 1024 * 1024
	if heapDistance < _PageSize {
		// Avoid setting the sweep ratio extremely high
		heapDistance = _PageSize
	}
	lock(&mheap_.lock)
	mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance)
	mheap_.pagesSwept = 0
	mheap_.spanBytesAlloc = 0
	unlock(&mheap_.lock)

	// Background sweep.
	lock(&sweep.lock)
	if sweep.parked {
		sweep.parked = false
		ready(sweep.g, 0, true)
	}
	unlock(&sweep.lock)
}

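// As a concrete reading of the proportional sweep ratio above (values invented
// for illustration): if 12800 pages are in use (100 MB with 8 KB pages) and
// heapDistance works out to 50 MB of heap growth before the next GC trigger,
// then
//
//	sweepPagesPerByte = 12800 / (50 << 20) ≈ 0.000244
//
// so an allocation of 1 MB is expected to sweep about 256 pages, and all
// in-use pages are swept by the time the trigger is reached.
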
// gcResetMarkState resets global state prior to marking (concurrent
// or STW) and resets the stack scan state of all Gs.
//
// This is safe to do without the world stopped because any Gs created
// during or after this will start out in the reset state.
func gcResetMarkState() {
	// This may be called during a concurrent phase, so make sure
	// allgs doesn't change.
	if !(gcphase == _GCoff || gcphase == _GCmarktermination) {
		// Accessing gcRescan is unsafe.
		throw("bad GC phase")
	}
	lock(&allglock)
	for _, gp := range allgs {
		gp.gcscandone = false  // set to true in gcphasework
		gp.gcscanvalid = false // stack has not been scanned
		gp.gcRescan = -1
		gp.gcAssistBytes = 0
	}
	unlock(&allglock)

	// Clear rescan list.
	work.rescan.list = work.rescan.list[:0]

	work.bytesMarked = 0
	work.initialHeapLive = memstats.heap_live
	work.markrootDone = false
}

// Hooks for other packages

var poolcleanup func()

//go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup
func sync_runtime_registerPoolCleanup(f func()) {
	poolcleanup = f
}

func clearpools() {
	// clear sync.Pools
	if poolcleanup != nil {
		poolcleanup()
	}

	// Clear central sudog cache.
	// Leave per-P caches alone, they have strictly bounded size.
	// Disconnect cached list before dropping it on the floor,
	// so that a dangling ref to one entry does not pin all of them.
	lock(&sched.sudoglock)
	var sg, sgnext *sudog
	for sg = sched.sudogcache; sg != nil; sg = sgnext {
		sgnext = sg.next
		sg.next = nil
	}
	sched.sudogcache = nil
	unlock(&sched.sudoglock)

	// Clear central defer pools.
	// Leave per-P pools alone, they have strictly bounded size.
	lock(&sched.deferlock)
	for i := range sched.deferpool {
		// disconnect cached list before dropping it on the floor,
		// so that a dangling ref to one entry does not pin all of them.
		var d, dlink *_defer
		for d = sched.deferpool[i]; d != nil; d = dlink {
			dlink = d.link
			d.link = nil
		}
		sched.deferpool[i] = nil
	}
	unlock(&sched.deferlock)
}

// Timing

//go:nowritebarrier
func gchelper() {
	_g_ := getg()
	_g_.m.traceback = 2
	gchelperstart()

	if trace.enabled {
		traceGCScanStart()
	}

	// Parallel mark over GC roots and heap
	if gcphase == _GCmarktermination {
		gcw := &_g_.m.p.ptr().gcw
		if work.helperDrainBlock {
			gcDrain(gcw, gcDrainBlock) // blocks in getfull
		} else {
			gcDrain(gcw, gcDrainNoBlock)
		}
		gcw.dispose()
	}

	if trace.enabled {
		traceGCScanDone()
	}

	nproc := work.nproc // work.nproc can change right after we increment work.ndone
	if atomic.Xadd(&work.ndone, +1) == nproc-1 {
		notewakeup(&work.alldone)
	}
	_g_.m.traceback = 0
}

func gchelperstart() {
	_g_ := getg()

	if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
		throw("gchelperstart: bad m->helpgc")
	}
	if _g_ != _g_.m.g0 {
		throw("gchelper not running on g0 stack")
	}
}

// itoaDiv formats val/(10**dec) into buf.
func itoaDiv(buf []byte, val uint64, dec int) []byte {
	i := len(buf) - 1
	idec := i - dec
	for val >= 10 || i >= idec {
		buf[i] = byte(val%10 + '0')
		i--
		if i == idec {
			buf[i] = '.'
			i--
		}
		val /= 10
	}
	buf[i] = byte(val + '0')
	return buf[i:]
}

// fmtNSAsMS nicely formats ns nanoseconds as milliseconds.
func fmtNSAsMS(buf []byte, ns uint64) []byte {
	if ns >= 10e6 {
		// Format as whole milliseconds.
		return itoaDiv(buf, ns/1e6, 0)
	}
	// Format two digits of precision, with at most three decimal places.
	x := ns / 1e3
	if x == 0 {
		buf[0] = '0'
		return buf[:1]
	}
	dec := 3
	for x >= 100 {
		x /= 10
		dec--
	}
	return itoaDiv(buf, x, dec)
}
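
// Worked examples of the two formatters above (inputs chosen for
// illustration):
//
//	itoaDiv(buf, 1234, 3)    // "1.234"  (1234 / 10**3)
//	itoaDiv(buf, 12, 0)      // "12"
//	fmtNSAsMS(buf, 2340000)  // "2.3"    (2.34 ms, two digits of precision)
//	fmtNSAsMS(buf, 12345678) // "12"     (>= 10 ms, whole milliseconds)
//
// where buf is a scratch byte slice such as the sbuf used in the gctrace
// printing above.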