github.com/riscv/riscv-go@v0.0.0-20200123204226-124ebd6fcc8e/src/runtime/mgc.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector (GC).
//
// The GC runs concurrently with mutator threads, is type accurate (aka precise), allows multiple
// GC threads to run in parallel. It is a concurrent mark and sweep that uses a write barrier. It is
// non-generational and non-compacting. Allocation is done using size segregated per P allocation
// areas to minimize fragmentation while eliminating locks in the common case.
//
// The algorithm decomposes into several steps.
// This is a high level description of the algorithm being used. For an overview of GC a good
// place to start is Richard Jones' gchandbook.org.
//
// The algorithm's intellectual heritage includes Dijkstra's on-the-fly algorithm, see
// Edsger W. Dijkstra, Leslie Lamport, A. J. Martin, C. S. Scholten, and E. F. M. Steffens. 1978.
// On-the-fly garbage collection: an exercise in cooperation. Commun. ACM 21, 11 (November 1978),
// 966-975.
// For journal quality proofs that these steps are complete, correct, and terminate see
// Hudson, R., and Moss, J.E.B. Copying Garbage Collection without stopping the world.
// Concurrency and Computation: Practice and Experience 15(3-5), 2003.
//
// 1. GC performs sweep termination.
//
//    a. Stop the world. This causes all Ps to reach a GC safe-point.
//
//    b. Sweep any unswept spans. There will only be unswept spans if
//    this GC cycle was forced before the expected time.
//
// 2. GC performs the "mark 1" sub-phase. In this sub-phase, Ps are
// allowed to locally cache parts of the work queue.
//
//    a. Prepare for the mark phase by setting gcphase to _GCmark
//    (from _GCoff), enabling the write barrier, enabling mutator
//    assists, and enqueueing root mark jobs. No objects may be
//    scanned until all Ps have enabled the write barrier, which is
//    accomplished using STW.
//
//    b. Start the world. From this point, GC work is done by mark
//    workers started by the scheduler and by assists performed as
//    part of allocation. The write barrier shades both the
//    overwritten pointer and the new pointer value for any pointer
//    writes (see mbarrier.go for details). Newly allocated objects
//    are immediately marked black.
//
//    c. GC performs root marking jobs. This includes scanning all
//    stacks, shading all globals, and shading any heap pointers in
//    off-heap runtime data structures. Scanning a stack stops a
//    goroutine, shades any pointers found on its stack, and then
//    resumes the goroutine.
//
//    d. GC drains the work queue of grey objects, scanning each grey
//    object to black and shading all pointers found in the object
//    (which in turn may add those pointers to the work queue).
//
// 3. Once the global work queue is empty (but local work queue caches
// may still contain work), GC performs the "mark 2" sub-phase.
//
//    a. GC stops all workers, disables local work queue caches,
//    flushes each P's local work queue cache to the global work queue
//    cache, and reenables workers.
//
//    b. GC again drains the work queue, as in 2d above.
//
// 4. Once the work queue is empty, GC performs mark termination.
//
//    a. Stop the world.
//
//    b. Set gcphase to _GCmarktermination, and disable workers and
//    assists.
//
//    c. Drain any remaining work from the work queue (typically there
//    will be none).
//
//    d. Perform other housekeeping like flushing mcaches.
//
// 5. GC performs the sweep phase.
//
//    a. Prepare for the sweep phase by setting gcphase to _GCoff,
//    setting up sweep state and disabling the write barrier.
//
//    b. Start the world. From this point on, newly allocated objects
//    are white, and allocating sweeps spans before use if necessary.
//
//    c. GC does concurrent sweeping in the background and in response
//    to allocation. See description below.
//
// 6. When sufficient allocation has taken place, replay the sequence
// starting with 1 above. See discussion of GC rate below.

// Concurrent sweep.
//
// The sweep phase proceeds concurrently with normal program execution.
// The heap is swept span-by-span both lazily (when a goroutine needs another span)
// and concurrently in a background goroutine (this helps programs that are not CPU bound).
// At the end of STW mark termination all spans are marked as "needs sweeping".
//
// The background sweeper goroutine simply sweeps spans one-by-one.
//
// To avoid requesting more OS memory while there are unswept spans, when a
// goroutine needs another span, it first attempts to reclaim that much memory
// by sweeping. When a goroutine needs to allocate a new small-object span, it
// sweeps small-object spans for the same object size until it frees at least
// one object. When a goroutine needs to allocate a large-object span from the
// heap, it sweeps spans until it frees at least that many pages into the heap.
// There is one case where this may not suffice: if a goroutine sweeps and
// frees two nonadjacent one-page spans to the heap, it will allocate a new
// two-page span, but there can still be other one-page unswept spans which
// could be combined into a two-page span.
//
// It's critical to ensure that no operations proceed on unswept spans (that would corrupt
// mark bits in the GC bitmap). During GC all mcaches are flushed into the central cache,
// so they are empty. When a goroutine grabs a new span into mcache, it sweeps it.
// When a goroutine explicitly frees an object or sets a finalizer, it ensures that
// the span is swept (either by sweeping it, or by waiting for the concurrent sweep to finish).
// The finalizer goroutine is kicked off only when all spans are swept.
// When the next GC starts, it sweeps all not-yet-swept spans (if any).

// GC rate.
// The next GC is after we've allocated an extra amount of memory proportional to
// the amount already in use. The proportion is controlled by the GOGC environment
// variable (100 by default). If GOGC=100 and we're using 4M, we'll GC again when
// we get to 8M (this mark is tracked in the next_gc variable). This keeps the GC
// cost in linear proportion to the allocation cost. Adjusting GOGC just changes
// the linear constant (and also the amount of extra memory used).
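//
// As a worked example (illustrative numbers, not from this file): with
// GOGC=100 and 4M of live heap after the last mark, the next collection
// is triggered around 4M * (1 + 100/100) = 8M; with GOGC=50 it would be
// 6M. A minimal sketch of the rule, using a hypothetical helper name:
//
//	// nextGCGoal is not part of the runtime; it only restates the
//	// GOGC rule described above.
//	func nextGCGoal(liveHeap uint64, gogc uint64) uint64 {
//		return liveHeap + liveHeap*gogc/100
//	}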

// Oblets
//
// In order to prevent long pauses while scanning large objects and to
// improve parallelism, the garbage collector breaks up scan jobs for
// objects larger than maxObletBytes into "oblets" of at most
// maxObletBytes. When scanning encounters the beginning of a large
// object, it scans only the first oblet and enqueues the remaining
// oblets as new scan jobs.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

const (
	_DebugGC         = 0
	_ConcurrentSweep = true
	_FinBlockSize    = 4 * 1024

	// sweepMinHeapDistance is a lower bound on the heap distance
	// (in bytes) reserved for concurrent sweeping between GC
	// cycles. This will be scaled by gcpercent/100.
	sweepMinHeapDistance = 1024 * 1024
)

// heapminimum is the minimum heap size at which to trigger GC.
// For small heaps, this overrides the usual GOGC*live set rule.
//
// When there is a very small live set but a lot of allocation, simply
// collecting when the heap reaches GOGC*live results in many GC
// cycles and high total per-GC overhead. This minimum amortizes this
// per-GC overhead while keeping the heap reasonably small.
//
// During initialization this is set to 4MB*GOGC/100. In the case of
// GOGC==0, this will set heapminimum to 0, resulting in constant
// collection even when the heap size is small, which is useful for
// debugging.
var heapminimum uint64 = defaultHeapMinimum

// defaultHeapMinimum is the value of heapminimum for GOGC==100.
const defaultHeapMinimum = 4 << 20

// Initialized from $GOGC. GOGC=off means no GC.
var gcpercent int32

func gcinit() {
	if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
		throw("size of Workbuf is suboptimal")
	}

	_ = setGCPercent(readgogc())
	memstats.gc_trigger = heapminimum
	// Compute the goal heap size based on the trigger:
	//   trigger = marked * (1 + triggerRatio)
	//   marked = trigger / (1 + triggerRatio)
	//   goal = marked * (1 + GOGC/100)
	//        = trigger / (1 + triggerRatio) * (1 + GOGC/100)
	memstats.next_gc = uint64(float64(memstats.gc_trigger) / (1 + gcController.triggerRatio) * (1 + float64(gcpercent)/100))
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}
	work.startSema = 1
	work.markDoneSema = 1
}

func readgogc() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// gcenable is called after the bulk of the runtime initialization,
// just before we're about to start letting user code run.
// It kicks off the background sweeper goroutine and enables GC.
func gcenable() {
	c := make(chan int, 1)
	go bgsweep(c)
	<-c
	memstats.enablegc = true // now that runtime is initialized, GC is okay
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	lock(&mheap_.lock)
	out = gcpercent
	if in < 0 {
		in = -1
	}
	gcpercent = in
	heapminimum = defaultHeapMinimum * uint64(gcpercent) / 100
	if gcController.triggerRatio > float64(gcpercent)/100 {
		gcController.triggerRatio = float64(gcpercent) / 100
	}
	// This is either in gcinit or followed by a STW GC, both of
	// which will reset other stats like memstats.gc_trigger and
	// memstats.next_gc to appropriate values.
	unlock(&mheap_.lock)
	return out
}
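
// setGCPercent is reached from user code via the runtime/debug package
// (note the //go:linkname directive above). A minimal usage sketch,
// written from the user's side rather than the runtime's:
//
//	import "runtime/debug"
//
//	old := debug.SetGCPercent(50) // collect at 1.5x the live heap; returns the old setting
//	defer debug.SetGCPercent(old) // restore after a memory-sensitive phase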

// Garbage collector phase.
// Indicates to the write barrier and synchronization task what to perform.
var gcphase uint32

// The compiler knows about this variable.
// If you change it, you must change the compiler too.
var writeBarrier struct {
	enabled bool    // compiler emits a check of this before calling write barrier
	pad     [3]byte // compiler uses 32-bit load for "enabled" field
	needed  bool    // whether we need a write barrier for current GC phase
	cgo     bool    // whether we need a write barrier for a cgo check
	alignme uint64  // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// gcBlackenEnabled is 1 if mutator assists and background mark
// workers are allowed to blacken objects. This must only be set when
// gcphase == _GCmark.
var gcBlackenEnabled uint32

// gcBlackenPromptly indicates that optimizations that may
// hide work from the global work queue should be disabled.
//
// If gcBlackenPromptly is true, per-P gcWork caches should
// be flushed immediately and new objects should be allocated black.
//
// There is a tension between allocating objects white and
// allocating them black. If white and the objects die before being
// marked they can be collected during this GC cycle. On the other
// hand allocating them black will reduce _GCmarktermination latency
// since more work is done in the mark phase. This tension is resolved
// by allocating white until the mark phase is approaching its end and
// then allocating black for the remainder of the mark phase.
var gcBlackenPromptly bool

const (
	_GCoff             = iota // GC not running; sweeping in background, write barrier disabled
	_GCmark                   // GC marking roots and workbufs: allocate black, write barrier ENABLED
	_GCmarktermination        // GC mark termination: allocate black, P's help GC, write barrier ENABLED
)

//go:nosplit
func setGCPhase(x uint32) {
	atomic.Store(&gcphase, x)
	writeBarrier.needed = gcphase == _GCmark || gcphase == _GCmarktermination
	writeBarrier.enabled = writeBarrier.needed || writeBarrier.cgo
}

// gcMarkWorkerMode represents the mode that a concurrent mark worker
// should operate in.
//
// Concurrent marking happens through four different mechanisms. One
// is mutator assists, which happen in response to allocations and are
// not scheduled. The other three are variations in the per-P mark
// workers and are distinguished by gcMarkWorkerMode.
type gcMarkWorkerMode int

const (
	// gcMarkWorkerDedicatedMode indicates that the P of a mark
	// worker is dedicated to running that mark worker. The mark
	// worker should run without preemption.
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota

	// gcMarkWorkerFractionalMode indicates that a P is currently
	// running the "fractional" mark worker. The fractional worker
	// is necessary when GOMAXPROCS*gcGoalUtilization is not an
	// integer. The fractional worker should run until it is
	// preempted and will be scheduled to pick up the fractional
	// part of GOMAXPROCS*gcGoalUtilization.
	gcMarkWorkerFractionalMode

	// gcMarkWorkerIdleMode indicates that a P is running the mark
	// worker because it has nothing else to do. The idle worker
	// should run until it is preempted and account its time
	// against gcController.idleMarkTime.
	gcMarkWorkerIdleMode
)

// gcMarkWorkerModeStrings are the string labels of gcMarkWorkerModes
// to use in execution traces.
var gcMarkWorkerModeStrings = [...]string{
	"GC (dedicated)",
	"GC (fractional)",
	"GC (idle)",
}

// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It uses a feedback control algorithm to adjust the memstats.gc_trigger
// trigger based on the heap growth and GC CPU utilization each cycle.
// This algorithm optimizes for heap growth to match GOGC and for CPU
// utilization between assist and background marking to be 25% of
// GOMAXPROCS. The high-level design of this algorithm is documented
// at https://golang.org/s/go15gcpacing.
var gcController = gcControllerState{
	// Initial trigger ratio guess.
	triggerRatio: 7 / 8.0,
}

type gcControllerState struct {
	// scanWork is the total scan work performed this cycle. This
	// is updated atomically during the cycle. Updates occur in
	// bounded batches, since it is both written and read
	// throughout the cycle. At the end of the cycle, this is how
	// much of the retained heap is scannable.
	//
	// Currently this is the bytes of heap scanned. For most uses,
	// this is an opaque unit of work, but for estimation the
	// definition is important.
	scanWork int64

	// bgScanCredit is the scan work credit accumulated by the
	// concurrent background scan. This credit is accumulated by
	// the background scan and stolen by mutator assists. This is
	// updated atomically. Updates occur in bounded batches, since
	// it is both written and read throughout the cycle.
	bgScanCredit int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically. Updates
	// occur in bounded batches, since it is both written and read
	// throughout the cycle.
	assistTime int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated
	// mark workers during this cycle. This is updated atomically
	// at the end of the concurrent mark phase.
	dedicatedMarkTime int64

	// fractionalMarkTime is the nanoseconds spent in the
	// fractional mark worker during this cycle. This is updated
	// atomically throughout the cycle and will be up-to-date if
	// the fractional mark worker is not currently running.
	fractionalMarkTime int64

	// idleMarkTime is the nanoseconds spent in idle marking
	// during this cycle. This is updated atomically throughout
	// the cycle.
	idleMarkTime int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark
	// workers that need to be started. This is computed at the
	// beginning of each cycle and decremented atomically as
	// dedicated mark workers get started.
	dedicatedMarkWorkersNeeded int64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heap_scan is updated.
	assistWorkPerByte float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	assistBytesPerWork float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker.
	// For example, if the overall mark utilization goal is 25%
	// and GOMAXPROCS is 6, one P will be a dedicated mark worker
	// and this will be set to 0.5 so that 50% of the time some P
	// is in a fractional mark worker. This is computed at the
	// beginning of each cycle.
	fractionalUtilizationGoal float64

	// triggerRatio is the heap growth ratio at which the garbage
	// collection cycle should start. E.g., if this is 0.6, then
	// GC should start when the live heap has reached 1.6 times
	// the heap size marked by the previous cycle. This should be
	// ≤ GOGC/100 so the trigger heap size is less than the goal
	// heap size. This is updated at the end of each cycle.
	triggerRatio float64

	_ [sys.CacheLineSize]byte

	// fractionalMarkWorkersNeeded is the number of fractional
	// mark workers that need to be started. This is either 0 or
	// 1. This is potentially updated atomically at every
	// scheduling point (hence it gets its own cache line).
	fractionalMarkWorkersNeeded int64

	_ [sys.CacheLineSize]byte
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema.
func (c *gcControllerState) startCycle() {
	c.scanWork = 0
	c.bgScanCredit = 0
	c.assistTime = 0
	c.dedicatedMarkTime = 0
	c.fractionalMarkTime = 0
	c.idleMarkTime = 0

	// If this is the first GC cycle or we're operating on a very
	// small heap, fake heap_marked so it looks like gc_trigger is
	// the appropriate growth from heap_marked, even though the
	// real heap_marked may not have a meaningful value (on the
	// first cycle) or may be much smaller (resulting in a large
	// error response).
	if memstats.gc_trigger <= heapminimum {
		memstats.heap_marked = uint64(float64(memstats.gc_trigger) / (1 + c.triggerRatio))
	}

	// Re-compute the heap goal for this cycle in case something
	// changed. This is the same calculation we use elsewhere.
	memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100
	if gcpercent < 0 {
		memstats.next_gc = ^uint64(0)
	}

	// Ensure that the heap goal is at least a little larger than
	// the current live heap size. This may not be the case if GC
	// start is delayed or if the allocation that pushed heap_live
	// over gc_trigger is large or if the trigger is really close to
	// GOGC. Assist is proportional to this distance, so enforce a
	// minimum distance, even if it means going over the GOGC goal
	// by a tiny bit.
	if memstats.next_gc < memstats.heap_live+1024*1024 {
		memstats.next_gc = memstats.heap_live + 1024*1024
	}

	// Compute the total mark utilization goal and divide it among
	// dedicated and fractional workers.
	totalUtilizationGoal := float64(gomaxprocs) * gcGoalUtilization
	c.dedicatedMarkWorkersNeeded = int64(totalUtilizationGoal)
	c.fractionalUtilizationGoal = totalUtilizationGoal - float64(c.dedicatedMarkWorkersNeeded)
	if c.fractionalUtilizationGoal > 0 {
		c.fractionalMarkWorkersNeeded = 1
	} else {
		c.fractionalMarkWorkersNeeded = 0
	}
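
	// Worked example of the split above (illustrative numbers): with
	// gcGoalUtilization = 0.25 and GOMAXPROCS = 6,
	//
	//	totalUtilizationGoal = 6 * 0.25 = 1.5
	//	dedicatedMarkWorkersNeeded = int64(1.5) = 1
	//	fractionalUtilizationGoal = 1.5 - 1 = 0.5
	//
	// so one P marks full-time and the fractional worker aims for
	// half of one more P.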

	// Clear per-P state
	for _, p := range &allp {
		if p == nil {
			break
		}
		p.gcAssistTime = 0
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.revise()

	if debug.gcpacertrace > 0 {
		print("pacer: assist ratio=", c.assistWorkPerByte,
			" (scan ", memstats.heap_scan>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			memstats.next_gc>>20, " MB)",
			" workers=", c.dedicatedMarkWorkersNeeded,
			"+", c.fractionalMarkWorkersNeeded, "\n")
	}
}

// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called either under STW or
// whenever memstats.heap_scan or memstats.heap_live is updated (with
// mheap_.lock held).
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
//
// TODO: Consider removing the periodic controller update altogether.
// Since we switched to allocating black, in theory we shouldn't have
// to change the assist ratio. However, this is still a useful hook
// that we've found many uses for when experimenting.
func (c *gcControllerState) revise() {
	// Compute the expected scan work remaining.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heap_scan) and scan work completed
	// (scanWork), so this difference won't be changed by
	// allocations during GC.
	//
	// This particular estimate is a strict upper bound on the
	// possible remaining scan work for the current heap.
	// You might consider dividing this by 2 (or by
	// (100+GOGC)/100) to counter this over-estimation, but
	// benchmarks show that this has almost no effect on mean
	// mutator utilization, heap size, or assist time and it
	// introduces the danger of under-estimating and letting the
	// mutator outpace the garbage collector.
	scanWorkExpected := int64(memstats.heap_scan) - c.scanWork
	if scanWorkExpected < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the expected scan work
		// negative.
		scanWorkExpected = 1000
	}

	// Compute the heap distance remaining.
	heapDistance := int64(memstats.next_gc) - int64(memstats.heap_live)
	if heapDistance <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapDistance = 1
	}

	// Compute the mutator assist ratio so by the time the mutator
	// allocates the remaining heap bytes up to next_gc, it will
	// have done (or stolen) the remaining amount of scan work.
	c.assistWorkPerByte = float64(scanWorkExpected) / float64(heapDistance)
	c.assistBytesPerWork = float64(heapDistance) / float64(scanWorkExpected)
}
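
// For intuition (illustrative numbers, not from this file): if 10 MB of
// scannable heap remains (scanWorkExpected = 10<<20) and the mutator can
// allocate 5 MB more before reaching next_gc (heapDistance = 5<<20), then
//
//	assistWorkPerByte = float64(10<<20) / float64(5<<20) // 2.0
//	assistBytesPerWork = 1 / 2.0                         // 0.5
//
// i.e. each byte allocated during the cycle must pay for (or steal
// background credit for) two bytes of scan work.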

// endCycle updates the GC controller state at the end of the
// concurrent part of the GC cycle.
func (c *gcControllerState) endCycle() {
	h_t := c.triggerRatio // For debugging

	// Proportional response gain for the trigger controller. Must
	// be in [0, 1]. Lower values smooth out transient effects but
	// take longer to respond to phase changes. Higher values
	// react to phase changes quickly, but are more affected by
	// transient changes. Values near 1 may be unstable.
	const triggerGain = 0.5

	// Compute next cycle trigger ratio. First, this computes the
	// "error" for this cycle; that is, how far off the trigger
	// was from what it should have been, accounting for both heap
	// growth and GC CPU utilization. We compute the actual heap
	// growth during this cycle and scale that by how far off from
	// the goal CPU utilization we were (to estimate the heap
	// growth if we had the desired CPU utilization). The
	// difference between this estimate and the GOGC-based goal
	// heap growth is the error.
	goalGrowthRatio := float64(gcpercent) / 100
	actualGrowthRatio := float64(memstats.heap_live)/float64(memstats.heap_marked) - 1
	assistDuration := nanotime() - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcGoalUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime) / float64(assistDuration*int64(gomaxprocs))
	}

	triggerError := goalGrowthRatio - c.triggerRatio - utilization/gcGoalUtilization*(actualGrowthRatio-c.triggerRatio)

	// Finally, we adjust the trigger for next time by this error,
	// damped by the proportional gain.
	c.triggerRatio += triggerGain * triggerError
	if c.triggerRatio < 0 {
		// This can happen if the mutator is allocating very
		// quickly or the GC is scanning very slowly.
		c.triggerRatio = 0
	} else if c.triggerRatio > goalGrowthRatio*0.95 {
		// Ensure there's always a little margin so that the
		// mutator assist ratio isn't infinity.
		c.triggerRatio = goalGrowthRatio * 0.95
	}

	if debug.gcpacertrace > 0 {
		// Print controller state in terms of the design
		// document.
		H_m_prev := memstats.heap_marked
		H_T := memstats.gc_trigger
		h_a := actualGrowthRatio
		H_a := memstats.heap_live
		h_g := goalGrowthRatio
		H_g := int64(float64(H_m_prev) * (1 + h_g))
		u_a := utilization
		u_g := gcGoalUtilization
		W_a := c.scanWork
		print("pacer: H_m_prev=", H_m_prev,
			" h_t=", h_t, " H_T=", H_T,
			" h_a=", h_a, " H_a=", H_a,
			" h_g=", h_g, " H_g=", H_g,
			" u_a=", u_a, " u_g=", u_g,
			" W_a=", W_a,
			" goalΔ=", goalGrowthRatio-h_t,
			" actualΔ=", h_a-h_t,
			" u_a/u_g=", u_a/u_g,
			"\n")
	}
}
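
// For intuition (illustrative numbers, not from this file): with
// GOGC=100 the goal growth ratio is 1.0. If the trigger ratio was
// h_t = 0.7, the heap actually grew by h_a = 0.9 during the cycle, and
// utilization matched the goal (u_a == u_g), then
//
//	e := 1.0 - 0.7 - (0.9 - 0.7) // 0.1
//	h_t += 0.5 * e               // damped update: 0.75
//
// so the trigger ratio moves up toward the goal and the next cycle
// starts a little later.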

// enlistWorker encourages another dedicated mark worker to start on
// another P if there are spare worker slots. It is used by putfull
// when more work is made available.
//
//go:nowritebarrier
func (c *gcControllerState) enlistWorker() {
	// If there are idle Ps, wake one so it will run an idle worker.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
		return
	}

	// There are no idle Ps. If we need more dedicated workers,
	// try to preempt a running P so it will switch to a worker.
	if c.dedicatedMarkWorkersNeeded <= 0 {
		return
	}
	// Pick a random other P to preempt.
	if gomaxprocs <= 1 {
		return
	}
	gp := getg()
	if gp == nil || gp.m == nil || gp.m.p == 0 {
		return
	}
	myID := gp.m.p.ptr().id
	for tries := 0; tries < 5; tries++ {
		id := int32(fastrand() % uint32(gomaxprocs-1))
		if id >= myID {
			id++
		}
		p := allp[id]
		if p.status != _Prunning {
			continue
		}
		if preemptone(p) {
			return
		}
	}
}

// findRunnableGCWorker returns the background mark worker for _p_ if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}
	if _p_.gcBgMarkWorker == 0 {
		// The mark worker associated with this P is blocked
		// performing a mark transition. We can't run it
		// because it may be on some other run or wait queue.
		return nil
	}

	if !gcMarkWorkAvailable(_p_) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil
	}

	decIfPositive := func(ptr *int64) bool {
		if *ptr > 0 {
			if atomic.Xaddint64(ptr, -1) >= 0 {
				return true
			}
			// We lost a race
			atomic.Xaddint64(ptr, +1)
		}
		return false
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		_p_.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
		// TODO(austin): This P isn't going to run anything
		// else for a while, so kick everything out of its run
		// queue.
	} else {
		if !decIfPositive(&c.fractionalMarkWorkersNeeded) {
			// No more workers are needed right now.
			return nil
		}

		// This P has picked up the token for the fractional worker.
		// Is the GC currently under or at the utilization goal?
		// If so, do more work.
		//
		// We used to check whether doing one time slice of work
		// would remain under the utilization goal, but that has the
		// effect of delaying work until the mutator has run for
		// enough time slices to pay for the work. During those time
		// slices, write barriers are enabled, so the mutator is running slower.
		// Now instead we do the work whenever we're under or at the
		// utilization goal and pay for it by letting the mutator run later.
		// This doesn't change the overall utilization averages, but it
		// front loads the GC work so that the GC finishes earlier and
		// write barriers can be turned off sooner, effectively giving
		// the mutator a faster machine.
		//
		// The old, slower behavior can be restored by setting
		// gcForcePreemptNS = forcePreemptNS.
		const gcForcePreemptNS = 0

		// TODO(austin): We could fast path this and basically
		// eliminate contention on c.fractionalMarkWorkersNeeded by
		// precomputing the minimum time at which it's worth
		// next scheduling the fractional worker. Then Ps
		// don't have to fight in the window where we've
		// passed that deadline and no one has started the
		// worker yet.
		//
		// TODO(austin): Shorter preemption interval for mark
		// worker to improve fairness and give this
		// finer-grained control over schedule?
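
		// For example (illustrative numbers): with
		// fractionalUtilizationGoal = 0.5, if 10ms have passed since
		// mark started and the fractional worker has accumulated 6ms
		// of run time, then 6/10 = 0.6 > 0.5, so the check below
		// returns the token and skips the worker this time.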
		now := nanotime() - gcController.markStartTime
		then := now + gcForcePreemptNS
		timeUsed := c.fractionalMarkTime + gcForcePreemptNS
		if then > 0 && float64(timeUsed)/float64(then) > c.fractionalUtilizationGoal {
			// Nope, we'd overshoot the utilization goal
			atomic.Xaddint64(&c.fractionalMarkWorkersNeeded, +1)
			return nil
		}
		_p_.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker
	gp := _p_.gcBgMarkWorker.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if trace.enabled {
		traceGoUnpark(gp, 0)
	}
	return gp
}

// gcGoalUtilization is the goal CPU utilization for background
// marking as a fraction of GOMAXPROCS.
const gcGoalUtilization = 0.25

// gcCreditSlack is the amount of scan work credit that can
// accumulate locally before updating gcController.scanWork and,
// optionally, gcController.bgScanCredit. Lower values give a more
// accurate assist ratio and make it more likely that assists will
// successfully steal background credit. Higher values reduce memory
// contention.
const gcCreditSlack = 2000

// gcAssistTimeSlack is the nanoseconds of mutator assist time that
// can accumulate on a P before updating gcController.assistTime.
const gcAssistTimeSlack = 5000

// gcOverAssistWork determines how many extra units of scan work a GC
// assist does when an assist happens. This amortizes the cost of an
// assist by pre-paying for this many bytes of future allocations.
const gcOverAssistWork = 64 << 10
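
// For example (illustrative numbers): a goroutine that owes 1 KB of
// scan work when it allocates will perform roughly 1 KB + 64 KB of
// work, banking the surplus as credit so its next ~64 KB of
// allocations proceed without assisting again.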

var work struct {
	full  uint64                   // lock-free list of full blocks workbuf
	empty uint64                   // lock-free list of empty blocks workbuf
	pad0  [sys.CacheLineSize]uint8 // prevents false-sharing between full/empty and nproc/nwait

	// bytesMarked is the number of bytes marked this cycle. This
	// includes bytes blackened in scanned objects, noscan objects
	// that go straight to black, and permagrey objects scanned by
	// markroot during the concurrent scan phase. This is updated
	// atomically during the cycle. Updates may be batched
	// arbitrarily, since the value is only read at the end of the
	// cycle.
	//
	// Because of benign races during marking, this number may not
	// be the exact number of marked bytes, but it should be very
	// close.
	//
	// Put this field here because it needs 64-bit atomic access
	// (and thus 8-byte alignment even on 32-bit architectures).
	bytesMarked uint64

	markrootNext uint32 // next markroot job
	markrootJobs uint32 // number of markroot jobs

	nproc   uint32
	tstart  int64
	nwait   uint32
	ndone   uint32
	alldone note

	// helperDrainBlock indicates that GC mark termination helpers
	// should pass gcDrainBlock to gcDrain to block in the
	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
	//
	// TODO: This is a temporary fallback to support
	// debug.gcrescanstacks > 0 and to work around some known
	// races. Remove this when we remove the debug option and fix
	// the races.
	helperDrainBlock bool

	// Number of roots of various root types. Set by gcMarkRootPrepare.
	nFlushCacheRoots                                             int
	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots, nRescanRoots int

	// markrootDone indicates that roots have been marked at least
	// once during the current GC cycle. This is checked by root
	// marking operations that have to happen only during the
	// first root marking pass, whether that's during the
	// concurrent mark phase in current GC or mark termination in
	// STW GC.
	markrootDone bool

	// Each type of GC state transition is protected by a lock.
	// Since multiple threads can simultaneously detect the state
	// transition condition, any thread that detects a transition
	// condition must acquire the appropriate transition lock,
	// re-check the transition condition and return if it no
	// longer holds or perform the transition if it does.
	// Likewise, any transition must invalidate the transition
	// condition before releasing the lock. This ensures that each
	// transition is performed by exactly one thread and threads
	// that need the transition to happen block until it has
	// happened.
	//
	// startSema protects the transition from "off" to mark or
	// mark termination.
	startSema uint32
	// markDoneSema protects transitions from mark 1 to mark 2 and
	// from mark 2 to mark termination.
	markDoneSema uint32

	bgMarkReady note   // signal background mark worker has started
	bgMarkDone  uint32 // cas to 1 when at a background mark completion point
	// Background mark completion signaling

	// mode is the concurrency mode of the current GC cycle.
	mode gcMode

	// totaltime is the CPU nanoseconds spent in GC since the
	// program started if debug.gctrace > 0.
	totaltime int64

	// initialHeapLive is the value of memstats.heap_live at the
	// beginning of this GC cycle.
	initialHeapLive uint64

	// assistQueue is a queue of assists that are blocked because
	// there was neither enough credit to steal nor enough work to
	// do.
	assistQueue struct {
		lock       mutex
		head, tail guintptr
	}

	// rescan is a list of G's that need to be rescanned during
	// mark termination. A G adds itself to this list when it
	// first invalidates its stack scan.
	rescan struct {
		lock mutex
		list []guintptr
	}

	// Timing/utilization stats for this cycle.
	stwprocs, maxprocs                 int32
	tSweepTerm, tMark, tMarkTerm, tEnd int64 // nanotime() of phase start

	pauseNS    int64 // total STW time this cycle
	pauseStart int64 // nanotime() of last STW

	// debug.gctrace heap sizes for this cycle.
	heap0, heap1, heap2, heapGoal uint64
}

// GC runs a garbage collection and blocks the caller until the
// garbage collection is complete. It may also block the entire
// program.
func GC() {
	gcStart(gcForceBlockMode, false)
}

// gcMode indicates how concurrent a GC cycle should be.
type gcMode int

const (
	gcBackgroundMode gcMode = iota // concurrent GC and sweep
	gcForceMode                    // stop-the-world GC now, concurrent sweep
	gcForceBlockMode               // stop-the-world GC now and STW sweep (forced by user)
)
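
// The heap trigger itself is encoded in gcShouldStart below. For
// example (illustrative numbers): with gc_trigger = 6 MB, the first
// allocation that pushes heap_live to 6 MB or beyond makes
// gcShouldStart(false) return true when tested during allocation, and
// the allocating goroutine calls gcStart to begin the cycle.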

// gcShouldStart returns true if the exit condition for the _GCoff
// phase has been met. The exit condition should be tested when
// allocating.
//
// If forceTrigger is true, it ignores the current heap size, but
// checks all other conditions. In general this should be false.
func gcShouldStart(forceTrigger bool) bool {
	return gcphase == _GCoff && (forceTrigger || memstats.heap_live >= memstats.gc_trigger) && memstats.enablegc && panicking == 0 && gcpercent >= 0
}

// gcStart transitions the GC from _GCoff to _GCmark (if mode ==
// gcBackgroundMode) or _GCmarktermination (if mode !=
// gcBackgroundMode) by performing sweep termination and GC
// initialization.
//
// This may return without performing this transition in some cases,
// such as when called on a system stack or with locks held.
func gcStart(mode gcMode, forceTrigger bool) {
	// Since this is called from malloc and malloc is called in
	// the guts of a number of libraries that might be holding
	// locks, don't attempt to start GC in non-preemptible or
	// potentially unstable situations.
	mp := acquirem()
	if gp := getg(); gp == mp.g0 || mp.locks > 1 || mp.preemptoff != "" {
		releasem(mp)
		return
	}
	releasem(mp)
	mp = nil

	// Pick up the remaining unswept/not being swept spans concurrently
	//
	// This shouldn't happen if we're being invoked in background
	// mode since proportional sweep should have just finished
	// sweeping everything, but rounding errors, etc, may leave a
	// few spans unswept. In forced mode, this is necessary since
	// GC can be forced at any point in the sweeping cycle.
	//
	// We check the transition condition continuously here in case
	// this G gets delayed into the next GC cycle.
	for (mode != gcBackgroundMode || gcShouldStart(forceTrigger)) && gosweepone() != ^uintptr(0) {
		sweep.nbgsweep++
	}

	// Perform GC initialization and the sweep termination
	// transition.
	//
	// If this is a forced GC, don't acquire the transition lock
	// or re-check the transition condition because we
	// specifically *don't* want to share the transition with
	// another thread.
	useStartSema := mode == gcBackgroundMode
	if useStartSema {
		semacquire(&work.startSema, 0)
		// Re-check transition condition under transition lock.
		if !gcShouldStart(forceTrigger) {
			semrelease(&work.startSema)
			return
		}
	}

	// For stats, check if this GC was forced by the user.
	forced := mode != gcBackgroundMode

	// In gcstoptheworld debug mode, upgrade the mode accordingly.
	// We do this after re-checking the transition condition so
	// that multiple goroutines that detect the heap trigger don't
	// start multiple STW GCs.
	if mode == gcBackgroundMode {
		if debug.gcstoptheworld == 1 {
			mode = gcForceMode
		} else if debug.gcstoptheworld == 2 {
			mode = gcForceBlockMode
		}
	}

	// Ok, we're doing it! Stop everybody else
	semacquire(&worldsema, 0)

	if trace.enabled {
		traceGCStart()
	}

	if mode == gcBackgroundMode {
		gcBgMarkStartWorkers()
	}

	gcResetMarkState()

	now := nanotime()
	work.stwprocs, work.maxprocs = gcprocs(), gomaxprocs
	work.tSweepTerm = now
	work.heap0 = memstats.heap_live
	work.pauseNS = 0
	work.mode = mode

	work.pauseStart = now
	systemstack(stopTheWorldWithSema)
	// Finish sweep before we start concurrent scan.
	systemstack(func() {
		finishsweep_m()
	})
	// clearpools before we start the GC. If we wait, the memory will not be
	// reclaimed until the next GC cycle.
	clearpools()

	if mode == gcBackgroundMode { // Do as much work concurrently as possible
		gcController.startCycle()
		work.heapGoal = memstats.next_gc

		// Enter concurrent mark phase and enable
		// write barriers.
		//
		// Because the world is stopped, all Ps will
		// observe that write barriers are enabled by
		// the time we start the world and begin
		// scanning.
		//
		// It's necessary to enable write barriers
		// during the scan phase for several reasons:
		//
		// They must be enabled for writes to higher
		// stack frames before we scan stacks and
		// install stack barriers because this is how
		// we track writes to inactive stack frames.
		// (Alternatively, we could not install stack
		// barriers over frame boundaries with
		// up-pointers).
		//
		// They must be enabled before assists are
		// enabled because they must be enabled before
		// any non-leaf heap objects are marked. Since
		// allocations are blocked until assists can
		// happen, we want to enable assists as early as
		// possible.
		setGCPhase(_GCmark)

		gcBgMarkPrepare() // Must happen before assist enable.
		gcMarkRootPrepare()

		// Mark all active tinyalloc blocks. Since we're
		// allocating from these, they need to be black like
		// other allocations. The alternative is to blacken
		// the tiny block on every allocation from it, which
		// would slow down the tiny allocator.
		gcMarkTinyAllocs()

		// At this point all Ps have enabled the write
		// barrier, thus maintaining the no white to
		// black invariant. Enable mutator assists to
		// put back-pressure on fast allocating
		// mutators.
		atomic.Store(&gcBlackenEnabled, 1)

		// Assists and workers can start the moment we start
		// the world.
		gcController.markStartTime = now

		// Concurrent mark.
		systemstack(startTheWorldWithSema)
		now = nanotime()
		work.pauseNS += now - work.pauseStart
		work.tMark = now
	} else {
		t := nanotime()
		work.tMark, work.tMarkTerm = t, t
		work.heapGoal = work.heap0

		if forced {
			memstats.numforcedgc++
		}

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}

	if useStartSema {
		semrelease(&work.startSema)
	}
}

// gcMarkDone transitions the GC from mark 1 to mark 2 and from mark 2
// to mark termination.
//
// This should be called when all mark work has been drained. In mark
// 1, this includes all root marking jobs, global work buffers, and
// active work buffers in assists and background workers; however,
// work may still be cached in per-P work buffers. In mark 2, per-P
// caches are disabled.
//
// The calling context must be preemptible.
//
// Note that it is explicitly okay to have write barriers in this
// function because completion of concurrent mark is best-effort
// anyway. Any work created by write barriers here will be cleaned up
// by mark termination.
func gcMarkDone() {
top:
	semacquire(&work.markDoneSema, 0)

	// Re-check transition condition under transition lock.
	if !(gcphase == _GCmark && work.nwait == work.nproc && !gcMarkWorkAvailable(nil)) {
		semrelease(&work.markDoneSema)
		return
	}

	// Disallow starting new workers so that any remaining workers
	// in the current mark phase will drain out.
	//
	// TODO(austin): Should dedicated workers keep an eye on this
	// and exit gcDrain promptly?
	atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, -0xffffffff)
	atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, -0xffffffff)

	if !gcBlackenPromptly {
		// Transition from mark 1 to mark 2.
		//
		// The global work list is empty, but there can still be work
		// sitting in the per-P work caches.
		// Flush and disable work caches.

		// Disallow caching workbufs and indicate that we're in mark 2.
		gcBlackenPromptly = true

		// Prevent completion of mark 2 until we've flushed
		// cached workbufs.
		atomic.Xadd(&work.nwait, -1)

		// GC is set up for mark 2. Let Gs blocked on the
		// transition lock go while we flush caches.
		semrelease(&work.markDoneSema)

		systemstack(func() {
			// Flush all currently cached workbufs and
			// ensure all Ps see gcBlackenPromptly. This
			// also blocks until any remaining mark 1
			// workers have exited their loop so we can
			// start new mark 2 workers.
			forEachP(func(_p_ *p) {
				_p_.gcw.dispose()
			})
		})

		// Check that roots are marked. We should be able to
		// do this before the forEachP, but based on issue
		// #16083 there may be a (harmless) race where we can
		// enter mark 2 while some workers are still scanning
		// stacks. The forEachP ensures these scans are done.
		//
		// TODO(austin): Figure out the race and fix this
		// properly.
		gcMarkRootCheck()

		// Now we can start up mark 2 workers.
		atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 0xffffffff)
		atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 0xffffffff)

		incnwait := atomic.Xadd(&work.nwait, +1)
		if incnwait == work.nproc && !gcMarkWorkAvailable(nil) {
			// This loop will make progress because
			// gcBlackenPromptly is now true, so it won't
			// take this same "if" branch.
			goto top
		}
	} else {
		// Transition to mark termination.
		now := nanotime()
		work.tMarkTerm = now
		work.pauseStart = now
		getg().m.preemptoff = "gcing"
		systemstack(stopTheWorldWithSema)
		// The gcphase is _GCmark; it will transition to _GCmarktermination
		// below. The important thing is that the write barrier remains
		// active until all marking is complete. This includes writes
		// made by the GC.

		// Record that one root marking pass has completed.
		work.markrootDone = true

		// Disable assists and background workers. We must do
		// this before waking blocked assists.
		atomic.Store(&gcBlackenEnabled, 0)

		// Wake all blocked assists. These will run when we
		// start the world again.
		gcWakeAllAssists()

		// Likewise, release the transition lock. Blocked
		// workers and assists will run when we start the
		// world again.
		semrelease(&work.markDoneSema)

		// endCycle depends on all gcWork cache stats being
		// flushed. This is ensured by mark 2.
		gcController.endCycle()

		// Perform mark termination. This will restart the world.
		gcMarkTermination()
	}
}

func gcMarkTermination() {
	// World is stopped.
	// Start marktermination which includes enabling the write barrier.
	atomic.Store(&gcBlackenEnabled, 0)
	gcBlackenPromptly = false
	setGCPhase(_GCmarktermination)

	work.heap1 = memstats.heap_live
	startTime := nanotime()

	mp := acquirem()
	mp.preemptoff = "gcing"
	_g_ := getg()
	_g_.m.traceback = 2
	gp := _g_.m.curg
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "garbage collection"

	// Run gc on the g0 stack. We do this so that the g stack
	// we're currently running on will no longer change. Cuts
	// the root set down a bit (g0 stacks are not scanned, and
	// we don't need to scan gc's internal state). We also
	// need to switch to g0 so we can shrink the stack.
	systemstack(func() {
		gcMark(startTime)
		// Must return immediately.
		// The outer function's stack may have moved
		// during gcMark (it shrinks stacks, including the
		// outer function's stack), so we must not refer
		// to any of its variables. Return back to the
		// non-system stack to pick up the new addresses
		// before continuing.
	})

	systemstack(func() {
		work.heap2 = work.bytesMarked
		if debug.gccheckmark > 0 {
			// Run a full stop-the-world mark using checkmark bits,
			// to check that we didn't forget to mark anything during
			// the concurrent mark process.
			gcResetMarkState()
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}

		// marking is complete so we can turn the write barrier off
		setGCPhase(_GCoff)
		gcSweep(work.mode)

		if debug.gctrace > 1 {
			startTime = nanotime()
			// The g stacks have been scanned so
			// they have gcscanvalid==true and gcworkdone==true.
			// Reset these so that all stacks will be rescanned.
			gcResetMarkState()
			finishsweep_m()

			// Still in STW but gcphase is _GCoff, reset to _GCmarktermination
			// At this point all objects will be found during the gcMark which
			// does a complete STW mark and object scan.
			setGCPhase(_GCmarktermination)
			gcMark(startTime)
			setGCPhase(_GCoff) // marking is done, turn off wb.
			gcSweep(work.mode)
		}
	})

	_g_.m.traceback = 0
	casgstatus(gp, _Gwaiting, _Grunning)

	if trace.enabled {
		traceGCDone()
	}

	// all done
	mp.preemptoff = ""

	if gcphase != _GCoff {
		throw("gc done but gcphase != _GCoff")
	}

	// Update timing memstats
	now, unixNow := nanotime(), unixnanotime()
	work.pauseNS += now - work.pauseStart
	work.tEnd = now
	atomic.Store64(&memstats.last_gc, uint64(unixNow)) // must be Unix time to make sense to user
	memstats.pause_ns[memstats.numgc%uint32(len(memstats.pause_ns))] = uint64(work.pauseNS)
	memstats.pause_end[memstats.numgc%uint32(len(memstats.pause_end))] = uint64(unixNow)
	memstats.pause_total_ns += uint64(work.pauseNS)

	// Update work.totaltime.
	sweepTermCpu := int64(work.stwprocs) * (work.tMark - work.tSweepTerm)
	// We report idle marking time below, but omit it from the
	// overall utilization here since it's "free".
	markCpu := gcController.assistTime + gcController.dedicatedMarkTime + gcController.fractionalMarkTime
	markTermCpu := int64(work.stwprocs) * (work.tEnd - work.tMarkTerm)
	cycleCpu := sweepTermCpu + markCpu + markTermCpu
	work.totaltime += cycleCpu

	// Compute overall GC CPU utilization.
	totalCpu := sched.totaltime + (now-sched.procresizetime)*int64(gomaxprocs)
	memstats.gc_cpu_fraction = float64(work.totaltime) / float64(totalCpu)

	memstats.numgc++

	// Reset sweep state.
	sweep.nbgsweep = 0
	sweep.npausesweep = 0

	systemstack(startTheWorldWithSema)

	// Update heap profile stats if gcSweep didn't do it. This is
	// relatively expensive, so we don't want to do it while the
	// world is stopped, but it needs to happen ASAP after
	// starting the world to prevent too many allocations from the
	// next cycle leaking in. It must happen before releasing
	// worldsema since there are applications that do a
	// runtime.GC() to update the heap profile and then
	// immediately collect the profile.
	if _ConcurrentSweep && work.mode != gcForceBlockMode {
		mProf_GC()
	}

	// Free stack spans. This must be done between GC cycles.
	systemstack(freeStackSpans)

	// Best-effort remove stack barriers so they don't get in the
	// way of things like GDB and perf.
	lock(&allglock)
	myallgs := allgs
	unlock(&allglock)
	gcTryRemoveAllStackBarriers(myallgs)

	// Print gctrace before dropping worldsema. As soon as we drop
	// worldsema another cycle could start and smash the stats
	// we're trying to print.
	if debug.gctrace > 0 {
		util := int(memstats.gc_cpu_fraction * 100)

		var sbuf [24]byte
		printlock()
		print("gc ", memstats.numgc,
			" @", string(itoaDiv(sbuf[:], uint64(work.tSweepTerm-runtimeInitTime)/1e6, 3)), "s ",
			util, "%: ")
		prev := work.tSweepTerm
		for i, ns := range []int64{work.tMark, work.tMarkTerm, work.tEnd} {
			if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns-prev))))
			prev = ns
		}
		print(" ms clock, ")
		for i, ns := range []int64{sweepTermCpu, gcController.assistTime, gcController.dedicatedMarkTime + gcController.fractionalMarkTime, gcController.idleMarkTime, markTermCpu} {
			if i == 2 || i == 3 {
				// Separate mark time components with /.
				print("/")
			} else if i != 0 {
				print("+")
			}
			print(string(fmtNSAsMS(sbuf[:], uint64(ns))))
		}
		print(" ms cpu, ",
			work.heap0>>20, "->", work.heap1>>20, "->", work.heap2>>20, " MB, ",
			work.heapGoal>>20, " MB goal, ",
			work.maxprocs, " P")
		if work.mode != gcBackgroundMode {
			print(" (forced)")
		}
		print("\n")
		printunlock()
	}

	semrelease(&worldsema)
	// Careful: another GC cycle may start now.

	releasem(mp)
	mp = nil

	// now that gc is done, kick off finalizer thread if needed
	if !concurrentSweep {
		// give the queued finalizers, if any, a chance to run
		Gosched()
	}
}
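
// With debug.gctrace > 0, the block above prints one line per cycle.
// An illustrative line (values invented for the example):
//
//	gc 7 @2.104s 1%: 0.023+3.2+0.47 ms clock, 0.092+0.71/3.1/6.2+1.8 ms cpu, 4->4->2 MB, 5 MB goal, 4 P
//
// read as: wall-clock sweep termination + concurrent mark + mark
// termination; CPU time split as sweepTerm + assist/dedicated+fractional/idle
// + markTerm; heap size at start, at mark termination, and marked bytes;
// the goal; and GOMAXPROCS.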

// gcBgMarkStartWorkers prepares background mark worker goroutines.
// These goroutines will not run until the mark phase, but they must
// be started while the work is not stopped and from a regular G
// stack. The caller must hold worldsema.
func gcBgMarkStartWorkers() {
	// Background marking is performed by per-P G's. Ensure that
	// each P has a background GC G.
	for _, p := range &allp {
		if p == nil || p.status == _Pdead {
			break
		}
		if p.gcBgMarkWorker == 0 {
			go gcBgMarkWorker(p)
			notetsleepg(&work.bgMarkReady, -1)
			noteclear(&work.bgMarkReady)
		}
	}
}

// gcBgMarkPrepare sets up state for background marking.
// Mutator assists must not yet be enabled.
func gcBgMarkPrepare() {
	// Background marking will stop when the work queues are empty
	// and there are no more workers (note that, since this is
	// concurrent, this may be a transient state, but mark
	// termination will clean it up). Between background workers
	// and assists, we don't really know how many workers there
	// will be, so we pretend to have an arbitrarily large number
	// of workers, almost all of which are "waiting". While a
	// worker is working it decrements nwait. If nproc == nwait,
	// there are no workers.
	work.nproc = ^uint32(0)
	work.nwait = ^uint32(0)
}

func gcBgMarkWorker(_p_ *p) {
	gp := getg()

	type parkInfo struct {
		m      muintptr // Release this m on park.
		attach puintptr // If non-nil, attach to this p on park.
	}
	// We pass park to a gopark unlock function, so it can't be on
	// the stack (see gopark). Prevent deadlock from recursively
	// starting GC by disabling preemption.
	gp.m.preemptoff = "GC worker init"
	park := new(parkInfo)
	gp.m.preemptoff = ""

	park.m.set(acquirem())
	park.attach.set(_p_)
	// Inform gcBgMarkStartWorkers that this worker is ready.
	// After this point, the background mark worker is scheduled
	// cooperatively by gcController.findRunnable. Hence, it must
	// never be preempted, as this would put it into _Grunnable
	// and put it on a run queue. Instead, when the preempt flag
	// is set, this puts itself into _Gwaiting to be woken up by
	// gcController.findRunnable at the appropriate time.
	notewakeup(&work.bgMarkReady)

	for {
		// Go to sleep until woken by gcController.findRunnable.
		// We can't releasem yet since even the call to gopark
		// may be preempted.
		gopark(func(g *g, parkp unsafe.Pointer) bool {
			park := (*parkInfo)(parkp)

			// The worker G is no longer running, so it's
			// now safe to allow preemption.
			releasem(park.m.ptr())

			// If the worker isn't attached to its P,
			// attach now. During initialization and after
			// a phase change, the worker may have been
			// running on a different P. As soon as we
			// attach, the owner P may schedule the
			// worker, so this must be done after the G is
			// stopped.
			if park.attach != 0 {
				p := park.attach.ptr()
				park.attach.set(nil)
				// cas the worker because we may be
				// racing with a new worker starting
				// on this P.
				if !p.gcBgMarkWorker.cas(0, guintptr(unsafe.Pointer(g))) {
					// The P got a new worker.
					// Exit this worker.
					return false
				}
			}
			return true
		}, unsafe.Pointer(park), "GC worker (idle)", traceEvGoBlock, 0)

		// Loop until the P dies and disassociates this
		// worker (the P may later be reused, in which case
		// it will get a new worker) or we failed to associate.
		if _p_.gcBgMarkWorker.ptr() != gp {
			break
		}

		// Disable preemption so we can use the gcw. If the
		// scheduler wants to preempt us, we'll stop draining,
		// dispose the gcw, and then preempt.
1501 park.m.set(acquirem()) 1502 1503 if gcBlackenEnabled == 0 { 1504 throw("gcBgMarkWorker: blackening not enabled") 1505 } 1506 1507 startTime := nanotime() 1508 1509 decnwait := atomic.Xadd(&work.nwait, -1) 1510 if decnwait == work.nproc { 1511 println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc) 1512 throw("work.nwait was > work.nproc") 1513 } 1514 1515 systemstack(func() { 1516 // Mark our goroutine preemptible so its stack 1517 // can be scanned. This lets two mark workers 1518 // scan each other (otherwise, they would 1519 // deadlock). We must not modify anything on 1520 // the G stack. However, stack shrinking is 1521 // disabled for mark workers, so it is safe to 1522 // read from the G stack. 1523 casgstatus(gp, _Grunning, _Gwaiting) 1524 switch _p_.gcMarkWorkerMode { 1525 default: 1526 throw("gcBgMarkWorker: unexpected gcMarkWorkerMode") 1527 case gcMarkWorkerDedicatedMode: 1528 gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit) 1529 case gcMarkWorkerFractionalMode: 1530 gcDrain(&_p_.gcw, gcDrainUntilPreempt|gcDrainFlushBgCredit) 1531 case gcMarkWorkerIdleMode: 1532 gcDrain(&_p_.gcw, gcDrainIdle|gcDrainUntilPreempt|gcDrainFlushBgCredit) 1533 } 1534 casgstatus(gp, _Gwaiting, _Grunning) 1535 }) 1536 1537 // If we are nearing the end of mark, dispose 1538 // of the cache promptly. We must do this 1539 // before signaling that we're no longer 1540 // working so that other workers can't observe 1541 // no workers and no work while we have this 1542 // cached, and before we compute done. 1543 if gcBlackenPromptly { 1544 _p_.gcw.dispose() 1545 } 1546 1547 // Account for time. 1548 duration := nanotime() - startTime 1549 switch _p_.gcMarkWorkerMode { 1550 case gcMarkWorkerDedicatedMode: 1551 atomic.Xaddint64(&gcController.dedicatedMarkTime, duration) 1552 atomic.Xaddint64(&gcController.dedicatedMarkWorkersNeeded, 1) 1553 case gcMarkWorkerFractionalMode: 1554 atomic.Xaddint64(&gcController.fractionalMarkTime, duration) 1555 atomic.Xaddint64(&gcController.fractionalMarkWorkersNeeded, 1) 1556 case gcMarkWorkerIdleMode: 1557 atomic.Xaddint64(&gcController.idleMarkTime, duration) 1558 } 1559 1560 // Was this the last worker and did we run out 1561 // of work? 1562 incnwait := atomic.Xadd(&work.nwait, +1) 1563 if incnwait > work.nproc { 1564 println("runtime: p.gcMarkWorkerMode=", _p_.gcMarkWorkerMode, 1565 "work.nwait=", incnwait, "work.nproc=", work.nproc) 1566 throw("work.nwait > work.nproc") 1567 } 1568 1569 // If this worker reached a background mark completion 1570 // point, signal the main GC goroutine. 1571 if incnwait == work.nproc && !gcMarkWorkAvailable(nil) { 1572 // Make this G preemptible and disassociate it 1573 // as the worker for this P so 1574 // findRunnableGCWorker doesn't try to 1575 // schedule it. 1576 _p_.gcBgMarkWorker.set(nil) 1577 releasem(park.m.ptr()) 1578 1579 gcMarkDone() 1580 1581 // Disable preemption and prepare to reattach 1582 // to the P. 1583 // 1584 // We may be running on a different P at this 1585 // point, so we can't reattach until this G is 1586 // parked. 1587 park.m.set(acquirem()) 1588 park.attach.set(_p_) 1589 } 1590 } 1591 } 1592 1593 // gcMarkWorkAvailable returns true if executing a mark worker 1594 // on p is potentially useful. p may be nil, in which case it only 1595 // checks the global sources of work. 
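// This is consulted both by the scheduler, to decide whether to run
// a mark worker on a P (see findRunnableGCWorker), and by workers
// themselves, to detect completion points (see gcBgMarkWorker above).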
1596 func gcMarkWorkAvailable(p *p) bool { 1597 if p != nil && !p.gcw.empty() { 1598 return true 1599 } 1600 if atomic.Load64(&work.full) != 0 { 1601 return true // global work available 1602 } 1603 if work.markrootNext < work.markrootJobs { 1604 return true // root scan work available 1605 } 1606 return false 1607 } 1608 1609 // gcMark runs the mark (or, for concurrent GC, mark termination) 1610 // All gcWork caches must be empty. 1611 // STW is in effect at this point. 1612 //TODO go:nowritebarrier 1613 func gcMark(start_time int64) { 1614 if debug.allocfreetrace > 0 { 1615 tracegc() 1616 } 1617 1618 if gcphase != _GCmarktermination { 1619 throw("in gcMark expecting to see gcphase as _GCmarktermination") 1620 } 1621 work.tstart = start_time 1622 1623 // Queue root marking jobs. 1624 gcMarkRootPrepare() 1625 1626 work.nwait = 0 1627 work.ndone = 0 1628 work.nproc = uint32(gcprocs()) 1629 1630 if debug.gcrescanstacks == 0 && work.full == 0 && work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots+work.nRescanRoots == 0 { 1631 // There's no work on the work queue and no root jobs 1632 // that can produce work, so don't bother entering the 1633 // getfull() barrier. 1634 // 1635 // With the hybrid barrier enabled, this will be the 1636 // situation the vast majority of the time after 1637 // concurrent mark. However, we still need a fallback 1638 // for STW GC and because there are some known races 1639 // that occasionally leave work around for mark 1640 // termination. 1641 // 1642 // We're still hedging our bets here: if we do 1643 // accidentally produce some work, we'll still process 1644 // it, just not necessarily in parallel. 1645 // 1646 // TODO(austin): When we eliminate 1647 // debug.gcrescanstacks: fix the races, and remove 1648 // work draining from mark termination so we don't 1649 // need the fallback path. 1650 work.helperDrainBlock = false 1651 } else { 1652 work.helperDrainBlock = true 1653 } 1654 1655 if trace.enabled { 1656 traceGCScanStart() 1657 } 1658 1659 if work.nproc > 1 { 1660 noteclear(&work.alldone) 1661 helpgc(int32(work.nproc)) 1662 } 1663 1664 gchelperstart() 1665 1666 gcw := &getg().m.p.ptr().gcw 1667 if work.helperDrainBlock { 1668 gcDrain(gcw, gcDrainBlock) 1669 } else { 1670 gcDrain(gcw, gcDrainNoBlock) 1671 } 1672 gcw.dispose() 1673 1674 if debug.gccheckmark > 0 { 1675 // This is expensive when there's a large number of 1676 // Gs, so only do it if checkmark is also enabled. 1677 gcMarkRootCheck() 1678 } 1679 if work.full != 0 { 1680 throw("work.full != 0") 1681 } 1682 1683 if work.nproc > 1 { 1684 notesleep(&work.alldone) 1685 } 1686 1687 // Record that at least one root marking pass has completed. 1688 work.markrootDone = true 1689 1690 // Double-check that all gcWork caches are empty. This should 1691 // be ensured by mark 2 before we enter mark termination. 1692 for i := 0; i < int(gomaxprocs); i++ { 1693 gcw := &allp[i].gcw 1694 if !gcw.empty() { 1695 throw("P has cached GC work at end of mark termination") 1696 } 1697 if gcw.scanWork != 0 || gcw.bytesMarked != 0 { 1698 throw("P has unflushed stats at end of mark termination") 1699 } 1700 } 1701 1702 if trace.enabled { 1703 traceGCScanDone() 1704 } 1705 1706 cachestats() 1707 1708 // Update the marked heap stat. 1709 memstats.heap_marked = work.bytesMarked 1710 1711 // Trigger the next GC cycle when the allocated heap has grown 1712 // by triggerRatio over the marked heap size. 
Assume that 1713 // we're in steady state, so the marked heap size is the 1714 // same now as it was at the beginning of the GC cycle. 1715 memstats.gc_trigger = uint64(float64(memstats.heap_marked) * (1 + gcController.triggerRatio)) 1716 if memstats.gc_trigger < heapminimum { 1717 memstats.gc_trigger = heapminimum 1718 } 1719 if int64(memstats.gc_trigger) < 0 { 1720 print("next_gc=", memstats.next_gc, " bytesMarked=", work.bytesMarked, " heap_live=", memstats.heap_live, " initialHeapLive=", work.initialHeapLive, "\n") 1721 throw("gc_trigger underflow") 1722 } 1723 1724 // Update other GC heap size stats. This must happen after 1725 // cachestats (which flushes local statistics to these) and 1726 // flushallmcaches (which modifies heap_live). 1727 memstats.heap_live = work.bytesMarked 1728 memstats.heap_scan = uint64(gcController.scanWork) 1729 1730 minTrigger := memstats.heap_live + sweepMinHeapDistance*uint64(gcpercent)/100 1731 if memstats.gc_trigger < minTrigger { 1732 // The allocated heap is already past the trigger. 1733 // This can happen if the triggerRatio is very low and 1734 // the marked heap is less than the live heap size. 1735 // 1736 // Concurrent sweep happens in the heap growth from 1737 // heap_live to gc_trigger, so bump gc_trigger up to ensure 1738 // that concurrent sweep has some heap growth in which 1739 // to perform sweeping before we start the next GC 1740 // cycle. 1741 memstats.gc_trigger = minTrigger 1742 } 1743 1744 // The next GC cycle should finish before the allocated heap 1745 // has grown by GOGC/100. 1746 memstats.next_gc = memstats.heap_marked + memstats.heap_marked*uint64(gcpercent)/100 1747 if gcpercent < 0 { 1748 memstats.next_gc = ^uint64(0) 1749 } 1750 if memstats.next_gc < memstats.gc_trigger { 1751 memstats.next_gc = memstats.gc_trigger 1752 } 1753 1754 if trace.enabled { 1755 traceHeapAlloc() 1756 traceNextGC() 1757 } 1758 } 1759 1760 func gcSweep(mode gcMode) { 1761 if gcphase != _GCoff { 1762 throw("gcSweep being done but phase is not GCoff") 1763 } 1764 1765 lock(&mheap_.lock) 1766 mheap_.sweepgen += 2 1767 mheap_.sweepdone = 0 1768 if mheap_.sweepSpans[mheap_.sweepgen/2%2].index != 0 { 1769 // We should have drained this list during the last 1770 // sweep phase. We certainly need to start this phase 1771 // with an empty swept list. 1772 throw("non-empty swept list") 1773 } 1774 unlock(&mheap_.lock) 1775 1776 if !_ConcurrentSweep || mode == gcForceBlockMode { 1777 // Special case synchronous sweep. 1778 // Record that no proportional sweeping has to happen. 1779 lock(&mheap_.lock) 1780 mheap_.sweepPagesPerByte = 0 1781 mheap_.pagesSwept = 0 1782 unlock(&mheap_.lock) 1783 // Sweep all spans eagerly. 1784 for sweepone() != ^uintptr(0) { 1785 sweep.npausesweep++ 1786 } 1787 // Do an additional mProf_GC, because all 'free' events are now real as well. 1788 mProf_GC() 1789 mProf_GC() 1790 return 1791 } 1792 1793 // Concurrent sweep needs to sweep all of the in-use pages by 1794 // the time the allocated heap reaches the GC trigger. Compute 1795 // the ratio of in-use pages to sweep per byte allocated. 1796 heapDistance := int64(memstats.gc_trigger) - int64(memstats.heap_live) 1797 // Add a little margin so rounding errors and concurrent 1798 // sweep are less likely to leave pages unswept when GC starts. 
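// The margin below is 1 MB. As a rough illustration of the
// resulting pacing: with 512 MB of pages in use and the trigger
// 256 MB of allocation away, the ratio computed here charges two
// pages of sweeping per page's worth of allocation, so all in-use
// pages have been swept by the time the heap reaches the trigger.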
1799 heapDistance -= 1024 * 1024 1800 if heapDistance < _PageSize { 1801 // Avoid setting the sweep ratio extremely high 1802 heapDistance = _PageSize 1803 } 1804 lock(&mheap_.lock) 1805 mheap_.sweepPagesPerByte = float64(mheap_.pagesInUse) / float64(heapDistance) 1806 mheap_.pagesSwept = 0 1807 mheap_.spanBytesAlloc = 0 1808 unlock(&mheap_.lock) 1809 1810 // Background sweep. 1811 lock(&sweep.lock) 1812 if sweep.parked { 1813 sweep.parked = false 1814 ready(sweep.g, 0, true) 1815 } 1816 unlock(&sweep.lock) 1817 } 1818 1819 // gcResetMarkState resets global state prior to marking (concurrent 1820 // or STW) and resets the stack scan state of all Gs. 1821 // 1822 // This is safe to do without the world stopped because any Gs created 1823 // during or after this will start out in the reset state. 1824 func gcResetMarkState() { 1825 // This may be called during a concurrent phase, so make sure 1826 // allgs doesn't change. 1827 if !(gcphase == _GCoff || gcphase == _GCmarktermination) { 1828 // Accessing gcRescan is unsafe. 1829 throw("bad GC phase") 1830 } 1831 lock(&allglock) 1832 for _, gp := range allgs { 1833 gp.gcscandone = false // set to true in gcphasework 1834 gp.gcscanvalid = false // stack has not been scanned 1835 gp.gcRescan = -1 1836 gp.gcAssistBytes = 0 1837 } 1838 unlock(&allglock) 1839 1840 // Clear rescan list. 1841 work.rescan.list = work.rescan.list[:0] 1842 1843 work.bytesMarked = 0 1844 work.initialHeapLive = memstats.heap_live 1845 work.markrootDone = false 1846 } 1847 1848 // Hooks for other packages 1849 1850 var poolcleanup func() 1851 1852 //go:linkname sync_runtime_registerPoolCleanup sync.runtime_registerPoolCleanup 1853 func sync_runtime_registerPoolCleanup(f func()) { 1854 poolcleanup = f 1855 } 1856 1857 func clearpools() { 1858 // clear sync.Pools 1859 if poolcleanup != nil { 1860 poolcleanup() 1861 } 1862 1863 // Clear central sudog cache. 1864 // Leave per-P caches alone, they have strictly bounded size. 1865 // Disconnect cached list before dropping it on the floor, 1866 // so that a dangling ref to one entry does not pin all of them. 1867 lock(&sched.sudoglock) 1868 var sg, sgnext *sudog 1869 for sg = sched.sudogcache; sg != nil; sg = sgnext { 1870 sgnext = sg.next 1871 sg.next = nil 1872 } 1873 sched.sudogcache = nil 1874 unlock(&sched.sudoglock) 1875 1876 // Clear central defer pools. 1877 // Leave per-P pools alone, they have strictly bounded size. 1878 lock(&sched.deferlock) 1879 for i := range sched.deferpool { 1880 // disconnect cached list before dropping it on the floor, 1881 // so that a dangling ref to one entry does not pin all of them. 
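// sched.deferpool holds one cached list per defer size class
// (see panic.go); this iteration unlinks one class's list.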
1882 var d, dlink *_defer 1883 for d = sched.deferpool[i]; d != nil; d = dlink { 1884 dlink = d.link 1885 d.link = nil 1886 } 1887 sched.deferpool[i] = nil 1888 } 1889 unlock(&sched.deferlock) 1890 } 1891 1892 // Timing 1893 1894 //go:nowritebarrier 1895 func gchelper() { 1896 _g_ := getg() 1897 _g_.m.traceback = 2 1898 gchelperstart() 1899 1900 if trace.enabled { 1901 traceGCScanStart() 1902 } 1903 1904 // Parallel mark over GC roots and heap 1905 if gcphase == _GCmarktermination { 1906 gcw := &_g_.m.p.ptr().gcw 1907 if work.helperDrainBlock { 1908 gcDrain(gcw, gcDrainBlock) // blocks in getfull 1909 } else { 1910 gcDrain(gcw, gcDrainNoBlock) 1911 } 1912 gcw.dispose() 1913 } 1914 1915 if trace.enabled { 1916 traceGCScanDone() 1917 } 1918 1919 nproc := work.nproc // work.nproc can change right after we increment work.ndone 1920 if atomic.Xadd(&work.ndone, +1) == nproc-1 { 1921 notewakeup(&work.alldone) 1922 } 1923 _g_.m.traceback = 0 1924 } 1925 1926 func gchelperstart() { 1927 _g_ := getg() 1928 1929 if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc { 1930 throw("gchelperstart: bad m->helpgc") 1931 } 1932 if _g_ != _g_.m.g0 { 1933 throw("gchelper not running on g0 stack") 1934 } 1935 } 1936 1937 // itoaDiv formats val/(10**dec) into buf. 1938 func itoaDiv(buf []byte, val uint64, dec int) []byte { 1939 i := len(buf) - 1 1940 idec := i - dec 1941 for val >= 10 || i >= idec { 1942 buf[i] = byte(val%10 + '0') 1943 i-- 1944 if i == idec { 1945 buf[i] = '.' 1946 i-- 1947 } 1948 val /= 10 1949 } 1950 buf[i] = byte(val + '0') 1951 return buf[i:] 1952 } 1953 1954 // fmtNSAsMS nicely formats ns nanoseconds as milliseconds. 1955 func fmtNSAsMS(buf []byte, ns uint64) []byte { 1956 if ns >= 10e6 { 1957 // Format as whole milliseconds. 1958 return itoaDiv(buf, ns/1e6, 0) 1959 } 1960 // Format two digits of precision, with at most three decimal places. 1961 x := ns / 1e3 1962 if x == 0 { 1963 buf[0] = '0' 1964 return buf[:1] 1965 } 1966 dec := 3 1967 for x >= 100 { 1968 x /= 10 1969 dec-- 1970 } 1971 return itoaDiv(buf, x, dec) 1972 }
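// For reference, a few values produced by the helpers above,
// assuming a scratch buffer of at least 24 bytes as used by the
// gctrace printer:
//
//	itoaDiv(buf, 12345, 3)   -> "12.345"  // 12345/10^3
//	fmtNSAsMS(buf, 15000000) -> "15"      // >= 10ms: whole milliseconds
//	fmtNSAsMS(buf, 2500000)  -> "2.5"     // two significant digits
//	fmtNSAsMS(buf, 999)      -> "0"       // sub-microsecond rounds to 0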