// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/cpu"
	"internal/goexperiment"
	"runtime/internal/atomic"
	_ "unsafe" // for go:linkname
)

const (
	// gcGoalUtilization is the goal CPU utilization for
	// marking as a fraction of GOMAXPROCS.
	//
	// Increasing the goal utilization will shorten GC cycles as the GC
	// has more resources behind it, lessening costs from the write barrier,
	// but comes at the cost of increasing mutator latency.
	gcGoalUtilization = gcBackgroundUtilization

	// gcBackgroundUtilization is the fixed CPU utilization for background
	// marking. It must be <= gcGoalUtilization. The difference between
	// gcGoalUtilization and gcBackgroundUtilization will be made up by
	// mark assists. The scheduler will aim to use within 50% of this
	// goal.
	//
	// As a general rule, there's little reason to set gcBackgroundUtilization
	// < gcGoalUtilization. One reason might be in mostly idle applications,
	// where goroutines are unlikely to assist at all, so the actual
	// utilization will be lower than the goal. But this is a moot point
	// because the idle mark workers already soak up idle CPU resources.
	// These two values are still kept separate, however, because they are
	// distinct conceptually, and in previous iterations of the pacer the
	// distinction was more important.
	gcBackgroundUtilization = 0.25

	// gcCreditSlack is the amount of scan work credit that can
	// accumulate locally before updating gcController.heapScanWork and,
	// optionally, gcController.bgScanCredit. Lower values give a more
	// accurate assist ratio and make it more likely that assists will
	// successfully steal background credit. Higher values reduce memory
	// contention.
	gcCreditSlack = 2000

	// gcAssistTimeSlack is the nanoseconds of mutator assist time that
	// can accumulate on a P before updating gcController.assistTime.
	gcAssistTimeSlack = 5000

	// gcOverAssistWork determines how many extra units of scan work a GC
	// assist does when an assist happens. This amortizes the cost of an
	// assist by pre-paying for this many bytes of future allocations.
	gcOverAssistWork = 64 << 10

	// defaultHeapMinimum is the value of heapMinimum for GOGC==100.
	defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) +
		(1-goexperiment.HeapMinimum512KiBInt)*(4<<20)

	// maxStackScanSlack is the bytes of stack space allocated or freed
	// that can accumulate on a P before updating gcController.maxStackScan.
	maxStackScanSlack = 8 << 10

	// memoryLimitHeapGoalHeadroom is the amount of headroom the pacer gives to
	// the heap goal when operating in the memory-limited regime. That is,
	// it'll reduce the heap goal by this many extra bytes off of the base
	// calculation.
	memoryLimitHeapGoalHeadroom = 1 << 20
)
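// For illustration (not part of the runtime): defaultHeapMinimum selects
// between two constants using the 0-or-1 goexperiment integer, a common
// branch-free pattern. A minimal standalone sketch, with exp standing in for
// goexperiment.HeapMinimum512KiBInt:
//
//	exp := 0 // 1 if the HeapMinimum512KiB experiment is enabled
//	min := exp*(512<<10) + (1-exp)*(4<<20)
//	// exp == 0 => min == 4 MiB; exp == 1 => min == 512 KiB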
// gcController implements the GC pacing controller that determines
// when to trigger concurrent garbage collection and how much marking
// work to do in mutator assists and background marking.
//
// It calculates the ratio between the allocation rate (in terms of CPU
// time) and the GC scan throughput to determine the heap size at which to
// trigger a GC cycle such that no GC assists are required to finish on time.
// This algorithm thus optimizes GC CPU utilization to the dedicated background
// mark utilization of 25% of GOMAXPROCS by minimizing GC assists.
// The high-level design of this algorithm is documented
// at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md.
// See https://golang.org/s/go15gcpacing for additional historical context.
var gcController gcControllerState

type gcControllerState struct {
	// Initialized from GOGC. GOGC=off means no GC.
	gcPercent atomic.Int32

	// memoryLimit is the soft memory limit in bytes.
	//
	// Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64,
	// which means no soft memory limit in practice.
	//
	// This is an int64 instead of a uint64 to more easily maintain parity with
	// the SetMemoryLimit API, which sets a maximum at MaxInt64. This value
	// should never be negative.
	memoryLimit atomic.Int64

	// heapMinimum is the minimum heap size at which to trigger GC.
	// For small heaps, this overrides the usual GOGC*live set rule.
	//
	// When there is a very small live set but a lot of allocation, simply
	// collecting when the heap reaches GOGC*live results in many GC
	// cycles and high total per-GC overhead. This minimum amortizes this
	// per-GC overhead while keeping the heap reasonably small.
	//
	// During initialization this is set to 4MB*GOGC/100. In the case of
	// GOGC==0, this will set heapMinimum to 0, resulting in constant
	// collection even when the heap size is small, which is useful for
	// debugging.
	heapMinimum uint64

	// runway is the amount of runway in heap bytes allocated by the
	// application that we want to give the GC once it starts.
	//
	// This is computed from consMark during mark termination.
	runway atomic.Uint64

	// consMark is the estimated per-CPU cons/mark ratio for the application.
	//
	// It represents the ratio between the application's allocation
	// rate, as bytes allocated per CPU-time, and the GC's scan rate,
	// as bytes scanned per CPU-time.
	// The units of this ratio are (B / cpu-ns) / (B / cpu-ns).
	//
	// At a high level, this value is computed as the bytes of memory
	// allocated (cons) per unit of scan work completed (mark) in a GC
	// cycle, divided by the CPU time spent on each activity.
	//
	// Updated at the end of each GC cycle, in endCycle.
	consMark float64

	// lastConsMark holds the computed cons/mark values for the previous 4 GC
	// cycles. Note that these are *not* the last values of consMark, but the
	// cons/mark values measured in endCycle.
	lastConsMark [4]float64

	// gcPercentHeapGoal is the goal heapLive for when the next GC ends,
	// derived from gcPercent.
	//
	// Set to ^uint64(0) if gcPercent is disabled.
	gcPercentHeapGoal atomic.Uint64

	// sweepDistMinTrigger is the minimum trigger to ensure a minimum
	// sweep distance.
	//
	// This bound is also special because it applies to both the trigger
	// *and* the goal (all other trigger bounds must be based *on* the goal).
	//
	// It is computed ahead of time, at commit time. The theory is that,
	// absent a sudden change to a parameter like gcPercent, the trigger
	// will be chosen to always give the sweeper enough headroom. However,
	// such a change might dramatically and suddenly move up the trigger,
	// in which case we need to ensure the sweeper still has enough headroom.
	sweepDistMinTrigger atomic.Uint64

	// triggered is the point at which the current GC cycle actually triggered.
	// Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0).
	//
	// Updated while the world is stopped.
	triggered uint64

	// lastHeapGoal is the value of heapGoal at the moment the last GC
	// ended. Note that this is distinct from the last value heapGoal had,
	// because it could change if e.g. gcPercent changes.
	//
	// Read and written with the world stopped or with mheap_.lock held.
	lastHeapGoal uint64

	// heapLive is the number of bytes considered live by the GC.
	// That is: retained by the most recent GC plus allocated
	// since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since
	// heapAlloc includes unmarked objects that have not yet been swept (and
	// hence goes up as we allocate and down as we sweep) while heapLive
	// excludes these objects (and hence only goes up between GCs).
	//
	// To reduce contention, this is updated only when obtaining a span
	// from an mcentral and at this point it counts all of the unallocated
	// slots in that span (which will be allocated before that mcache
	// obtains another span from that mcentral). Hence, it slightly
	// overestimates the "true" live heap size. It's better to overestimate
	// than to underestimate because 1) this triggers the GC earlier than
	// necessary rather than potentially too late and 2) this leads to a
	// conservative GC rate rather than a GC rate that is potentially too
	// low.
	//
	// Whenever this is updated, call traceHeapAlloc() and
	// this gcControllerState's revise() method.
	heapLive atomic.Uint64

	// heapScan is the number of bytes of "scannable" heap. This is the
	// live heap (as counted by heapLive), but omitting no-scan objects and
	// no-scan tails of objects.
	//
	// This value is fixed at the start of a GC cycle. It represents the
	// maximum scannable heap.
	heapScan atomic.Uint64

	// lastHeapScan is the number of bytes of heap that were scanned
	// last GC cycle. It is the same as heapMarked, but only
	// includes the "scannable" parts of objects.
	//
	// Updated when the world is stopped.
	lastHeapScan uint64

	// lastStackScan is the number of bytes of stack that were scanned
	// last GC cycle.
	lastStackScan atomic.Uint64

	// maxStackScan is the amount of allocated goroutine stack space in
	// use by goroutines.
	//
	// This number tracks allocated goroutine stack space rather than used
	// goroutine stack space (i.e. what is actually scanned) because used
	// goroutine stack space is much harder to measure cheaply. By using
	// allocated space, we make an overestimate; this is OK, it's better
	// to conservatively overcount than undercount.
	maxStackScan atomic.Uint64

	// globalsScan is the total amount of global variable space
	// that is scannable.
	globalsScan atomic.Uint64

	// heapMarked is the number of bytes marked by the previous
	// GC. After mark termination, heapLive == heapMarked, but
	// unlike heapLive, heapMarked does not change until the
	// next mark termination.
	heapMarked uint64

	// heapScanWork is the total heap scan work performed this cycle.
	// stackScanWork is the total stack scan work performed this cycle.
	// globalsScanWork is the total globals scan work performed this cycle.
	//
	// These are updated atomically during the cycle. Updates occur in
	// bounded batches, since they are both written and read
	// throughout the cycle. At the end of the cycle, heapScanWork is how
	// much of the retained heap is scannable.
	//
	// Currently these are measured in bytes. For most uses, this is an
	// opaque unit of work, but for estimation the definition is important.
	//
	// Note that stackScanWork includes only stack space scanned, not all
	// of the allocated stack.
	heapScanWork    atomic.Int64
	stackScanWork   atomic.Int64
	globalsScanWork atomic.Int64

	// bgScanCredit is the scan work credit accumulated by the concurrent
	// background scan. This credit is accumulated by the background scan
	// and stolen by mutator assists. Updates occur in bounded batches,
	// since it is both written and read throughout the cycle.
	bgScanCredit atomic.Int64

	// assistTime is the nanoseconds spent in mutator assists
	// during this cycle. This is updated atomically, and must also
	// be updated atomically even during a STW, because it is read
	// by sysmon. Updates occur in bounded batches, since it is both
	// written and read throughout the cycle.
	assistTime atomic.Int64

	// dedicatedMarkTime is the nanoseconds spent in dedicated mark workers
	// during this cycle. This is updated at the end of the concurrent mark
	// phase.
	dedicatedMarkTime atomic.Int64

	// fractionalMarkTime is the nanoseconds spent in the fractional mark
	// worker during this cycle. This is updated throughout the cycle and
	// will be up-to-date if the fractional mark worker is not currently
	// running.
	fractionalMarkTime atomic.Int64

	// idleMarkTime is the nanoseconds spent in idle marking during this
	// cycle. This is updated throughout the cycle.
	idleMarkTime atomic.Int64

	// markStartTime is the absolute start time in nanoseconds
	// that assists and background mark workers started.
	markStartTime int64

	// dedicatedMarkWorkersNeeded is the number of dedicated mark workers
	// that need to be started. This is computed at the beginning of each
	// cycle and decremented as dedicated mark workers get started.
	dedicatedMarkWorkersNeeded atomic.Int64

	// idleMarkWorkers is two packed int32 values in a single uint64.
	// These two values are always updated simultaneously.
	//
	// The bottom int32 is the current number of idle mark workers executing.
	//
	// The top int32 is the maximum number of idle mark workers allowed to
	// execute concurrently. Normally, this number is just gomaxprocs. However,
	// during periodic GC cycles it is set to 0 because the system is idle
	// anyway; there's no need to go full blast on all of GOMAXPROCS.
	//
	// The maximum number of idle mark workers is used to prevent new workers
	// from starting, but it is not a hard maximum. It is possible (but
	// exceedingly rare) for the current number of idle mark workers to
	// transiently exceed the maximum. This could happen if the maximum changes
	// just after a GC ends, while an M without a P is still counted as an
	// idle mark worker.
	//
	// Note that if we have no dedicated mark workers, we set this value to
	// 1, because in that case we only have fractional GC workers, which aren't
	// scheduled strictly enough to ensure GC progress. As a result,
	// idle-priority mark workers are vital to GC progress in these situations.
	//
	// For example, consider a situation in which goroutines block on the GC
	// (such as via runtime.GOMAXPROCS) and only fractional mark workers are
	// scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the
	// last running M might skip scheduling a fractional mark worker if its
	// utilization goal is met, such that once it goes to sleep (because there's
	// nothing to do), there will be nothing else to spin up a new M for the
	// fractional worker in the future, stalling GC progress and causing a
	// deadlock. However, idle-priority workers will *always* run when there is
	// nothing left to do, ensuring the GC makes progress.
	//
	// See github.com/golang/go/issues/44163 for more details.
	idleMarkWorkers atomic.Uint64

	// assistWorkPerByte is the ratio of scan work to allocated
	// bytes that should be performed by mutator assists. This is
	// computed at the beginning of each cycle and updated every
	// time heapScan is updated.
	assistWorkPerByte atomic.Float64

	// assistBytesPerWork is 1/assistWorkPerByte.
	//
	// Note that because this is read and written independently
	// from assistWorkPerByte, users may notice a skew between
	// the two values; such a state should be safe.
	assistBytesPerWork atomic.Float64

	// fractionalUtilizationGoal is the fraction of wall clock
	// time that should be spent in the fractional mark worker on
	// each P that isn't running a dedicated worker.
	//
	// For example, if the utilization goal is 25% and there are
	// no dedicated workers, this will be 0.25. If the goal is
	// 25%, there is one dedicated worker, and GOMAXPROCS is 5,
	// this will be 0.05 to make up the missing 5%.
	//
	// If this is zero, no fractional workers are needed.
	fractionalUtilizationGoal float64

	// These memory stats are effectively duplicates of fields from
	// memstats.heapStats but are updated atomically or with the world
	// stopped and don't provide the same consistency guarantees.
	//
	// Because the runtime is responsible for managing a memory limit, it's
	// useful to couple these stats more tightly to the gcController, which
	// is intimately connected to how that memory limit is maintained.
	heapInUse    sysMemStat    // bytes in mSpanInUse spans
	heapReleased sysMemStat    // bytes released to the OS
	heapFree     sysMemStat    // bytes not in any span, but not released to the OS
	totalAlloc   atomic.Uint64 // total bytes allocated
	totalFree    atomic.Uint64 // total bytes freed
	mappedReady  atomic.Uint64 // total virtual memory in the Ready state (see mem.go).

	// test indicates that this is a test-only copy of gcControllerState.
	test bool

	_ cpu.CacheLinePad
}
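// For illustration (not part of the runtime): the packing scheme described on
// idleMarkWorkers can be sketched with two hypothetical helpers that mirror
// addIdleMarkWorker and friends below.
//
//	func pack(n, max int32) uint64 { return uint64(uint32(n)) | uint64(max)<<32 }
//	func unpack(v uint64) (n, max int32) {
//		return int32(v & uint64(^uint32(0))), int32(v >> 32)
//	}
//
// Keeping both halves in one word lets a single compare-and-swap update the
// worker count and observe the limit atomically, without a lock.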
func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) {
	c.heapMinimum = defaultHeapMinimum
	c.triggered = ^uint64(0)
	c.setGCPercent(gcPercent)
	c.setMemoryLimit(memoryLimit)
	c.commit(true) // No sweep phase in the first GC cycle.
	// N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at
	// initialization time.
	// N.B. No need to call revise; there's no GC enabled during
	// initialization.
}

// startCycle resets the GC controller's state and computes estimates
// for a new GC cycle. The caller must hold worldsema and the world
// must be stopped.
func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) {
	c.heapScanWork.Store(0)
	c.stackScanWork.Store(0)
	c.globalsScanWork.Store(0)
	c.bgScanCredit.Store(0)
	c.assistTime.Store(0)
	c.dedicatedMarkTime.Store(0)
	c.fractionalMarkTime.Store(0)
	c.idleMarkTime.Store(0)
	c.markStartTime = markStartTime
	c.triggered = c.heapLive.Load()

	// Compute the background mark utilization goal. In general,
	// this may not come out exactly. We round the number of
	// dedicated workers so that the utilization is closest to
	// 25%. For small GOMAXPROCS, this would introduce too much
	// error, so we add fractional workers in that case.
	totalUtilizationGoal := float64(procs) * gcBackgroundUtilization
	dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5)
	utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1
	const maxUtilError = 0.3
	if utilError < -maxUtilError || utilError > maxUtilError {
		// Rounding put us more than 30% off our goal. With
		// gcBackgroundUtilization of 25%, this happens for
		// GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional
		// workers to compensate.
		if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal {
			// Too many dedicated workers.
			dedicatedMarkWorkersNeeded--
		}
		c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs)
	} else {
		c.fractionalUtilizationGoal = 0
	}

	// In STW mode, we just want dedicated workers.
	if debug.gcstoptheworld > 0 {
		dedicatedMarkWorkersNeeded = int64(procs)
		c.fractionalUtilizationGoal = 0
	}

	// Clear per-P state
	for _, p := range allp {
		p.gcAssistTime = 0
		p.gcFractionalMarkTime = 0
	}

	if trigger.kind == gcTriggerTime {
		// During a periodic GC cycle, reduce the number of idle mark workers
		// required. However, we need at least one dedicated mark worker or
		// idle GC worker to ensure GC progress in some scenarios (see the
		// comment on idleMarkWorkers).
		if dedicatedMarkWorkersNeeded > 0 {
			c.setMaxIdleMarkWorkers(0)
		} else {
			// TODO(mknyszek): The fundamental reason why we need this is because
			// we can't count on the fractional mark worker to get scheduled.
			// Fix that by ensuring it gets scheduled according to its quota even
			// if the rest of the application is idle.
			c.setMaxIdleMarkWorkers(1)
		}
	} else {
		// N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to
		// change during a GC cycle.
		c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded))
	}

	// Compute initial values for controls that are updated
	// throughout the cycle.
	c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded)
	c.revise()

	if debug.gcpacertrace > 0 {
		heapGoal := c.heapGoal()
		assistRatio := c.assistWorkPerByte.Load()
		print("pacer: assist ratio=", assistRatio,
			" (scan ", gcController.heapScan.Load()>>20, " MB in ",
			work.initialHeapLive>>20, "->",
			heapGoal>>20, " MB)",
			" workers=", dedicatedMarkWorkersNeeded,
			"+", c.fractionalUtilizationGoal, "\n")
	}
}
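// For illustration (not part of the runtime): the dedicated/fractional split
// computed by startCycle, traced by hand for a case the comment above calls
// out (GOMAXPROCS=6 with the 25% goal). Values here are hypothetical.
//
//	procs := 6
//	goal := float64(procs) * 0.25      // 1.5 Ps' worth of marking
//	dedicated := int64(goal + 0.5)     // rounds to 2
//	err := float64(dedicated)/goal - 1 // +0.33, beyond the 30% tolerance
//	dedicated--                        // back off to 1 dedicated worker
//	frac := (goal - float64(dedicated)) / float64(procs)
//	// frac ≈ 0.083: each P owes ~8.3% of its time to a fractional worker.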
// revise updates the assist ratio during the GC cycle to account for
// improved estimates. This should be called whenever gcController.heapScan,
// gcController.heapLive, or any inputs to gcController.heapGoal are
// updated. It is safe to call concurrently, but it may race with other
// calls to revise.
//
// The result of this race is that the two assist ratio values may not line
// up or may be stale. In practice this is OK because the assist ratio
// moves slowly throughout a GC cycle, and the assist ratio is a best-effort
// heuristic anyway. Furthermore, no part of the heuristic depends on
// the two assist ratio values being exact reciprocals of one another, since
// the two values are used to convert values from different sources.
//
// The worst case result of this raciness is that we may miss a larger shift
// in the ratio (say, if we decide to pace more aggressively against the
// hard heap goal) but even this "hard goal" is best-effort (see #40460).
// The dedicated GC should ensure we don't exceed the hard goal by too much
// in the rare case we do exceed it.
//
// It should only be called when gcBlackenEnabled != 0 (because this
// is when assists are enabled and the necessary statistics are
// available).
func (c *gcControllerState) revise() {
	gcPercent := c.gcPercent.Load()
	if gcPercent < 0 {
		// If GC is disabled but we're running a forced GC,
		// act like GOGC is huge for the below calculations.
		gcPercent = 100000
	}
	live := c.heapLive.Load()
	scan := c.heapScan.Load()
	work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()

	// Assume we're under the soft goal. Pace GC to complete at
	// heapGoal assuming the heap is in steady-state.
	heapGoal := int64(c.heapGoal())

	// The expected scan work is computed as the amount of bytes scanned last
	// GC cycle (both heap and stack), plus our estimate of globals work for this cycle.
	scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load())

	// maxScanWork is a worst-case estimate of the amount of scan work that
	// needs to be performed in this GC cycle. Specifically, it represents
	// the case where *all* scannable memory turns out to be live, and
	// *all* allocated stack space is scannable.
	maxStackScan := c.maxStackScan.Load()
	maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load())
	if work > scanWorkExpected {
		// We've already done more scan work than expected. Because our expectation
		// is based on a steady-state scannable heap size, we assume this means our
		// heap is growing. Compute a new heap goal that takes our existing runway
		// computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case
		// scan work. This keeps our assist ratio stable if the heap continues to grow.
		//
		// The effect of this mechanism is that assists stay flat in the face of heap
		// growths. It's OK to use more memory this cycle to scan all the live heap,
		// because the next GC cycle is inevitably going to use *at least* that much
		// memory anyway.
		extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered)
		scanWorkExpected = maxScanWork

		// hardGoal is a hard limit on the amount that we're willing to push back the
		// heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or
		// stacks and/or globals grow to twice their size, this limits the current GC cycle's
		// growth to 4x the original live heap's size).
		//
		// This maintains the invariant that we use no more memory than the next GC cycle
		// will anyway.
		hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal))
		if extHeapGoal > hardGoal {
			extHeapGoal = hardGoal
		}
		heapGoal = extHeapGoal
	}
	if int64(live) > heapGoal {
		// We're already past our heap goal, even the extrapolated one.
		// Leave ourselves some extra runway, so in the worst case we
		// finish by that point.
		const maxOvershoot = 1.1
		heapGoal = int64(float64(heapGoal) * maxOvershoot)

		// Compute the upper bound on the scan work remaining.
		scanWorkExpected = maxScanWork
	}

	// Compute the remaining scan work estimate.
	//
	// Note that we currently count allocations during GC as both
	// scannable heap (heapScan) and scan work completed
	// (scanWork), so allocation will change this difference
	// slowly in the soft regime and not at all in the hard
	// regime.
	scanWorkRemaining := scanWorkExpected - work
	if scanWorkRemaining < 1000 {
		// We set a somewhat arbitrary lower bound on
		// remaining scan work since if we aim a little high,
		// we can miss by a little.
		//
		// We *do* need to enforce that this is at least 1,
		// since marking is racy and double-scanning objects
		// may legitimately make the remaining scan work
		// negative, even in the hard goal regime.
		scanWorkRemaining = 1000
	}

	// Compute the heap distance remaining.
	heapRemaining := heapGoal - int64(live)
	if heapRemaining <= 0 {
		// This shouldn't happen, but if it does, avoid
		// dividing by zero or setting the assist negative.
		heapRemaining = 1
	}

	// Compute the mutator assist ratio so that by the time the mutator
	// allocates the remaining heap bytes up to heapGoal, it will
	// have done (or stolen) the remaining amount of scan work.
	// Note that the assist ratio values are updated atomically
	// but not together. This means there may be some degree of
	// skew between the two values. This is generally OK as the
	// values shift relatively slowly over the course of a GC
	// cycle.
	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
	assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining)
	c.assistWorkPerByte.Store(assistWorkPerByte)
	c.assistBytesPerWork.Store(assistBytesPerWork)
}
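// For illustration (not part of the runtime): a hypothetical mid-cycle
// snapshot of the ratios revise computes above.
//
//	scanWorkRemaining := int64(8 << 20) // 8 MiB of scan work left
//	heapRemaining := int64(32 << 20)    // 32 MiB of runway to the goal
//	assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining)
//	// 0.25: an assisting goroutine owes one byte of scan work for every
//	// four bytes it allocates; assistBytesPerWork is the reciprocal, 4.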
// endCycle computes the consMark estimate for the next cycle.
// userForced indicates whether the current GC cycle was forced
// by the application.
func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) {
	// Record the last heap goal for the scavenger.
	// We'll be updating the heap goal soon.
	gcController.lastHeapGoal = c.heapGoal()

	// Compute the duration of time for which assists were turned on.
	assistDuration := now - c.markStartTime

	// Assume background mark hit its utilization goal.
	utilization := gcBackgroundUtilization
	// Add assist utilization; avoid divide by zero.
	if assistDuration > 0 {
		utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs))
	}

	if c.heapLive.Load() <= c.triggered {
		// Shouldn't happen, but let's be very safe about this in case the
		// GC is somehow extremely short.
		//
		// In this case though, the only reasonable value for c.heapLive-c.triggered
		// would be 0, which isn't really all that useful, i.e. the GC was so short
		// that it didn't matter.
		//
		// Ignore this case and don't update anything.
		return
	}
	idleUtilization := 0.0
	if assistDuration > 0 {
		idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs))
	}
	// Determine the cons/mark ratio.
	//
	// The units we want for the numerator and denominator are both B / cpu-ns.
	// We get this by taking the bytes allocated or scanned, and dividing by the
	// amount of CPU time it took for those operations. For allocations, that
	// CPU time is
	//
	//	assistDuration * procs * (1 - utilization)
	//
	// where utilization includes just background GC workers and assists. It does *not*
	// include idle GC work time, because in theory the mutator is free to take that at
	// any point.
	//
	// For scanning, that CPU time is
	//
	//	assistDuration * procs * (utilization + idleUtilization)
	//
	// In this case, we *include* idle utilization, because that is additional CPU time that
	// the GC had available to it.
	//
	// In effect, idle GC time is sort of double-counted here, but it's very weird compared
	// to other kinds of GC work, because of how fluid it is. Namely, because the mutator is
	// *always* free to take it.
	//
	// So this calculation is really:
	//
	//	((heapLive-trigger) / (assistDuration * procs * (1-utilization))) /
	//		(scanWork / (assistDuration * procs * (utilization+idleUtilization)))
	//
	// Note that because we only care about the ratio, assistDuration and procs cancel out.
	scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load()
	currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) /
		(float64(scanWork) * (1 - utilization))

	// Update our cons/mark estimate. This is the maximum of the value we just computed and the last
	// 4 cons/mark values we measured. The reason we take the maximum here is to bias a noisy
	// cons/mark measurement toward fewer assists at the expense of additional GC cycles (starting
	// earlier).
	oldConsMark := c.consMark
	c.consMark = currentConsMark
	for i := range c.lastConsMark {
		if c.lastConsMark[i] > c.consMark {
			c.consMark = c.lastConsMark[i]
		}
	}
	copy(c.lastConsMark[:], c.lastConsMark[1:])
	c.lastConsMark[len(c.lastConsMark)-1] = currentConsMark

	if debug.gcpacertrace > 0 {
		printlock()
		goal := gcGoalUtilization * 100
		print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ")
		print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ")
		live := c.heapLive.Load()
		print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")")
		println()
		printunlock()
	}
}
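// For illustration (not part of the runtime): a hypothetical cycle measured
// by endCycle above. Suppose the mutator allocated 64 MiB between trigger and
// mark termination, the GC scanned 32 MiB, background work plus assists used
// 25% of CPU, and idle marking contributed another 5%.
//
//	allocated := float64(64 << 20)
//	scanned := float64(32 << 20)
//	utilization, idleUtilization := 0.25, 0.05
//	consMark := (allocated * (utilization + idleUtilization)) /
//		(scanned * (1 - utilization))
//	// ≈0.8: per unit of CPU time, the application allocates 0.8 bytes for
//	// every byte the GC can scan, so the pacer needs only modest runway.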
") 674 live := c.heapLive.Load() 675 print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")") 676 println() 677 printunlock() 678 } 679 } 680 681 // enlistWorker encourages another dedicated mark worker to start on 682 // another P if there are spare worker slots. It is used by putfull 683 // when more work is made available. 684 // 685 //go:nowritebarrier 686 func (c *gcControllerState) enlistWorker() { 687 // If there are idle Ps, wake one so it will run an idle worker. 688 // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112. 689 // 690 // if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 { 691 // wakep() 692 // return 693 // } 694 695 // There are no idle Ps. If we need more dedicated workers, 696 // try to preempt a running P so it will switch to a worker. 697 if c.dedicatedMarkWorkersNeeded.Load() <= 0 { 698 return 699 } 700 // Pick a random other P to preempt. 701 if gomaxprocs <= 1 { 702 return 703 } 704 gp := getg() 705 if gp == nil || gp.m == nil || gp.m.p == 0 { 706 return 707 } 708 myID := gp.m.p.ptr().id 709 for tries := 0; tries < 5; tries++ { 710 id := int32(fastrandn(uint32(gomaxprocs - 1))) 711 if id >= myID { 712 id++ 713 } 714 p := allp[id] 715 if p.status != _Prunning { 716 continue 717 } 718 if preemptone(p) { 719 return 720 } 721 } 722 } 723 724 // findRunnableGCWorker returns a background mark worker for pp if it 725 // should be run. This must only be called when gcBlackenEnabled != 0. 726 func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { 727 if gcBlackenEnabled == 0 { 728 throw("gcControllerState.findRunnable: blackening not enabled") 729 } 730 731 // Since we have the current time, check if the GC CPU limiter 732 // hasn't had an update in a while. This check is necessary in 733 // case the limiter is on but hasn't been checked in a while and 734 // so may have left sufficient headroom to turn off again. 735 if now == 0 { 736 now = nanotime() 737 } 738 if gcCPULimiter.needUpdate(now) { 739 gcCPULimiter.update(now) 740 } 741 742 if !gcMarkWorkAvailable(pp) { 743 // No work to be done right now. This can happen at 744 // the end of the mark phase when there are still 745 // assists tapering off. Don't bother running a worker 746 // now because it'll just return immediately. 747 return nil, now 748 } 749 750 // Grab a worker before we commit to running below. 751 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 752 if node == nil { 753 // There is at least one worker per P, so normally there are 754 // enough workers to run on all Ps, if necessary. However, once 755 // a worker enters gcMarkDone it may park without rejoining the 756 // pool, thus freeing a P with no corresponding worker. 757 // gcMarkDone never depends on another worker doing work, so it 758 // is safe to simply do nothing here. 759 // 760 // If gcMarkDone bails out without completing the mark phase, 761 // it will always do so with queued global work. Thus, that P 762 // will be immediately eligible to re-run the worker G it was 763 // just using, ensuring work can complete. 764 return nil, now 765 } 766 767 decIfPositive := func(val *atomic.Int64) bool { 768 for { 769 v := val.Load() 770 if v <= 0 { 771 return false 772 } 773 774 if val.CompareAndSwap(v, v-1) { 775 return true 776 } 777 } 778 } 779 780 if decIfPositive(&c.dedicatedMarkWorkersNeeded) { 781 // This P is now dedicated to marking until the end of 782 // the concurrent mark phase. 
// findRunnableGCWorker returns a background mark worker for pp if it
// should be run. This must only be called when gcBlackenEnabled != 0.
func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
	if gcBlackenEnabled == 0 {
		throw("gcControllerState.findRunnable: blackening not enabled")
	}

	// Since we have the current time, check if the GC CPU limiter
	// hasn't had an update in a while. This check is necessary in
	// case the limiter is on but hasn't been checked in a while and
	// so may have left sufficient headroom to turn off again.
	if now == 0 {
		now = nanotime()
	}
	if gcCPULimiter.needUpdate(now) {
		gcCPULimiter.update(now)
	}

	if !gcMarkWorkAvailable(pp) {
		// No work to be done right now. This can happen at
		// the end of the mark phase when there are still
		// assists tapering off. Don't bother running a worker
		// now because it'll just return immediately.
		return nil, now
	}

	// Grab a worker before we commit to running below.
	node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
	if node == nil {
		// There is at least one worker per P, so normally there are
		// enough workers to run on all Ps, if necessary. However, once
		// a worker enters gcMarkDone it may park without rejoining the
		// pool, thus freeing a P with no corresponding worker.
		// gcMarkDone never depends on another worker doing work, so it
		// is safe to simply do nothing here.
		//
		// If gcMarkDone bails out without completing the mark phase,
		// it will always do so with queued global work. Thus, that P
		// will be immediately eligible to re-run the worker G it was
		// just using, ensuring work can complete.
		return nil, now
	}

	decIfPositive := func(val *atomic.Int64) bool {
		for {
			v := val.Load()
			if v <= 0 {
				return false
			}

			if val.CompareAndSwap(v, v-1) {
				return true
			}
		}
	}

	if decIfPositive(&c.dedicatedMarkWorkersNeeded) {
		// This P is now dedicated to marking until the end of
		// the concurrent mark phase.
		pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode
	} else if c.fractionalUtilizationGoal == 0 {
		// No need for fractional workers.
		gcBgMarkWorkerPool.push(&node.node)
		return nil, now
	} else {
		// Is this P behind on the fractional utilization
		// goal?
		//
		// This should be kept in sync with pollFractionalWorkerExit.
		delta := now - c.markStartTime
		if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal {
			// Nope. No need to run a fractional worker.
			gcBgMarkWorkerPool.push(&node.node)
			return nil, now
		}
		// Run a fractional worker.
		pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode
	}

	// Run the background mark worker.
	gp := node.gp.ptr()
	casgstatus(gp, _Gwaiting, _Grunnable)
	if traceEnabled() {
		traceGoUnpark(gp, 0)
	}
	return gp, now
}

// resetLive sets up the controller state for the next mark phase after the end
// of the previous one. Must be called after endCycle and before commit, before
// the world is started.
//
// The world must be stopped.
func (c *gcControllerState) resetLive(bytesMarked uint64) {
	c.heapMarked = bytesMarked
	c.heapLive.Store(bytesMarked)
	c.heapScan.Store(uint64(c.heapScanWork.Load()))
	c.lastHeapScan = uint64(c.heapScanWork.Load())
	c.lastStackScan.Store(uint64(c.stackScanWork.Load()))
	c.triggered = ^uint64(0) // Reset triggered.

	// heapLive was updated, so emit a trace event.
	if traceEnabled() {
		traceHeapAlloc(bytesMarked)
	}
}

// markWorkerStop must be called whenever a mark worker stops executing.
//
// It updates mark work accounting in the controller by a duration of
// work in nanoseconds and other bookkeeping.
//
// Safe to execute at any time.
func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) {
	switch mode {
	case gcMarkWorkerDedicatedMode:
		c.dedicatedMarkTime.Add(duration)
		c.dedicatedMarkWorkersNeeded.Add(1)
	case gcMarkWorkerFractionalMode:
		c.fractionalMarkTime.Add(duration)
	case gcMarkWorkerIdleMode:
		c.idleMarkTime.Add(duration)
		c.removeIdleMarkWorker()
	default:
		throw("markWorkerStop: unknown mark worker mode")
	}
}

func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		live := gcController.heapLive.Add(dHeapLive)
		if traceEnabled() {
			// gcController.heapLive changed.
			traceHeapAlloc(live)
		}
	}
	if gcBlackenEnabled == 0 {
		// Update heapScan when we're not in a current GC. It is fixed
		// at the beginning of a cycle.
		if dHeapScan != 0 {
			gcController.heapScan.Add(dHeapScan)
		}
	} else {
		// gcController.heapLive changed.
		c.revise()
	}
}

func (c *gcControllerState) addScannableStack(pp *p, amount int64) {
	if pp == nil {
		c.maxStackScan.Add(amount)
		return
	}
	pp.maxStackScanDelta += amount
	if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack {
		c.maxStackScan.Add(pp.maxStackScanDelta)
		pp.maxStackScanDelta = 0
	}
}

func (c *gcControllerState) addGlobals(amount int64) {
	c.globalsScan.Add(amount)
}
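// For illustration (not part of the runtime): addScannableStack batches
// per-P deltas and flushes them to the shared counter only once they exceed
// maxStackScanSlack, trading a little staleness for far fewer contended
// atomics. A hypothetical standalone sketch of the same pattern, assuming
// sync/atomic's Int64:
//
//	const slack = 8 << 10
//	func add(local *int64, shared *atomic.Int64, amount int64) {
//		*local += amount
//		if *local >= slack || *local <= -slack {
//			shared.Add(*local) // one contended atomic per ~8 KiB of churn
//			*local = 0
//		}
//	}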
// heapGoal returns the current heap goal.
func (c *gcControllerState) heapGoal() uint64 {
	goal, _ := c.heapGoalInternal()
	return goal
}

// heapGoalInternal is the implementation of heapGoal which returns additional
// information that is necessary for computing the trigger.
//
// The returned minTrigger is always <= goal.
func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) {
	// Start with the goal calculated for gcPercent.
	goal = c.gcPercentHeapGoal.Load()

	// Check if the memory-limit-based goal is smaller, and if so, pick that.
	if newGoal := c.memoryLimitHeapGoal(); newGoal < goal {
		goal = newGoal
	} else {
		// We're not limited by the memory limit goal, so perform a series of
		// adjustments that might move the goal forward in a variety of circumstances.

		sweepDistTrigger := c.sweepDistMinTrigger.Load()
		if sweepDistTrigger > goal {
			// Set the goal to maintain a minimum sweep distance since
			// the last call to commit. Note that we never want to do this
			// if we're in the memory limit regime, because it could push
			// the goal up.
			goal = sweepDistTrigger
		}
		// Since we ignore the sweep distance trigger in the memory
		// limit regime, we need to ensure we don't propagate it to
		// the trigger, because it could cause a violation of the
		// invariant that the trigger < goal.
		minTrigger = sweepDistTrigger

		// Ensure that the heap goal is at least a little larger than
		// the point at which we triggered. This may not be the case if GC
		// start is delayed or if the allocation that pushed gcController.heapLive
		// over trigger is large or if the trigger is really close to
		// GOGC. Assist is proportional to this distance, so enforce a
		// minimum distance, even if it means going over the GOGC goal
		// by a tiny bit.
		//
		// Ignore this if we're in the memory limit regime: we'd prefer to
		// have the GC respond hard to how close we are to the goal than to
		// push the goal back in such a manner that it could cause us to exceed
		// the memory limit.
		const minRunway = 64 << 10
		if c.triggered != ^uint64(0) && goal < c.triggered+minRunway {
			goal = c.triggered + minRunway
		}
	}
	return
}
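// For illustration (not part of the runtime): the effective goal is roughly
// the smaller of the two regimes' goals, nudged by the bounds above. In
// pseudocode (min/max helpers elided):
//
//	goal = min(gcPercentHeapGoal, memoryLimitHeapGoal)
//	goal = max(goal, sweepDistMinTrigger)   // gcPercent regime only
//	goal = max(goal, triggered + minRunway) // gcPercent regime only
//
// For example, with GOGC=100 and a 512 MiB marked heap, the gcPercent goal
// sits near 1 GiB; setting GOMEMLIMIT to 768MiB would hand control to the
// memory-limit goal instead.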
// memoryLimitHeapGoal returns a heap goal derived from memoryLimit.
func (c *gcControllerState) memoryLimitHeapGoal() uint64 {
	// Start by pulling out some values we'll need. Be careful about overflow.
	var heapFree, heapAlloc, mappedReady uint64
	for {
		heapFree = c.heapFree.load()                         // Free and unscavenged memory.
		heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use.
		mappedReady = c.mappedReady.Load()                   // Total unreleased mapped memory.
		if heapFree+heapAlloc <= mappedReady {
			break
		}
		// It is impossible for total unreleased mapped memory to exceed heap memory, but
		// because these stats are updated independently, we may observe a partial update
		// including only some values. Thus, we appear to break the invariant. However,
		// this condition is necessarily transient, so just try again. In the case of a
		// persistent accounting error, we'll deadlock here.
	}

	// Below we compute a goal from memoryLimit. There are a few things to be aware of.
	// Firstly, the memoryLimit does not easily compare to the heap goal: the former
	// is total mapped memory by the runtime that hasn't been released, while the latter is
	// only heap object memory. Intuitively, the way we convert from one to the other is to
	// subtract everything from memoryLimit that both contributes to the memory limit (so,
	// ignore scavenged memory) and doesn't contain heap objects. This doesn't quite line
	// up with reality, but it's a good starting point.
	//
	// In practice this computation looks like the following:
	//
	//	memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0)) - memoryLimitHeapGoalHeadroom
	//	              ^1                                      ^2                                   ^3
	//
	// Let's break this down.
	//
	// The first term (marker 1) is everything that contributes to the memory limit and isn't
	// or couldn't become heap objects. It represents, broadly speaking, non-heap overheads.
	// One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged
	// memory that may contain heap objects in the future.
	//
	// Let's take a step back. In an ideal world, this term would look something like just
	// the heap goal. That is, we "reserve" enough space for the heap to grow to the heap
	// goal, and subtract out everything else. This is of course impossible; the definition
	// is circular! However, this impossible definition contains a key insight: the amount
	// we're *going* to use matters just as much as whatever we're currently using.
	//
	// Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and
	// unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free
	// and unscavenged memory, pushing the goal down significantly.
	//
	// heapFree is also safe to exclude from the memory limit because in the steady-state, it's
	// just a pool of memory for future heap allocations, and making new allocations from heapFree
	// memory doesn't increase overall memory use. In transient states, the scavenger and the
	// allocator actively manage the pool of heapFree memory to maintain the memory limit.
	//
	// The second term (marker 2) is the amount of memory we've exceeded the limit by, and is
	// intended to help recover from such a situation. By pushing the heap goal down, we also
	// push the trigger down, triggering and finishing a GC sooner in order to make room for
	// other memory sources. Note that since we're effectively reducing the heap goal by X bytes,
	// we're actually giving more than X bytes of headroom back, because the heap goal is in
	// terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store
	// X bytes worth of objects.
	//
	// The third term (marker 3) subtracts an additional memoryLimitHeapGoalHeadroom bytes from the
	// heap goal. As the name implies, this is to provide additional headroom in the face of pacing
	// inaccuracies. This is a fixed number of bytes because these inaccuracies disproportionately
	// affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier. Shorter GC cycles
	// and less GC work means noisy external factors like the OS scheduler have a greater impact.

	memoryLimit := uint64(c.memoryLimit.Load())

	// Compute term 1.
	nonHeapMemory := mappedReady - heapFree - heapAlloc

	// Compute term 2.
	var overage uint64
	if mappedReady > memoryLimit {
		overage = mappedReady - memoryLimit
	}

	if nonHeapMemory+overage >= memoryLimit {
		// We're at a point where non-heap memory exceeds the memory limit on its own.
		// There's honestly not much we can do here but just trigger GCs continuously
		// and let the CPU limiter rein that in. Something has to give at this point.
		// Set it to heapMarked, the lowest possible goal.
		return c.heapMarked
	}

	// Compute the goal.
	goal := memoryLimit - (nonHeapMemory + overage)

	// Apply some headroom to the goal to account for pacing inaccuracies.
	// Be careful about small limits.
	if goal < memoryLimitHeapGoalHeadroom || goal-memoryLimitHeapGoalHeadroom < memoryLimitHeapGoalHeadroom {
		goal = memoryLimitHeapGoalHeadroom
	} else {
		goal = goal - memoryLimitHeapGoalHeadroom
	}
	// Don't let us go below the live heap. A heap goal below the live heap doesn't make sense.
	if goal < c.heapMarked {
		goal = c.heapMarked
	}
	return goal
}
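// For illustration (not part of the runtime): hypothetical numbers plugged
// into the computation above, with a 1 GiB limit.
//
//	memoryLimit := uint64(1 << 30)
//	mappedReady := uint64(900 << 20) // under the limit, so overage = 0
//	heapFree, heapAlloc := uint64(50<<20), uint64(700<<20)
//	nonHeap := mappedReady - heapFree - heapAlloc // 150 MiB of overheads
//	goal := memoryLimit - nonHeap                 // 874 MiB
//	goal -= memoryLimitHeapGoalHeadroom           // minus 1 MiB of headroom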
const (
	// These constants determine the bounds on the GC trigger as a fraction
	// of heap bytes allocated between the start of a GC (heapLive == heapMarked)
	// and the end of a GC (heapLive == heapGoal).
	//
	// The constants are obscured in this way for efficiency. The denominator
	// of the fraction is always a power-of-two for a quick division, so that
	// the numerator is a single constant integer multiplication.
	triggerRatioDen = 64

	// The minimum trigger constant was chosen empirically: given a sufficiently
	// fast/scalable allocator with 48 Ps that could drive the trigger ratio
	// to <0.05, this constant causes applications to retain the same peak
	// RSS compared to not having this allocator.
	minTriggerRatioNum = 45 // ~0.7

	// The maximum trigger constant is chosen somewhat arbitrarily, but the
	// current constant has served us well over the years.
	maxTriggerRatioNum = 61 // ~0.95
)
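// For illustration (not part of the runtime): with these constants, the
// trigger bounds are fixed-point fractions of the heapMarked-to-goal span,
// e.g. for a hypothetical 64 MiB span:
//
//	span := uint64(64 << 20)
//	lower := (span / triggerRatioDen) * minTriggerRatioNum // 45/64 ≈ 70% of span
//	upper := (span / triggerRatioDen) * maxTriggerRatioNum // 61/64 ≈ 95% of span
//
// The power-of-two denominator compiles to a shift, and the numerator to a
// single constant multiply.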
// trigger returns the current point at which a GC should trigger along with
// the heap goal.
//
// The returned value may be compared against heapLive to determine whether
// the GC should trigger. Thus, the GC trigger condition should be (but may
// not be, in the case of small movements for efficiency) checked whenever
// the heap goal may change.
func (c *gcControllerState) trigger() (uint64, uint64) {
	goal, minTrigger := c.heapGoalInternal()

	// Invariant: the trigger must always be less than the heap goal.
	//
	// Note that the memory limit sets a hard maximum on our heap goal,
	// but the live heap may grow beyond it.

	if c.heapMarked >= goal {
		// The goal should never be smaller than heapMarked, but let's be
		// defensive about it. The only reasonable trigger here is one that
		// causes a continuous GC cycle at heapMarked, but respect the goal
		// if it came out as smaller than that.
		return goal, goal
	}

	// Below this point, c.heapMarked < goal.

	// heapMarked is our absolute minimum, and it's possible the trigger
	// bound we get from heapGoalInternal is less than that.
	if minTrigger < c.heapMarked {
		minTrigger = c.heapMarked
	}

	// If we let the trigger go too low, then if the application
	// is allocating very rapidly we might end up in a situation
	// where we're allocating black during a nearly always-on GC.
	// The result of this is a growing heap and ultimately an
	// increase in RSS. By capping us at a point >0, we're essentially
	// saying that we're OK using more CPU during the GC to prevent
	// this growth in RSS.
	triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked
	if minTrigger < triggerLowerBound {
		minTrigger = triggerLowerBound
	}

	// For small heaps, set the max trigger point at maxTriggerRatio of the way
	// from the live heap to the heap goal. This ensures we always have *some*
	// headroom when the GC actually starts. For larger heaps, set the max trigger
	// point at the goal, minus the minimum heap size.
	//
	// This choice follows from the fact that the minimum heap size is chosen
	// to reflect the costs of a GC with no work to do. With a large heap but
	// very little scan work to perform, this gives us exactly as much runway
	// as we would need, in the worst case.
	maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked
	if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger {
		maxTrigger = goal - defaultHeapMinimum
	}
	if maxTrigger < minTrigger {
		maxTrigger = minTrigger
	}

	// Compute the trigger from our bounds and the runway stored by commit.
	var trigger uint64
	runway := c.runway.Load()
	if runway > goal {
		trigger = minTrigger
	} else {
		trigger = goal - runway
	}
	if trigger < minTrigger {
		trigger = minTrigger
	}
	if trigger > maxTrigger {
		trigger = maxTrigger
	}
	if trigger > goal {
		print("trigger=", trigger, " heapGoal=", goal, "\n")
		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n")
		throw("produced a trigger greater than the heap goal")
	}
	return trigger, goal
}
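// For illustration (not part of the runtime): the clamping at the end of
// trigger, with hypothetical values.
//
//	goal, runway := uint64(100<<20), uint64(40<<20)
//	trigger := goal - runway // start the GC 40 MiB of allocation early
//	// ...then trigger is clamped into [minTrigger, maxTrigger], and the
//	// invariant trigger <= goal holds by construction of those bounds.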
// commit recomputes all pacing parameters needed to derive the
// trigger and the heap goal. Namely, the gcPercent-based heap goal,
// and the amount of runway we want to give the GC this cycle.
//
// This can be called any time. If the GC is in the middle of a
// concurrent phase, it will adjust the pacing of that phase.
//
// isSweepDone should be the result of calling isSweepDone(),
// unless we're testing or we know we're executing during a GC cycle.
//
// This depends on gcPercent, gcController.heapMarked, and
// gcController.heapLive. These must be up to date.
//
// Callers must call gcControllerState.revise after calling this
// function if the GC is enabled.
//
// mheap_.lock must be held or the world must be stopped.
func (c *gcControllerState) commit(isSweepDone bool) {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	if isSweepDone {
		// The sweep is done, so there aren't any restrictions on the trigger
		// we need to think about.
		c.sweepDistMinTrigger.Store(0)
	} else {
		// Concurrent sweep happens in the heap growth
		// from gcController.heapLive to trigger. Make sure we
		// give the sweeper some runway if it doesn't have enough.
		c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance)
	}

	// Compute the next GC goal, which is when the allocated heap
	// has grown by GOGC/100 over where it started the last cycle,
	// plus additional runway for non-heap sources of GC work.
	gcPercentHeapGoal := ^uint64(0)
	if gcPercent := c.gcPercent.Load(); gcPercent >= 0 {
		gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100
	}
	// Apply the minimum heap size here. It's defined in terms of gcPercent
	// and is only updated by functions that call commit.
	if gcPercentHeapGoal < c.heapMinimum {
		gcPercentHeapGoal = c.heapMinimum
	}
	c.gcPercentHeapGoal.Store(gcPercentHeapGoal)

	// Compute the amount of runway we want the GC to have by using our
	// estimate of the cons/mark ratio.
	//
	// The idea is to take our expected scan work, and multiply it by
	// the cons/mark ratio to determine how long it'll take to complete
	// that scan work in terms of bytes allocated. This gives us our GC's
	// runway.
	//
	// However, the cons/mark ratio is a ratio of rates per CPU-second, but
	// here we care about the relative rates for some division of CPU
	// resources among the mutator and the GC.
	//
	// To summarize, we have B / cpu-ns, and we want B / ns. We get that
	// by multiplying by our desired division of CPU resources. We choose
	// to express CPU resources as GOMAXPROCS*fraction. Note that because
	// we're working with a ratio here, we can omit the number of CPU cores,
	// because they'll appear in the numerator and denominator and cancel out.
	// As a result, this is basically just "weighing" the cons/mark ratio by
	// our desired division of resources.
	//
	// Furthermore, by setting the runway so that CPU resources are divided
	// this way, assuming that the cons/mark ratio is correct, we make that
	// division a reality.
	c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load())))
}
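// For illustration (not part of the runtime): the runway computation above
// with hypothetical numbers. With the 25% utilization goal, the weighting
// (1-u)/u is 3: for every byte of expected scan work, the mutator gets three
// bytes of allocation headroom, scaled by the cons/mark ratio.
//
//	consMark := 0.8
//	expectedScanWork := float64(32 << 20) // heap + stacks + globals, bytes
//	runway := consMark * (1 - 0.25) / 0.25 * expectedScanWork // ≈76.8 MiB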
// setGCPercent updates gcPercent. commit must be called after.
// Returns the old value of gcPercent.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setGCPercent(in int32) int32 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.gcPercent.Load()
	if in < 0 {
		in = -1
	}
	c.heapMinimum = defaultHeapMinimum * uint64(in) / 100
	c.gcPercent.Store(in)

	return out
}

//go:linkname setGCPercent runtime/debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setGCPercent(in)
		gcControllerCommit()
		unlock(&mheap_.lock)
	})

	// If we just disabled GC, wait for any concurrent GC mark to
	// finish so we always return with no GC running.
	if in < 0 {
		gcWaitOnMark(work.cycles.Load())
	}

	return out
}

func readGOGC() int32 {
	p := gogetenv("GOGC")
	if p == "off" {
		return -1
	}
	if n, ok := atoi32(p); ok {
		return n
	}
	return 100
}

// setMemoryLimit updates memoryLimit. commit must be called after.
// Returns the old value of memoryLimit.
//
// The world must be stopped, or mheap_.lock must be held.
func (c *gcControllerState) setMemoryLimit(in int64) int64 {
	if !c.test {
		assertWorldStoppedOrLockHeld(&mheap_.lock)
	}

	out := c.memoryLimit.Load()
	if in >= 0 {
		c.memoryLimit.Store(in)
	}

	return out
}

//go:linkname setMemoryLimit runtime/debug.setMemoryLimit
func setMemoryLimit(in int64) (out int64) {
	// Run on the system stack since we grab the heap lock.
	systemstack(func() {
		lock(&mheap_.lock)
		out = gcController.setMemoryLimit(in)
		if in < 0 || out == in {
			// If we're just checking the value or not changing
			// it, there's no point in doing the rest.
			unlock(&mheap_.lock)
			return
		}
		gcControllerCommit()
		unlock(&mheap_.lock)
	})
	return out
}

func readGOMEMLIMIT() int64 {
	p := gogetenv("GOMEMLIMIT")
	if p == "" || p == "off" {
		return maxInt64
	}
	n, ok := parseByteCount(p)
	if !ok {
		print("GOMEMLIMIT=", p, "\n")
		throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`")
	}
	return n
}
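// For illustration (not part of the runtime): these hooks are what the public
// knobs resolve to. Equivalent user-level settings:
//
//	GOGC=200 GOMEMLIMIT=4GiB ./myprogram
//
// or, from Go code:
//
//	import "runtime/debug"
//
//	func init() {
//		debug.SetGCPercent(200)       // routed to setGCPercent above
//		debug.SetMemoryLimit(4 << 30) // routed to setMemoryLimit above
//	}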
// addIdleMarkWorker attempts to add a new idle mark worker.
//
// If this returns true, the caller must become an idle mark worker unless
// there's no background mark worker goroutines in the pool. This case is
// harmless because there are already background mark workers running.
// If this returns false, the caller must NOT become an idle mark worker.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) addIdleMarkWorker() bool {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n >= max {
			// See the comment on idleMarkWorkers for why
			// n > max is tolerated.
			return false
		}
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n+1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return true
		}
	}
}

// needIdleMarkWorker is a hint as to whether another idle mark worker is needed.
//
// The caller must still call addIdleMarkWorker to become one. This is mainly
// useful for a quick check before an expensive operation.
//
// nosplit because it may be called without a P.
//
//go:nosplit
func (c *gcControllerState) needIdleMarkWorker() bool {
	p := c.idleMarkWorkers.Load()
	n, max := int32(p&uint64(^uint32(0))), int32(p>>32)
	return n < max
}

// removeIdleMarkWorker must be called when an idle mark worker stops executing.
func (c *gcControllerState) removeIdleMarkWorker() {
	for {
		old := c.idleMarkWorkers.Load()
		n, max := int32(old&uint64(^uint32(0))), int32(old>>32)
		if n-1 < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n-1)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}

// setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed.
//
// This method is optimistic in that it does not wait for the number of
// idle mark workers to reduce to max before returning; it assumes the workers
// will deschedule themselves.
func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) {
	for {
		old := c.idleMarkWorkers.Load()
		n := int32(old & uint64(^uint32(0)))
		if n < 0 {
			print("n=", n, " max=", max, "\n")
			throw("negative idle mark workers")
		}
		new := uint64(uint32(n)) | (uint64(max) << 32)
		if c.idleMarkWorkers.CompareAndSwap(old, new) {
			return
		}
	}
}

// gcControllerCommit is gcController.commit, but passes arguments from live
// (non-test) data. It also updates any consumers of the GC pacing, such as
// sweep pacing and the background scavenger.
//
// Calls gcController.commit.
//
// The heap lock must be held, so this must be executed on the system stack.
//
//go:systemstack
func gcControllerCommit() {
	assertWorldStoppedOrLockHeld(&mheap_.lock)

	gcController.commit(isSweepDone())

	// Update mark pacing.
	if gcphase != _GCoff {
		gcController.revise()
	}

	// TODO(mknyszek): This isn't really accurate any longer because the heap
	// goal is computed dynamically. Still useful to snapshot, but not as useful.
	if traceEnabled() {
		traceHeapGoal()
	}

	trigger, heapGoal := gcController.trigger()
	gcPaceSweeper(trigger)
	gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal)
}
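// For illustration (not part of the runtime): the pacer traces printed by
// startCycle and endCycle above can be observed for any Go program via the
// GODEBUG knob, e.g.:
//
//	GODEBUG=gcpacertrace=1 ./myprogram
//
// which emits one "pacer: assist ratio=..." line at each GC start and one
// "pacer: ...% CPU ..." summary line at each GC end.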