github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/mgcpacer.go (about) 1 // Copyright 2021 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/cpu" 9 "internal/goexperiment" 10 "runtime/internal/atomic" 11 _ "unsafe" // for go:linkname 12 ) 13 14 // go119MemoryLimitSupport is a feature flag for a number of changes 15 // related to the memory limit feature (#48409). Disabling this flag 16 // disables those features, as well as the memory limit mechanism, 17 // which becomes a no-op. 18 const go119MemoryLimitSupport = true 19 20 const ( 21 // gcGoalUtilization is the goal CPU utilization for 22 // marking as a fraction of GOMAXPROCS. 23 // 24 // Increasing the goal utilization will shorten GC cycles as the GC 25 // has more resources behind it, lessening costs from the write barrier, 26 // but comes at the cost of increasing mutator latency. 27 gcGoalUtilization = gcBackgroundUtilization 28 29 // gcBackgroundUtilization is the fixed CPU utilization for background 30 // marking. It must be <= gcGoalUtilization. The difference between 31 // gcGoalUtilization and gcBackgroundUtilization will be made up by 32 // mark assists. The scheduler will aim to use within 50% of this 33 // goal. 34 // 35 // As a general rule, there's little reason to set gcBackgroundUtilization 36 // < gcGoalUtilization. One reason might be in mostly idle applications, 37 // where goroutines are unlikely to assist at all, so the actual 38 // utilization will be lower than the goal. But this is moot point 39 // because the idle mark workers already soak up idle CPU resources. 40 // These two values are still kept separate however because they are 41 // distinct conceptually, and in previous iterations of the pacer the 42 // distinction was more important. 43 gcBackgroundUtilization = 0.25 44 45 // gcCreditSlack is the amount of scan work credit that can 46 // accumulate locally before updating gcController.heapScanWork and, 47 // optionally, gcController.bgScanCredit. Lower values give a more 48 // accurate assist ratio and make it more likely that assists will 49 // successfully steal background credit. Higher values reduce memory 50 // contention. 51 gcCreditSlack = 2000 52 53 // gcAssistTimeSlack is the nanoseconds of mutator assist time that 54 // can accumulate on a P before updating gcController.assistTime. 55 gcAssistTimeSlack = 5000 56 57 // gcOverAssistWork determines how many extra units of scan work a GC 58 // assist does when an assist happens. This amortizes the cost of an 59 // assist by pre-paying for this many bytes of future allocations. 60 gcOverAssistWork = 64 << 10 61 62 // defaultHeapMinimum is the value of heapMinimum for GOGC==100. 63 defaultHeapMinimum = (goexperiment.HeapMinimum512KiBInt)*(512<<10) + 64 (1-goexperiment.HeapMinimum512KiBInt)*(4<<20) 65 66 // maxStackScanSlack is the bytes of stack space allocated or freed 67 // that can accumulate on a P before updating gcController.stackSize. 68 maxStackScanSlack = 8 << 10 69 70 // memoryLimitHeapGoalHeadroom is the amount of headroom the pacer gives to 71 // the heap goal when operating in the memory-limited regime. That is, 72 // it'll reduce the heap goal by this many extra bytes off of the base 73 // calculation. 74 memoryLimitHeapGoalHeadroom = 1 << 20 75 ) 76 77 // gcController implements the GC pacing controller that determines 78 // when to trigger concurrent garbage collection and how much marking 79 // work to do in mutator assists and background marking. 80 // 81 // It calculates the ratio between the allocation rate (in terms of CPU 82 // time) and the GC scan throughput to determine the heap size at which to 83 // trigger a GC cycle such that no GC assists are required to finish on time. 84 // This algorithm thus optimizes GC CPU utilization to the dedicated background 85 // mark utilization of 25% of GOMAXPROCS by minimizing GC assists. 86 // GOMAXPROCS. The high-level design of this algorithm is documented 87 // at https://github.com/golang/proposal/blob/master/design/44167-gc-pacer-redesign.md. 88 // See https://golang.org/s/go15gcpacing for additional historical context. 89 var gcController gcControllerState 90 91 type gcControllerState struct { 92 // Initialized from GOGC. GOGC=off means no GC. 93 gcPercent atomic.Int32 94 95 // memoryLimit is the soft memory limit in bytes. 96 // 97 // Initialized from GOMEMLIMIT. GOMEMLIMIT=off is equivalent to MaxInt64 98 // which means no soft memory limit in practice. 99 // 100 // This is an int64 instead of a uint64 to more easily maintain parity with 101 // the SetMemoryLimit API, which sets a maximum at MaxInt64. This value 102 // should never be negative. 103 memoryLimit atomic.Int64 104 105 // heapMinimum is the minimum heap size at which to trigger GC. 106 // For small heaps, this overrides the usual GOGC*live set rule. 107 // 108 // When there is a very small live set but a lot of allocation, simply 109 // collecting when the heap reaches GOGC*live results in many GC 110 // cycles and high total per-GC overhead. This minimum amortizes this 111 // per-GC overhead while keeping the heap reasonably small. 112 // 113 // During initialization this is set to 4MB*GOGC/100. In the case of 114 // GOGC==0, this will set heapMinimum to 0, resulting in constant 115 // collection even when the heap size is small, which is useful for 116 // debugging. 117 heapMinimum uint64 118 119 // runway is the amount of runway in heap bytes allocated by the 120 // application that we want to give the GC once it starts. 121 // 122 // This is computed from consMark during mark termination. 123 runway atomic.Uint64 124 125 // consMark is the estimated per-CPU consMark ratio for the application. 126 // 127 // It represents the ratio between the application's allocation 128 // rate, as bytes allocated per CPU-time, and the GC's scan rate, 129 // as bytes scanned per CPU-time. 130 // The units of this ratio are (B / cpu-ns) / (B / cpu-ns). 131 // 132 // At a high level, this value is computed as the bytes of memory 133 // allocated (cons) per unit of scan work completed (mark) in a GC 134 // cycle, divided by the CPU time spent on each activity. 135 // 136 // Updated at the end of each GC cycle, in endCycle. 137 consMark float64 138 139 // consMarkController holds the state for the mark-cons ratio 140 // estimation over time. 141 // 142 // Its purpose is to smooth out noisiness in the computation of 143 // consMark; see consMark for details. 144 consMarkController piController 145 146 // gcPercentHeapGoal is the goal heapLive for when next GC ends derived 147 // from gcPercent. 148 // 149 // Set to ^uint64(0) if gcPercent is disabled. 150 gcPercentHeapGoal atomic.Uint64 151 152 // sweepDistMinTrigger is the minimum trigger to ensure a minimum 153 // sweep distance. 154 // 155 // This bound is also special because it applies to both the trigger 156 // *and* the goal (all other trigger bounds must be based *on* the goal). 157 // 158 // It is computed ahead of time, at commit time. The theory is that, 159 // absent a sudden change to a parameter like gcPercent, the trigger 160 // will be chosen to always give the sweeper enough headroom. However, 161 // such a change might dramatically and suddenly move up the trigger, 162 // in which case we need to ensure the sweeper still has enough headroom. 163 sweepDistMinTrigger atomic.Uint64 164 165 // triggered is the point at which the current GC cycle actually triggered. 166 // Only valid during the mark phase of a GC cycle, otherwise set to ^uint64(0). 167 // 168 // Updated while the world is stopped. 169 triggered uint64 170 171 // lastHeapGoal is the value of heapGoal at the moment the last GC 172 // ended. Note that this is distinct from the last value heapGoal had, 173 // because it could change if e.g. gcPercent changes. 174 // 175 // Read and written with the world stopped or with mheap_.lock held. 176 lastHeapGoal uint64 177 178 // heapLive is the number of bytes considered live by the GC. 179 // That is: retained by the most recent GC plus allocated 180 // since then. heapLive ≤ memstats.totalAlloc-memstats.totalFree, since 181 // heapAlloc includes unmarked objects that have not yet been swept (and 182 // hence goes up as we allocate and down as we sweep) while heapLive 183 // excludes these objects (and hence only goes up between GCs). 184 // 185 // To reduce contention, this is updated only when obtaining a span 186 // from an mcentral and at this point it counts all of the unallocated 187 // slots in that span (which will be allocated before that mcache 188 // obtains another span from that mcentral). Hence, it slightly 189 // overestimates the "true" live heap size. It's better to overestimate 190 // than to underestimate because 1) this triggers the GC earlier than 191 // necessary rather than potentially too late and 2) this leads to a 192 // conservative GC rate rather than a GC rate that is potentially too 193 // low. 194 // 195 // Whenever this is updated, call traceHeapAlloc() and 196 // this gcControllerState's revise() method. 197 heapLive atomic.Uint64 198 199 // heapScan is the number of bytes of "scannable" heap. This is the 200 // live heap (as counted by heapLive), but omitting no-scan objects and 201 // no-scan tails of objects. 202 // 203 // This value is fixed at the start of a GC cycle. It represents the 204 // maximum scannable heap. 205 heapScan atomic.Uint64 206 207 // lastHeapScan is the number of bytes of heap that were scanned 208 // last GC cycle. It is the same as heapMarked, but only 209 // includes the "scannable" parts of objects. 210 // 211 // Updated when the world is stopped. 212 lastHeapScan uint64 213 214 // lastStackScan is the number of bytes of stack that were scanned 215 // last GC cycle. 216 lastStackScan atomic.Uint64 217 218 // maxStackScan is the amount of allocated goroutine stack space in 219 // use by goroutines. 220 // 221 // This number tracks allocated goroutine stack space rather than used 222 // goroutine stack space (i.e. what is actually scanned) because used 223 // goroutine stack space is much harder to measure cheaply. By using 224 // allocated space, we make an overestimate; this is OK, it's better 225 // to conservatively overcount than undercount. 226 maxStackScan atomic.Uint64 227 228 // globalsScan is the total amount of global variable space 229 // that is scannable. 230 globalsScan atomic.Uint64 231 232 // heapMarked is the number of bytes marked by the previous 233 // GC. After mark termination, heapLive == heapMarked, but 234 // unlike heapLive, heapMarked does not change until the 235 // next mark termination. 236 heapMarked uint64 237 238 // heapScanWork is the total heap scan work performed this cycle. 239 // stackScanWork is the total stack scan work performed this cycle. 240 // globalsScanWork is the total globals scan work performed this cycle. 241 // 242 // These are updated atomically during the cycle. Updates occur in 243 // bounded batches, since they are both written and read 244 // throughout the cycle. At the end of the cycle, heapScanWork is how 245 // much of the retained heap is scannable. 246 // 247 // Currently these are measured in bytes. For most uses, this is an 248 // opaque unit of work, but for estimation the definition is important. 249 // 250 // Note that stackScanWork includes only stack space scanned, not all 251 // of the allocated stack. 252 heapScanWork atomic.Int64 253 stackScanWork atomic.Int64 254 globalsScanWork atomic.Int64 255 256 // bgScanCredit is the scan work credit accumulated by the concurrent 257 // background scan. This credit is accumulated by the background scan 258 // and stolen by mutator assists. Updates occur in bounded batches, 259 // since it is both written and read throughout the cycle. 260 bgScanCredit atomic.Int64 261 262 // assistTime is the nanoseconds spent in mutator assists 263 // during this cycle. This is updated atomically, and must also 264 // be updated atomically even during a STW, because it is read 265 // by sysmon. Updates occur in bounded batches, since it is both 266 // written and read throughout the cycle. 267 assistTime atomic.Int64 268 269 // dedicatedMarkTime is the nanoseconds spent in dedicated mark workers 270 // during this cycle. This is updated at the end of the concurrent mark 271 // phase. 272 dedicatedMarkTime atomic.Int64 273 274 // fractionalMarkTime is the nanoseconds spent in the fractional mark 275 // worker during this cycle. This is updated throughout the cycle and 276 // will be up-to-date if the fractional mark worker is not currently 277 // running. 278 fractionalMarkTime atomic.Int64 279 280 // idleMarkTime is the nanoseconds spent in idle marking during this 281 // cycle. This is updated throughout the cycle. 282 idleMarkTime atomic.Int64 283 284 // markStartTime is the absolute start time in nanoseconds 285 // that assists and background mark workers started. 286 markStartTime int64 287 288 // dedicatedMarkWorkersNeeded is the number of dedicated mark workers 289 // that need to be started. This is computed at the beginning of each 290 // cycle and decremented as dedicated mark workers get started. 291 dedicatedMarkWorkersNeeded atomic.Int64 292 293 // idleMarkWorkers is two packed int32 values in a single uint64. 294 // These two values are always updated simultaneously. 295 // 296 // The bottom int32 is the current number of idle mark workers executing. 297 // 298 // The top int32 is the maximum number of idle mark workers allowed to 299 // execute concurrently. Normally, this number is just gomaxprocs. However, 300 // during periodic GC cycles it is set to 0 because the system is idle 301 // anyway; there's no need to go full blast on all of GOMAXPROCS. 302 // 303 // The maximum number of idle mark workers is used to prevent new workers 304 // from starting, but it is not a hard maximum. It is possible (but 305 // exceedingly rare) for the current number of idle mark workers to 306 // transiently exceed the maximum. This could happen if the maximum changes 307 // just after a GC ends, and an M with no P. 308 // 309 // Note that if we have no dedicated mark workers, we set this value to 310 // 1 in this case we only have fractional GC workers which aren't scheduled 311 // strictly enough to ensure GC progress. As a result, idle-priority mark 312 // workers are vital to GC progress in these situations. 313 // 314 // For example, consider a situation in which goroutines block on the GC 315 // (such as via runtime.GOMAXPROCS) and only fractional mark workers are 316 // scheduled (e.g. GOMAXPROCS=1). Without idle-priority mark workers, the 317 // last running M might skip scheduling a fractional mark worker if its 318 // utilization goal is met, such that once it goes to sleep (because there's 319 // nothing to do), there will be nothing else to spin up a new M for the 320 // fractional worker in the future, stalling GC progress and causing a 321 // deadlock. However, idle-priority workers will *always* run when there is 322 // nothing left to do, ensuring the GC makes progress. 323 // 324 // See github.com/golang/go/issues/44163 for more details. 325 idleMarkWorkers atomic.Uint64 326 327 // assistWorkPerByte is the ratio of scan work to allocated 328 // bytes that should be performed by mutator assists. This is 329 // computed at the beginning of each cycle and updated every 330 // time heapScan is updated. 331 assistWorkPerByte atomic.Float64 332 333 // assistBytesPerWork is 1/assistWorkPerByte. 334 // 335 // Note that because this is read and written independently 336 // from assistWorkPerByte users may notice a skew between 337 // the two values, and such a state should be safe. 338 assistBytesPerWork atomic.Float64 339 340 // fractionalUtilizationGoal is the fraction of wall clock 341 // time that should be spent in the fractional mark worker on 342 // each P that isn't running a dedicated worker. 343 // 344 // For example, if the utilization goal is 25% and there are 345 // no dedicated workers, this will be 0.25. If the goal is 346 // 25%, there is one dedicated worker, and GOMAXPROCS is 5, 347 // this will be 0.05 to make up the missing 5%. 348 // 349 // If this is zero, no fractional workers are needed. 350 fractionalUtilizationGoal float64 351 352 // These memory stats are effectively duplicates of fields from 353 // memstats.heapStats but are updated atomically or with the world 354 // stopped and don't provide the same consistency guarantees. 355 // 356 // Because the runtime is responsible for managing a memory limit, it's 357 // useful to couple these stats more tightly to the gcController, which 358 // is intimately connected to how that memory limit is maintained. 359 heapInUse sysMemStat // bytes in mSpanInUse spans 360 heapReleased sysMemStat // bytes released to the OS 361 heapFree sysMemStat // bytes not in any span, but not released to the OS 362 totalAlloc atomic.Uint64 // total bytes allocated 363 totalFree atomic.Uint64 // total bytes freed 364 mappedReady atomic.Uint64 // total virtual memory in the Ready state (see mem.go). 365 366 // test indicates that this is a test-only copy of gcControllerState. 367 test bool 368 369 _ cpu.CacheLinePad 370 } 371 372 func (c *gcControllerState) init(gcPercent int32, memoryLimit int64) { 373 c.heapMinimum = defaultHeapMinimum 374 c.triggered = ^uint64(0) 375 376 c.consMarkController = piController{ 377 // Tuned first via the Ziegler-Nichols process in simulation, 378 // then the integral time was manually tuned against real-world 379 // applications to deal with noisiness in the measured cons/mark 380 // ratio. 381 kp: 0.9, 382 ti: 4.0, 383 384 // Set a high reset time in GC cycles. 385 // This is inversely proportional to the rate at which we 386 // accumulate error from clipping. By making this very high 387 // we make the accumulation slow. In general, clipping is 388 // OK in our situation, hence the choice. 389 // 390 // Tune this if we get unintended effects from clipping for 391 // a long time. 392 tt: 1000, 393 min: -1000, 394 max: 1000, 395 } 396 397 c.setGCPercent(gcPercent) 398 c.setMemoryLimit(memoryLimit) 399 c.commit(true) // No sweep phase in the first GC cycle. 400 // N.B. Don't bother calling traceHeapGoal. Tracing is never enabled at 401 // initialization time. 402 // N.B. No need to call revise; there's no GC enabled during 403 // initialization. 404 } 405 406 // startCycle resets the GC controller's state and computes estimates 407 // for a new GC cycle. The caller must hold worldsema and the world 408 // must be stopped. 409 func (c *gcControllerState) startCycle(markStartTime int64, procs int, trigger gcTrigger) { 410 c.heapScanWork.Store(0) 411 c.stackScanWork.Store(0) 412 c.globalsScanWork.Store(0) 413 c.bgScanCredit.Store(0) 414 c.assistTime.Store(0) 415 c.dedicatedMarkTime.Store(0) 416 c.fractionalMarkTime.Store(0) 417 c.idleMarkTime.Store(0) 418 c.markStartTime = markStartTime 419 420 // TODO(mknyszek): This is supposed to be the actual trigger point for the heap, but 421 // causes regressions in memory use. The cause is that the PI controller used to smooth 422 // the cons/mark ratio measurements tends to flail when using the less accurate precomputed 423 // trigger for the cons/mark calculation, and this results in the controller being more 424 // conservative about steady-states it tries to find in the future. 425 // 426 // This conservatism is transient, but these transient states tend to matter for short-lived 427 // programs, especially because the PI controller is overdamped, partially because it is 428 // configured with a relatively large time constant. 429 // 430 // Ultimately, I think this is just two mistakes piled on one another: the choice of a swingy 431 // smoothing function that recalls a fairly long history (due to its overdamped time constant) 432 // coupled with an inaccurate cons/mark calculation. It just so happens this works better 433 // today, and it makes it harder to change things in the future. 434 // 435 // This is described in #53738. Fix this for #53892 by changing back to the actual trigger 436 // point and simplifying the smoothing function. 437 heapTrigger, heapGoal := c.trigger() 438 c.triggered = heapTrigger 439 440 // Compute the background mark utilization goal. In general, 441 // this may not come out exactly. We round the number of 442 // dedicated workers so that the utilization is closest to 443 // 25%. For small GOMAXPROCS, this would introduce too much 444 // error, so we add fractional workers in that case. 445 totalUtilizationGoal := float64(procs) * gcBackgroundUtilization 446 dedicatedMarkWorkersNeeded := int64(totalUtilizationGoal + 0.5) 447 utilError := float64(dedicatedMarkWorkersNeeded)/totalUtilizationGoal - 1 448 const maxUtilError = 0.3 449 if utilError < -maxUtilError || utilError > maxUtilError { 450 // Rounding put us more than 30% off our goal. With 451 // gcBackgroundUtilization of 25%, this happens for 452 // GOMAXPROCS<=3 or GOMAXPROCS=6. Enable fractional 453 // workers to compensate. 454 if float64(dedicatedMarkWorkersNeeded) > totalUtilizationGoal { 455 // Too many dedicated workers. 456 dedicatedMarkWorkersNeeded-- 457 } 458 c.fractionalUtilizationGoal = (totalUtilizationGoal - float64(dedicatedMarkWorkersNeeded)) / float64(procs) 459 } else { 460 c.fractionalUtilizationGoal = 0 461 } 462 463 // In STW mode, we just want dedicated workers. 464 if debug.gcstoptheworld > 0 { 465 dedicatedMarkWorkersNeeded = int64(procs) 466 c.fractionalUtilizationGoal = 0 467 } 468 469 // Clear per-P state 470 for _, p := range allp { 471 p.gcAssistTime = 0 472 p.gcFractionalMarkTime = 0 473 } 474 475 if trigger.kind == gcTriggerTime { 476 // During a periodic GC cycle, reduce the number of idle mark workers 477 // required. However, we need at least one dedicated mark worker or 478 // idle GC worker to ensure GC progress in some scenarios (see comment 479 // on maxIdleMarkWorkers). 480 if dedicatedMarkWorkersNeeded > 0 { 481 c.setMaxIdleMarkWorkers(0) 482 } else { 483 // TODO(mknyszek): The fundamental reason why we need this is because 484 // we can't count on the fractional mark worker to get scheduled. 485 // Fix that by ensuring it gets scheduled according to its quota even 486 // if the rest of the application is idle. 487 c.setMaxIdleMarkWorkers(1) 488 } 489 } else { 490 // N.B. gomaxprocs and dedicatedMarkWorkersNeeded are guaranteed not to 491 // change during a GC cycle. 492 c.setMaxIdleMarkWorkers(int32(procs) - int32(dedicatedMarkWorkersNeeded)) 493 } 494 495 // Compute initial values for controls that are updated 496 // throughout the cycle. 497 c.dedicatedMarkWorkersNeeded.Store(dedicatedMarkWorkersNeeded) 498 c.revise() 499 500 if debug.gcpacertrace > 0 { 501 assistRatio := c.assistWorkPerByte.Load() 502 print("pacer: assist ratio=", assistRatio, 503 " (scan ", gcController.heapScan.Load()>>20, " MB in ", 504 work.initialHeapLive>>20, "->", 505 heapGoal>>20, " MB)", 506 " workers=", dedicatedMarkWorkersNeeded, 507 "+", c.fractionalUtilizationGoal, "\n") 508 } 509 } 510 511 // revise updates the assist ratio during the GC cycle to account for 512 // improved estimates. This should be called whenever gcController.heapScan, 513 // gcController.heapLive, or if any inputs to gcController.heapGoal are 514 // updated. It is safe to call concurrently, but it may race with other 515 // calls to revise. 516 // 517 // The result of this race is that the two assist ratio values may not line 518 // up or may be stale. In practice this is OK because the assist ratio 519 // moves slowly throughout a GC cycle, and the assist ratio is a best-effort 520 // heuristic anyway. Furthermore, no part of the heuristic depends on 521 // the two assist ratio values being exact reciprocals of one another, since 522 // the two values are used to convert values from different sources. 523 // 524 // The worst case result of this raciness is that we may miss a larger shift 525 // in the ratio (say, if we decide to pace more aggressively against the 526 // hard heap goal) but even this "hard goal" is best-effort (see #40460). 527 // The dedicated GC should ensure we don't exceed the hard goal by too much 528 // in the rare case we do exceed it. 529 // 530 // It should only be called when gcBlackenEnabled != 0 (because this 531 // is when assists are enabled and the necessary statistics are 532 // available). 533 func (c *gcControllerState) revise() { 534 gcPercent := c.gcPercent.Load() 535 if gcPercent < 0 { 536 // If GC is disabled but we're running a forced GC, 537 // act like GOGC is huge for the below calculations. 538 gcPercent = 100000 539 } 540 live := c.heapLive.Load() 541 scan := c.heapScan.Load() 542 work := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load() 543 544 // Assume we're under the soft goal. Pace GC to complete at 545 // heapGoal assuming the heap is in steady-state. 546 heapGoal := int64(c.heapGoal()) 547 548 // The expected scan work is computed as the amount of bytes scanned last 549 // GC cycle (both heap and stack), plus our estimate of globals work for this cycle. 550 scanWorkExpected := int64(c.lastHeapScan + c.lastStackScan.Load() + c.globalsScan.Load()) 551 552 // maxScanWork is a worst-case estimate of the amount of scan work that 553 // needs to be performed in this GC cycle. Specifically, it represents 554 // the case where *all* scannable memory turns out to be live, and 555 // *all* allocated stack space is scannable. 556 maxStackScan := c.maxStackScan.Load() 557 maxScanWork := int64(scan + maxStackScan + c.globalsScan.Load()) 558 if work > scanWorkExpected { 559 // We've already done more scan work than expected. Because our expectation 560 // is based on a steady-state scannable heap size, we assume this means our 561 // heap is growing. Compute a new heap goal that takes our existing runway 562 // computed for scanWorkExpected and extrapolates it to maxScanWork, the worst-case 563 // scan work. This keeps our assist ratio stable if the heap continues to grow. 564 // 565 // The effect of this mechanism is that assists stay flat in the face of heap 566 // growths. It's OK to use more memory this cycle to scan all the live heap, 567 // because the next GC cycle is inevitably going to use *at least* that much 568 // memory anyway. 569 extHeapGoal := int64(float64(heapGoal-int64(c.triggered))/float64(scanWorkExpected)*float64(maxScanWork)) + int64(c.triggered) 570 scanWorkExpected = maxScanWork 571 572 // hardGoal is a hard limit on the amount that we're willing to push back the 573 // heap goal, and that's twice the heap goal (i.e. if GOGC=100 and the heap and/or 574 // stacks and/or globals grow to twice their size, this limits the current GC cycle's 575 // growth to 4x the original live heap's size). 576 // 577 // This maintains the invariant that we use no more memory than the next GC cycle 578 // will anyway. 579 hardGoal := int64((1.0 + float64(gcPercent)/100.0) * float64(heapGoal)) 580 if extHeapGoal > hardGoal { 581 extHeapGoal = hardGoal 582 } 583 heapGoal = extHeapGoal 584 } 585 if int64(live) > heapGoal { 586 // We're already past our heap goal, even the extrapolated one. 587 // Leave ourselves some extra runway, so in the worst case we 588 // finish by that point. 589 const maxOvershoot = 1.1 590 heapGoal = int64(float64(heapGoal) * maxOvershoot) 591 592 // Compute the upper bound on the scan work remaining. 593 scanWorkExpected = maxScanWork 594 } 595 596 // Compute the remaining scan work estimate. 597 // 598 // Note that we currently count allocations during GC as both 599 // scannable heap (heapScan) and scan work completed 600 // (scanWork), so allocation will change this difference 601 // slowly in the soft regime and not at all in the hard 602 // regime. 603 scanWorkRemaining := scanWorkExpected - work 604 if scanWorkRemaining < 1000 { 605 // We set a somewhat arbitrary lower bound on 606 // remaining scan work since if we aim a little high, 607 // we can miss by a little. 608 // 609 // We *do* need to enforce that this is at least 1, 610 // since marking is racy and double-scanning objects 611 // may legitimately make the remaining scan work 612 // negative, even in the hard goal regime. 613 scanWorkRemaining = 1000 614 } 615 616 // Compute the heap distance remaining. 617 heapRemaining := heapGoal - int64(live) 618 if heapRemaining <= 0 { 619 // This shouldn't happen, but if it does, avoid 620 // dividing by zero or setting the assist negative. 621 heapRemaining = 1 622 } 623 624 // Compute the mutator assist ratio so by the time the mutator 625 // allocates the remaining heap bytes up to heapGoal, it will 626 // have done (or stolen) the remaining amount of scan work. 627 // Note that the assist ratio values are updated atomically 628 // but not together. This means there may be some degree of 629 // skew between the two values. This is generally OK as the 630 // values shift relatively slowly over the course of a GC 631 // cycle. 632 assistWorkPerByte := float64(scanWorkRemaining) / float64(heapRemaining) 633 assistBytesPerWork := float64(heapRemaining) / float64(scanWorkRemaining) 634 c.assistWorkPerByte.Store(assistWorkPerByte) 635 c.assistBytesPerWork.Store(assistBytesPerWork) 636 } 637 638 // endCycle computes the consMark estimate for the next cycle. 639 // userForced indicates whether the current GC cycle was forced 640 // by the application. 641 func (c *gcControllerState) endCycle(now int64, procs int, userForced bool) { 642 // Record last heap goal for the scavenger. 643 // We'll be updating the heap goal soon. 644 gcController.lastHeapGoal = c.heapGoal() 645 646 // Compute the duration of time for which assists were turned on. 647 assistDuration := now - c.markStartTime 648 649 // Assume background mark hit its utilization goal. 650 utilization := gcBackgroundUtilization 651 // Add assist utilization; avoid divide by zero. 652 if assistDuration > 0 { 653 utilization += float64(c.assistTime.Load()) / float64(assistDuration*int64(procs)) 654 } 655 656 if c.heapLive.Load() <= c.triggered { 657 // Shouldn't happen, but let's be very safe about this in case the 658 // GC is somehow extremely short. 659 // 660 // In this case though, the only reasonable value for c.heapLive-c.triggered 661 // would be 0, which isn't really all that useful, i.e. the GC was so short 662 // that it didn't matter. 663 // 664 // Ignore this case and don't update anything. 665 return 666 } 667 idleUtilization := 0.0 668 if assistDuration > 0 { 669 idleUtilization = float64(c.idleMarkTime.Load()) / float64(assistDuration*int64(procs)) 670 } 671 // Determine the cons/mark ratio. 672 // 673 // The units we want for the numerator and denominator are both B / cpu-ns. 674 // We get this by taking the bytes allocated or scanned, and divide by the amount of 675 // CPU time it took for those operations. For allocations, that CPU time is 676 // 677 // assistDuration * procs * (1 - utilization) 678 // 679 // Where utilization includes just background GC workers and assists. It does *not* 680 // include idle GC work time, because in theory the mutator is free to take that at 681 // any point. 682 // 683 // For scanning, that CPU time is 684 // 685 // assistDuration * procs * (utilization + idleUtilization) 686 // 687 // In this case, we *include* idle utilization, because that is additional CPU time that 688 // the GC had available to it. 689 // 690 // In effect, idle GC time is sort of double-counted here, but it's very weird compared 691 // to other kinds of GC work, because of how fluid it is. Namely, because the mutator is 692 // *always* free to take it. 693 // 694 // So this calculation is really: 695 // (heapLive-trigger) / (assistDuration * procs * (1-utilization)) / 696 // (scanWork) / (assistDuration * procs * (utilization+idleUtilization) 697 // 698 // Note that because we only care about the ratio, assistDuration and procs cancel out. 699 scanWork := c.heapScanWork.Load() + c.stackScanWork.Load() + c.globalsScanWork.Load() 700 currentConsMark := (float64(c.heapLive.Load()-c.triggered) * (utilization + idleUtilization)) / 701 (float64(scanWork) * (1 - utilization)) 702 703 // Update cons/mark controller. The time period for this is 1 GC cycle. 704 // 705 // This use of a PI controller might seem strange. So, here's an explanation: 706 // 707 // currentConsMark represents the consMark we *should've* had to be perfectly 708 // on-target for this cycle. Given that we assume the next GC will be like this 709 // one in the steady-state, it stands to reason that we should just pick that 710 // as our next consMark. In practice, however, currentConsMark is too noisy: 711 // we're going to be wildly off-target in each GC cycle if we do that. 712 // 713 // What we do instead is make a long-term assumption: there is some steady-state 714 // consMark value, but it's obscured by noise. By constantly shooting for this 715 // noisy-but-perfect consMark value, the controller will bounce around a bit, 716 // but its average behavior, in aggregate, should be less noisy and closer to 717 // the true long-term consMark value, provided its tuned to be slightly overdamped. 718 var ok bool 719 oldConsMark := c.consMark 720 c.consMark, ok = c.consMarkController.next(c.consMark, currentConsMark, 1.0) 721 if !ok { 722 // The error spiraled out of control. This is incredibly unlikely seeing 723 // as this controller is essentially just a smoothing function, but it might 724 // mean that something went very wrong with how currentConsMark was calculated. 725 // Just reset consMark and keep going. 726 c.consMark = 0 727 } 728 729 if debug.gcpacertrace > 0 { 730 printlock() 731 goal := gcGoalUtilization * 100 732 print("pacer: ", int(utilization*100), "% CPU (", int(goal), " exp.) for ") 733 print(c.heapScanWork.Load(), "+", c.stackScanWork.Load(), "+", c.globalsScanWork.Load(), " B work (", c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load(), " B exp.) ") 734 live := c.heapLive.Load() 735 print("in ", c.triggered, " B -> ", live, " B (∆goal ", int64(live)-int64(c.lastHeapGoal), ", cons/mark ", oldConsMark, ")") 736 if !ok { 737 print("[controller reset]") 738 } 739 println() 740 printunlock() 741 } 742 } 743 744 // enlistWorker encourages another dedicated mark worker to start on 745 // another P if there are spare worker slots. It is used by putfull 746 // when more work is made available. 747 // 748 //go:nowritebarrier 749 func (c *gcControllerState) enlistWorker() { 750 // If there are idle Ps, wake one so it will run an idle worker. 751 // NOTE: This is suspected of causing deadlocks. See golang.org/issue/19112. 752 // 753 // if sched.npidle.Load() != 0 && sched.nmspinning.Load() == 0 { 754 // wakep() 755 // return 756 // } 757 758 // There are no idle Ps. If we need more dedicated workers, 759 // try to preempt a running P so it will switch to a worker. 760 if c.dedicatedMarkWorkersNeeded.Load() <= 0 { 761 return 762 } 763 // Pick a random other P to preempt. 764 if gomaxprocs <= 1 { 765 return 766 } 767 gp := getg() 768 if gp == nil || gp.m == nil || gp.m.p == 0 { 769 return 770 } 771 myID := gp.m.p.ptr().id 772 for tries := 0; tries < 5; tries++ { 773 id := int32(fastrandn(uint32(gomaxprocs - 1))) 774 if id >= myID { 775 id++ 776 } 777 p := allp[id] 778 if p.status != _Prunning { 779 continue 780 } 781 if preemptone(p) { 782 return 783 } 784 } 785 } 786 787 // findRunnableGCWorker returns a background mark worker for pp if it 788 // should be run. This must only be called when gcBlackenEnabled != 0. 789 func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) { 790 if gcBlackenEnabled == 0 { 791 throw("gcControllerState.findRunnable: blackening not enabled") 792 } 793 794 // Since we have the current time, check if the GC CPU limiter 795 // hasn't had an update in a while. This check is necessary in 796 // case the limiter is on but hasn't been checked in a while and 797 // so may have left sufficient headroom to turn off again. 798 if now == 0 { 799 now = nanotime() 800 } 801 if gcCPULimiter.needUpdate(now) { 802 gcCPULimiter.update(now) 803 } 804 805 if !gcMarkWorkAvailable(pp) { 806 // No work to be done right now. This can happen at 807 // the end of the mark phase when there are still 808 // assists tapering off. Don't bother running a worker 809 // now because it'll just return immediately. 810 return nil, now 811 } 812 813 // Grab a worker before we commit to running below. 814 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 815 if node == nil { 816 // There is at least one worker per P, so normally there are 817 // enough workers to run on all Ps, if necessary. However, once 818 // a worker enters gcMarkDone it may park without rejoining the 819 // pool, thus freeing a P with no corresponding worker. 820 // gcMarkDone never depends on another worker doing work, so it 821 // is safe to simply do nothing here. 822 // 823 // If gcMarkDone bails out without completing the mark phase, 824 // it will always do so with queued global work. Thus, that P 825 // will be immediately eligible to re-run the worker G it was 826 // just using, ensuring work can complete. 827 return nil, now 828 } 829 830 decIfPositive := func(val *atomic.Int64) bool { 831 for { 832 v := val.Load() 833 if v <= 0 { 834 return false 835 } 836 837 if val.CompareAndSwap(v, v-1) { 838 return true 839 } 840 } 841 } 842 843 if decIfPositive(&c.dedicatedMarkWorkersNeeded) { 844 // This P is now dedicated to marking until the end of 845 // the concurrent mark phase. 846 pp.gcMarkWorkerMode = gcMarkWorkerDedicatedMode 847 } else if c.fractionalUtilizationGoal == 0 { 848 // No need for fractional workers. 849 gcBgMarkWorkerPool.push(&node.node) 850 return nil, now 851 } else { 852 // Is this P behind on the fractional utilization 853 // goal? 854 // 855 // This should be kept in sync with pollFractionalWorkerExit. 856 delta := now - c.markStartTime 857 if delta > 0 && float64(pp.gcFractionalMarkTime)/float64(delta) > c.fractionalUtilizationGoal { 858 // Nope. No need to run a fractional worker. 859 gcBgMarkWorkerPool.push(&node.node) 860 return nil, now 861 } 862 // Run a fractional worker. 863 pp.gcMarkWorkerMode = gcMarkWorkerFractionalMode 864 } 865 866 // Run the background mark worker. 867 gp := node.gp.ptr() 868 casgstatus(gp, _Gwaiting, _Grunnable) 869 if trace.enabled { 870 traceGoUnpark(gp, 0) 871 } 872 return gp, now 873 } 874 875 // resetLive sets up the controller state for the next mark phase after the end 876 // of the previous one. Must be called after endCycle and before commit, before 877 // the world is started. 878 // 879 // The world must be stopped. 880 func (c *gcControllerState) resetLive(bytesMarked uint64) { 881 c.heapMarked = bytesMarked 882 c.heapLive.Store(bytesMarked) 883 c.heapScan.Store(uint64(c.heapScanWork.Load())) 884 c.lastHeapScan = uint64(c.heapScanWork.Load()) 885 c.lastStackScan.Store(uint64(c.stackScanWork.Load())) 886 c.triggered = ^uint64(0) // Reset triggered. 887 888 // heapLive was updated, so emit a trace event. 889 if trace.enabled { 890 traceHeapAlloc(bytesMarked) 891 } 892 } 893 894 // markWorkerStop must be called whenever a mark worker stops executing. 895 // 896 // It updates mark work accounting in the controller by a duration of 897 // work in nanoseconds and other bookkeeping. 898 // 899 // Safe to execute at any time. 900 func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64) { 901 switch mode { 902 case gcMarkWorkerDedicatedMode: 903 c.dedicatedMarkTime.Add(duration) 904 c.dedicatedMarkWorkersNeeded.Add(1) 905 case gcMarkWorkerFractionalMode: 906 c.fractionalMarkTime.Add(duration) 907 case gcMarkWorkerIdleMode: 908 c.idleMarkTime.Add(duration) 909 c.removeIdleMarkWorker() 910 default: 911 throw("markWorkerStop: unknown mark worker mode") 912 } 913 } 914 915 func (c *gcControllerState) update(dHeapLive, dHeapScan int64) { 916 if dHeapLive != 0 { 917 live := gcController.heapLive.Add(dHeapLive) 918 if trace.enabled { 919 // gcController.heapLive changed. 920 traceHeapAlloc(live) 921 } 922 } 923 if gcBlackenEnabled == 0 { 924 // Update heapScan when we're not in a current GC. It is fixed 925 // at the beginning of a cycle. 926 if dHeapScan != 0 { 927 gcController.heapScan.Add(dHeapScan) 928 } 929 } else { 930 // gcController.heapLive changed. 931 c.revise() 932 } 933 } 934 935 func (c *gcControllerState) addScannableStack(pp *p, amount int64) { 936 if pp == nil { 937 c.maxStackScan.Add(amount) 938 return 939 } 940 pp.maxStackScanDelta += amount 941 if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack { 942 c.maxStackScan.Add(pp.maxStackScanDelta) 943 pp.maxStackScanDelta = 0 944 } 945 } 946 947 func (c *gcControllerState) addGlobals(amount int64) { 948 c.globalsScan.Add(amount) 949 } 950 951 // heapGoal returns the current heap goal. 952 func (c *gcControllerState) heapGoal() uint64 { 953 goal, _ := c.heapGoalInternal() 954 return goal 955 } 956 957 // heapGoalInternal is the implementation of heapGoal which returns additional 958 // information that is necessary for computing the trigger. 959 // 960 // The returned minTrigger is always <= goal. 961 func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) { 962 // Start with the goal calculated for gcPercent. 963 goal = c.gcPercentHeapGoal.Load() 964 965 // Check if the memory-limit-based goal is smaller, and if so, pick that. 966 if newGoal := c.memoryLimitHeapGoal(); go119MemoryLimitSupport && newGoal < goal { 967 goal = newGoal 968 } else { 969 // We're not limited by the memory limit goal, so perform a series of 970 // adjustments that might move the goal forward in a variety of circumstances. 971 972 sweepDistTrigger := c.sweepDistMinTrigger.Load() 973 if sweepDistTrigger > goal { 974 // Set the goal to maintain a minimum sweep distance since 975 // the last call to commit. Note that we never want to do this 976 // if we're in the memory limit regime, because it could push 977 // the goal up. 978 goal = sweepDistTrigger 979 } 980 // Since we ignore the sweep distance trigger in the memory 981 // limit regime, we need to ensure we don't propagate it to 982 // the trigger, because it could cause a violation of the 983 // invariant that the trigger < goal. 984 minTrigger = sweepDistTrigger 985 986 // Ensure that the heap goal is at least a little larger than 987 // the point at which we triggered. This may not be the case if GC 988 // start is delayed or if the allocation that pushed gcController.heapLive 989 // over trigger is large or if the trigger is really close to 990 // GOGC. Assist is proportional to this distance, so enforce a 991 // minimum distance, even if it means going over the GOGC goal 992 // by a tiny bit. 993 // 994 // Ignore this if we're in the memory limit regime: we'd prefer to 995 // have the GC respond hard about how close we are to the goal than to 996 // push the goal back in such a manner that it could cause us to exceed 997 // the memory limit. 998 const minRunway = 64 << 10 999 if c.triggered != ^uint64(0) && goal < c.triggered+minRunway { 1000 goal = c.triggered + minRunway 1001 } 1002 } 1003 return 1004 } 1005 1006 // memoryLimitHeapGoal returns a heap goal derived from memoryLimit. 1007 func (c *gcControllerState) memoryLimitHeapGoal() uint64 { 1008 // Start by pulling out some values we'll need. Be careful about overflow. 1009 var heapFree, heapAlloc, mappedReady uint64 1010 for { 1011 heapFree = c.heapFree.load() // Free and unscavenged memory. 1012 heapAlloc = c.totalAlloc.Load() - c.totalFree.Load() // Heap object bytes in use. 1013 mappedReady = c.mappedReady.Load() // Total unreleased mapped memory. 1014 if heapFree+heapAlloc <= mappedReady { 1015 break 1016 } 1017 // It is impossible for total unreleased mapped memory to exceed heap memory, but 1018 // because these stats are updated independently, we may observe a partial update 1019 // including only some values. Thus, we appear to break the invariant. However, 1020 // this condition is necessarily transient, so just try again. In the case of a 1021 // persistent accounting error, we'll deadlock here. 1022 } 1023 1024 // Below we compute a goal from memoryLimit. There are a few things to be aware of. 1025 // Firstly, the memoryLimit does not easily compare to the heap goal: the former 1026 // is total mapped memory by the runtime that hasn't been released, while the latter is 1027 // only heap object memory. Intuitively, the way we convert from one to the other is to 1028 // subtract everything from memoryLimit that both contributes to the memory limit (so, 1029 // ignore scavenged memory) and doesn't contain heap objects. This isn't quite what 1030 // lines up with reality, but it's a good starting point. 1031 // 1032 // In practice this computation looks like the following: 1033 // 1034 // memoryLimit - ((mappedReady - heapFree - heapAlloc) + max(mappedReady - memoryLimit, 0)) - memoryLimitHeapGoalHeadroom 1035 // ^1 ^2 ^3 1036 // 1037 // Let's break this down. 1038 // 1039 // The first term (marker 1) is everything that contributes to the memory limit and isn't 1040 // or couldn't become heap objects. It represents, broadly speaking, non-heap overheads. 1041 // One oddity you may have noticed is that we also subtract out heapFree, i.e. unscavenged 1042 // memory that may contain heap objects in the future. 1043 // 1044 // Let's take a step back. In an ideal world, this term would look something like just 1045 // the heap goal. That is, we "reserve" enough space for the heap to grow to the heap 1046 // goal, and subtract out everything else. This is of course impossible; the definition 1047 // is circular! However, this impossible definition contains a key insight: the amount 1048 // we're *going* to use matters just as much as whatever we're currently using. 1049 // 1050 // Consider if the heap shrinks to 1/10th its size, leaving behind lots of free and 1051 // unscavenged memory. mappedReady - heapAlloc will be quite large, because of that free 1052 // and unscavenged memory, pushing the goal down significantly. 1053 // 1054 // heapFree is also safe to exclude from the memory limit because in the steady-state, it's 1055 // just a pool of memory for future heap allocations, and making new allocations from heapFree 1056 // memory doesn't increase overall memory use. In transient states, the scavenger and the 1057 // allocator actively manage the pool of heapFree memory to maintain the memory limit. 1058 // 1059 // The second term (marker 2) is the amount of memory we've exceeded the limit by, and is 1060 // intended to help recover from such a situation. By pushing the heap goal down, we also 1061 // push the trigger down, triggering and finishing a GC sooner in order to make room for 1062 // other memory sources. Note that since we're effectively reducing the heap goal by X bytes, 1063 // we're actually giving more than X bytes of headroom back, because the heap goal is in 1064 // terms of heap objects, but it takes more than X bytes (e.g. due to fragmentation) to store 1065 // X bytes worth of objects. 1066 // 1067 // The third term (marker 3) subtracts an additional memoryLimitHeapGoalHeadroom bytes from the 1068 // heap goal. As the name implies, this is to provide additional headroom in the face of pacing 1069 // inaccuracies. This is a fixed number of bytes because these inaccuracies disproportionately 1070 // affect small heaps: as heaps get smaller, the pacer's inputs get fuzzier. Shorter GC cycles 1071 // and less GC work means noisy external factors like the OS scheduler have a greater impact. 1072 1073 memoryLimit := uint64(c.memoryLimit.Load()) 1074 1075 // Compute term 1. 1076 nonHeapMemory := mappedReady - heapFree - heapAlloc 1077 1078 // Compute term 2. 1079 var overage uint64 1080 if mappedReady > memoryLimit { 1081 overage = mappedReady - memoryLimit 1082 } 1083 1084 if nonHeapMemory+overage >= memoryLimit { 1085 // We're at a point where non-heap memory exceeds the memory limit on its own. 1086 // There's honestly not much we can do here but just trigger GCs continuously 1087 // and let the CPU limiter reign that in. Something has to give at this point. 1088 // Set it to heapMarked, the lowest possible goal. 1089 return c.heapMarked 1090 } 1091 1092 // Compute the goal. 1093 goal := memoryLimit - (nonHeapMemory + overage) 1094 1095 // Apply some headroom to the goal to account for pacing inaccuracies. 1096 // Be careful about small limits. 1097 if goal < memoryLimitHeapGoalHeadroom || goal-memoryLimitHeapGoalHeadroom < memoryLimitHeapGoalHeadroom { 1098 goal = memoryLimitHeapGoalHeadroom 1099 } else { 1100 goal = goal - memoryLimitHeapGoalHeadroom 1101 } 1102 // Don't let us go below the live heap. A heap goal below the live heap doesn't make sense. 1103 if goal < c.heapMarked { 1104 goal = c.heapMarked 1105 } 1106 return goal 1107 } 1108 1109 const ( 1110 // These constants determine the bounds on the GC trigger as a fraction 1111 // of heap bytes allocated between the start of a GC (heapLive == heapMarked) 1112 // and the end of a GC (heapLive == heapGoal). 1113 // 1114 // The constants are obscured in this way for efficiency. The denominator 1115 // of the fraction is always a power-of-two for a quick division, so that 1116 // the numerator is a single constant integer multiplication. 1117 triggerRatioDen = 64 1118 1119 // The minimum trigger constant was chosen empirically: given a sufficiently 1120 // fast/scalable allocator with 48 Ps that could drive the trigger ratio 1121 // to <0.05, this constant causes applications to retain the same peak 1122 // RSS compared to not having this allocator. 1123 minTriggerRatioNum = 45 // ~0.7 1124 1125 // The maximum trigger constant is chosen somewhat arbitrarily, but the 1126 // current constant has served us well over the years. 1127 maxTriggerRatioNum = 61 // ~0.95 1128 ) 1129 1130 // trigger returns the current point at which a GC should trigger along with 1131 // the heap goal. 1132 // 1133 // The returned value may be compared against heapLive to determine whether 1134 // the GC should trigger. Thus, the GC trigger condition should be (but may 1135 // not be, in the case of small movements for efficiency) checked whenever 1136 // the heap goal may change. 1137 func (c *gcControllerState) trigger() (uint64, uint64) { 1138 goal, minTrigger := c.heapGoalInternal() 1139 1140 // Invariant: the trigger must always be less than the heap goal. 1141 // 1142 // Note that the memory limit sets a hard maximum on our heap goal, 1143 // but the live heap may grow beyond it. 1144 1145 if c.heapMarked >= goal { 1146 // The goal should never be smaller than heapMarked, but let's be 1147 // defensive about it. The only reasonable trigger here is one that 1148 // causes a continuous GC cycle at heapMarked, but respect the goal 1149 // if it came out as smaller than that. 1150 return goal, goal 1151 } 1152 1153 // Below this point, c.heapMarked < goal. 1154 1155 // heapMarked is our absolute minimum, and it's possible the trigger 1156 // bound we get from heapGoalinternal is less than that. 1157 if minTrigger < c.heapMarked { 1158 minTrigger = c.heapMarked 1159 } 1160 1161 // If we let the trigger go too low, then if the application 1162 // is allocating very rapidly we might end up in a situation 1163 // where we're allocating black during a nearly always-on GC. 1164 // The result of this is a growing heap and ultimately an 1165 // increase in RSS. By capping us at a point >0, we're essentially 1166 // saying that we're OK using more CPU during the GC to prevent 1167 // this growth in RSS. 1168 triggerLowerBound := uint64(((goal-c.heapMarked)/triggerRatioDen)*minTriggerRatioNum) + c.heapMarked 1169 if minTrigger < triggerLowerBound { 1170 minTrigger = triggerLowerBound 1171 } 1172 1173 // For small heaps, set the max trigger point at maxTriggerRatio of the way 1174 // from the live heap to the heap goal. This ensures we always have *some* 1175 // headroom when the GC actually starts. For larger heaps, set the max trigger 1176 // point at the goal, minus the minimum heap size. 1177 // 1178 // This choice follows from the fact that the minimum heap size is chosen 1179 // to reflect the costs of a GC with no work to do. With a large heap but 1180 // very little scan work to perform, this gives us exactly as much runway 1181 // as we would need, in the worst case. 1182 maxTrigger := uint64(((goal-c.heapMarked)/triggerRatioDen)*maxTriggerRatioNum) + c.heapMarked 1183 if goal > defaultHeapMinimum && goal-defaultHeapMinimum > maxTrigger { 1184 maxTrigger = goal - defaultHeapMinimum 1185 } 1186 if maxTrigger < minTrigger { 1187 maxTrigger = minTrigger 1188 } 1189 1190 // Compute the trigger from our bounds and the runway stored by commit. 1191 var trigger uint64 1192 runway := c.runway.Load() 1193 if runway > goal { 1194 trigger = minTrigger 1195 } else { 1196 trigger = goal - runway 1197 } 1198 if trigger < minTrigger { 1199 trigger = minTrigger 1200 } 1201 if trigger > maxTrigger { 1202 trigger = maxTrigger 1203 } 1204 if trigger > goal { 1205 print("trigger=", trigger, " heapGoal=", goal, "\n") 1206 print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n") 1207 throw("produced a trigger greater than the heap goal") 1208 } 1209 return trigger, goal 1210 } 1211 1212 // commit recomputes all pacing parameters needed to derive the 1213 // trigger and the heap goal. Namely, the gcPercent-based heap goal, 1214 // and the amount of runway we want to give the GC this cycle. 1215 // 1216 // This can be called any time. If GC is the in the middle of a 1217 // concurrent phase, it will adjust the pacing of that phase. 1218 // 1219 // isSweepDone should be the result of calling isSweepDone(), 1220 // unless we're testing or we know we're executing during a GC cycle. 1221 // 1222 // This depends on gcPercent, gcController.heapMarked, and 1223 // gcController.heapLive. These must be up to date. 1224 // 1225 // Callers must call gcControllerState.revise after calling this 1226 // function if the GC is enabled. 1227 // 1228 // mheap_.lock must be held or the world must be stopped. 1229 func (c *gcControllerState) commit(isSweepDone bool) { 1230 if !c.test { 1231 assertWorldStoppedOrLockHeld(&mheap_.lock) 1232 } 1233 1234 if isSweepDone { 1235 // The sweep is done, so there aren't any restrictions on the trigger 1236 // we need to think about. 1237 c.sweepDistMinTrigger.Store(0) 1238 } else { 1239 // Concurrent sweep happens in the heap growth 1240 // from gcController.heapLive to trigger. Make sure we 1241 // give the sweeper some runway if it doesn't have enough. 1242 c.sweepDistMinTrigger.Store(c.heapLive.Load() + sweepMinHeapDistance) 1243 } 1244 1245 // Compute the next GC goal, which is when the allocated heap 1246 // has grown by GOGC/100 over where it started the last cycle, 1247 // plus additional runway for non-heap sources of GC work. 1248 gcPercentHeapGoal := ^uint64(0) 1249 if gcPercent := c.gcPercent.Load(); gcPercent >= 0 { 1250 gcPercentHeapGoal = c.heapMarked + (c.heapMarked+c.lastStackScan.Load()+c.globalsScan.Load())*uint64(gcPercent)/100 1251 } 1252 // Apply the minimum heap size here. It's defined in terms of gcPercent 1253 // and is only updated by functions that call commit. 1254 if gcPercentHeapGoal < c.heapMinimum { 1255 gcPercentHeapGoal = c.heapMinimum 1256 } 1257 c.gcPercentHeapGoal.Store(gcPercentHeapGoal) 1258 1259 // Compute the amount of runway we want the GC to have by using our 1260 // estimate of the cons/mark ratio. 1261 // 1262 // The idea is to take our expected scan work, and multiply it by 1263 // the cons/mark ratio to determine how long it'll take to complete 1264 // that scan work in terms of bytes allocated. This gives us our GC's 1265 // runway. 1266 // 1267 // However, the cons/mark ratio is a ratio of rates per CPU-second, but 1268 // here we care about the relative rates for some division of CPU 1269 // resources among the mutator and the GC. 1270 // 1271 // To summarize, we have B / cpu-ns, and we want B / ns. We get that 1272 // by multiplying by our desired division of CPU resources. We choose 1273 // to express CPU resources as GOMAPROCS*fraction. Note that because 1274 // we're working with a ratio here, we can omit the number of CPU cores, 1275 // because they'll appear in the numerator and denominator and cancel out. 1276 // As a result, this is basically just "weighing" the cons/mark ratio by 1277 // our desired division of resources. 1278 // 1279 // Furthermore, by setting the runway so that CPU resources are divided 1280 // this way, assuming that the cons/mark ratio is correct, we make that 1281 // division a reality. 1282 c.runway.Store(uint64((c.consMark * (1 - gcGoalUtilization) / (gcGoalUtilization)) * float64(c.lastHeapScan+c.lastStackScan.Load()+c.globalsScan.Load()))) 1283 } 1284 1285 // setGCPercent updates gcPercent. commit must be called after. 1286 // Returns the old value of gcPercent. 1287 // 1288 // The world must be stopped, or mheap_.lock must be held. 1289 func (c *gcControllerState) setGCPercent(in int32) int32 { 1290 if !c.test { 1291 assertWorldStoppedOrLockHeld(&mheap_.lock) 1292 } 1293 1294 out := c.gcPercent.Load() 1295 if in < 0 { 1296 in = -1 1297 } 1298 c.heapMinimum = defaultHeapMinimum * uint64(in) / 100 1299 c.gcPercent.Store(in) 1300 1301 return out 1302 } 1303 1304 //go:linkname setGCPercent runtime/debug.setGCPercent 1305 func setGCPercent(in int32) (out int32) { 1306 // Run on the system stack since we grab the heap lock. 1307 systemstack(func() { 1308 lock(&mheap_.lock) 1309 out = gcController.setGCPercent(in) 1310 gcControllerCommit() 1311 unlock(&mheap_.lock) 1312 }) 1313 1314 // If we just disabled GC, wait for any concurrent GC mark to 1315 // finish so we always return with no GC running. 1316 if in < 0 { 1317 gcWaitOnMark(work.cycles.Load()) 1318 } 1319 1320 return out 1321 } 1322 1323 func readGOGC() int32 { 1324 p := gogetenv("GOGC") 1325 if p == "off" { 1326 return -1 1327 } 1328 if n, ok := atoi32(p); ok { 1329 return n 1330 } 1331 return 100 1332 } 1333 1334 // setMemoryLimit updates memoryLimit. commit must be called after 1335 // Returns the old value of memoryLimit. 1336 // 1337 // The world must be stopped, or mheap_.lock must be held. 1338 func (c *gcControllerState) setMemoryLimit(in int64) int64 { 1339 if !c.test { 1340 assertWorldStoppedOrLockHeld(&mheap_.lock) 1341 } 1342 1343 out := c.memoryLimit.Load() 1344 if in >= 0 { 1345 c.memoryLimit.Store(in) 1346 } 1347 1348 return out 1349 } 1350 1351 //go:linkname setMemoryLimit runtime/debug.setMemoryLimit 1352 func setMemoryLimit(in int64) (out int64) { 1353 // Run on the system stack since we grab the heap lock. 1354 systemstack(func() { 1355 lock(&mheap_.lock) 1356 out = gcController.setMemoryLimit(in) 1357 if in < 0 || out == in { 1358 // If we're just checking the value or not changing 1359 // it, there's no point in doing the rest. 1360 unlock(&mheap_.lock) 1361 return 1362 } 1363 gcControllerCommit() 1364 unlock(&mheap_.lock) 1365 }) 1366 return out 1367 } 1368 1369 func readGOMEMLIMIT() int64 { 1370 p := gogetenv("GOMEMLIMIT") 1371 if p == "" || p == "off" { 1372 return maxInt64 1373 } 1374 n, ok := parseByteCount(p) 1375 if !ok { 1376 print("GOMEMLIMIT=", p, "\n") 1377 throw("malformed GOMEMLIMIT; see `go doc runtime/debug.SetMemoryLimit`") 1378 } 1379 return n 1380 } 1381 1382 type piController struct { 1383 kp float64 // Proportional constant. 1384 ti float64 // Integral time constant. 1385 tt float64 // Reset time. 1386 1387 min, max float64 // Output boundaries. 1388 1389 // PI controller state. 1390 1391 errIntegral float64 // Integral of the error from t=0 to now. 1392 1393 // Error flags. 1394 errOverflow bool // Set if errIntegral ever overflowed. 1395 inputOverflow bool // Set if an operation with the input overflowed. 1396 } 1397 1398 // next provides a new sample to the controller. 1399 // 1400 // input is the sample, setpoint is the desired point, and period is how much 1401 // time (in whatever unit makes the most sense) has passed since the last sample. 1402 // 1403 // Returns a new value for the variable it's controlling, and whether the operation 1404 // completed successfully. One reason this might fail is if error has been growing 1405 // in an unbounded manner, to the point of overflow. 1406 // 1407 // In the specific case of an error overflow occurs, the errOverflow field will be 1408 // set and the rest of the controller's internal state will be fully reset. 1409 func (c *piController) next(input, setpoint, period float64) (float64, bool) { 1410 // Compute the raw output value. 1411 prop := c.kp * (setpoint - input) 1412 rawOutput := prop + c.errIntegral 1413 1414 // Clamp rawOutput into output. 1415 output := rawOutput 1416 if isInf(output) || isNaN(output) { 1417 // The input had a large enough magnitude that either it was already 1418 // overflowed, or some operation with it overflowed. 1419 // Set a flag and reset. That's the safest thing to do. 1420 c.reset() 1421 c.inputOverflow = true 1422 return c.min, false 1423 } 1424 if output < c.min { 1425 output = c.min 1426 } else if output > c.max { 1427 output = c.max 1428 } 1429 1430 // Update the controller's state. 1431 if c.ti != 0 && c.tt != 0 { 1432 c.errIntegral += (c.kp*period/c.ti)*(setpoint-input) + (period/c.tt)*(output-rawOutput) 1433 if isInf(c.errIntegral) || isNaN(c.errIntegral) { 1434 // So much error has accumulated that we managed to overflow. 1435 // The assumptions around the controller have likely broken down. 1436 // Set a flag and reset. That's the safest thing to do. 1437 c.reset() 1438 c.errOverflow = true 1439 return c.min, false 1440 } 1441 } 1442 return output, true 1443 } 1444 1445 // reset resets the controller state, except for controller error flags. 1446 func (c *piController) reset() { 1447 c.errIntegral = 0 1448 } 1449 1450 // addIdleMarkWorker attempts to add a new idle mark worker. 1451 // 1452 // If this returns true, the caller must become an idle mark worker unless 1453 // there's no background mark worker goroutines in the pool. This case is 1454 // harmless because there are already background mark workers running. 1455 // If this returns false, the caller must NOT become an idle mark worker. 1456 // 1457 // nosplit because it may be called without a P. 1458 // 1459 //go:nosplit 1460 func (c *gcControllerState) addIdleMarkWorker() bool { 1461 for { 1462 old := c.idleMarkWorkers.Load() 1463 n, max := int32(old&uint64(^uint32(0))), int32(old>>32) 1464 if n >= max { 1465 // See the comment on idleMarkWorkers for why 1466 // n > max is tolerated. 1467 return false 1468 } 1469 if n < 0 { 1470 print("n=", n, " max=", max, "\n") 1471 throw("negative idle mark workers") 1472 } 1473 new := uint64(uint32(n+1)) | (uint64(max) << 32) 1474 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1475 return true 1476 } 1477 } 1478 } 1479 1480 // needIdleMarkWorker is a hint as to whether another idle mark worker is needed. 1481 // 1482 // The caller must still call addIdleMarkWorker to become one. This is mainly 1483 // useful for a quick check before an expensive operation. 1484 // 1485 // nosplit because it may be called without a P. 1486 // 1487 //go:nosplit 1488 func (c *gcControllerState) needIdleMarkWorker() bool { 1489 p := c.idleMarkWorkers.Load() 1490 n, max := int32(p&uint64(^uint32(0))), int32(p>>32) 1491 return n < max 1492 } 1493 1494 // removeIdleMarkWorker must be called when an new idle mark worker stops executing. 1495 func (c *gcControllerState) removeIdleMarkWorker() { 1496 for { 1497 old := c.idleMarkWorkers.Load() 1498 n, max := int32(old&uint64(^uint32(0))), int32(old>>32) 1499 if n-1 < 0 { 1500 print("n=", n, " max=", max, "\n") 1501 throw("negative idle mark workers") 1502 } 1503 new := uint64(uint32(n-1)) | (uint64(max) << 32) 1504 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1505 return 1506 } 1507 } 1508 } 1509 1510 // setMaxIdleMarkWorkers sets the maximum number of idle mark workers allowed. 1511 // 1512 // This method is optimistic in that it does not wait for the number of 1513 // idle mark workers to reduce to max before returning; it assumes the workers 1514 // will deschedule themselves. 1515 func (c *gcControllerState) setMaxIdleMarkWorkers(max int32) { 1516 for { 1517 old := c.idleMarkWorkers.Load() 1518 n := int32(old & uint64(^uint32(0))) 1519 if n < 0 { 1520 print("n=", n, " max=", max, "\n") 1521 throw("negative idle mark workers") 1522 } 1523 new := uint64(uint32(n)) | (uint64(max) << 32) 1524 if c.idleMarkWorkers.CompareAndSwap(old, new) { 1525 return 1526 } 1527 } 1528 } 1529 1530 // gcControllerCommit is gcController.commit, but passes arguments from live 1531 // (non-test) data. It also updates any consumers of the GC pacing, such as 1532 // sweep pacing and the background scavenger. 1533 // 1534 // Calls gcController.commit. 1535 // 1536 // The heap lock must be held, so this must be executed on the system stack. 1537 // 1538 //go:systemstack 1539 func gcControllerCommit() { 1540 assertWorldStoppedOrLockHeld(&mheap_.lock) 1541 1542 gcController.commit(isSweepDone()) 1543 1544 // Update mark pacing. 1545 if gcphase != _GCoff { 1546 gcController.revise() 1547 } 1548 1549 // TODO(mknyszek): This isn't really accurate any longer because the heap 1550 // goal is computed dynamically. Still useful to snapshot, but not as useful. 1551 if trace.enabled { 1552 traceHeapGoal() 1553 } 1554 1555 trigger, heapGoal := gcController.trigger() 1556 gcPaceSweeper(trigger) 1557 gcPaceScavenger(gcController.memoryLimit.Load(), heapGoal, gcController.lastHeapGoal) 1558 }