// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// Metrics implementation exported to runtime/metrics.

import (
	"internal/godebugs"
	"unsafe"
)

var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1 // 1 == unlocked; used as a semaphore, not a mutex.
	metricsInit bool
	metrics     map[string]metricData

	// Histogram bucket boundaries, computed once by initMetrics and
	// shared by every histogram metric's compute function.
	sizeClassBuckets []float64
	timeHistBuckets  []float64
)

// metricData describes how to sample one metric.
type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}

// metricsLock acquires metricsSema, which guards the metrics map and
// the bucket slices above.
func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}

// metricsUnlock releases metricsSema.
func metricsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}

// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	// Build size-class histogram bucket boundaries, reserving one extra
	// capacity slot for the +Inf boundary appended below.
	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < _NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
	}
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())

	timeHistBuckets = timeHistogramMetricsBuckets()
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(NumCgoCall())
			},
		},
		// Float64 metrics below store the float's bit pattern in
		// out.scalar (see float64bits).
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.idleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.totalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.userTime))
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				// Automatic cycles are everything that wasn't forced.
				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallAllocCount[1:] {
					hist.counts[i] = uint64(count)
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallFreeCount[1:] {
					hist.counts[i] = uint64(count)
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.heapGoal
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.tinyAllocCount)
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(timeHistBuckets)
				// The bottom-most bucket, containing negative values, is tracked
				// separately as underflow, so fill that in manually and then
				// iterate over the rest.
				hist.counts[0] = memstats.gcPauseDist.underflow.Load()
				for i := range memstats.gcPauseDist.counts {
					hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
				}
				hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				// Free heap memory is committed memory not in any other class.
				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
					in.heapStats.inStacks - in.heapStats.inWorkBufs -
					in.heapStats.inPtrScalarBits)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
					in.sysStats.stacksSys + in.sysStats.mSpanSys +
					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
					in.sysStats.gcMiscSys + in.sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcount())
			},
		},
		"/sched/latencies:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(timeHistBuckets)
				// As with /gc/pauses:seconds: underflow first, then the
				// regular buckets, then overflow in the last slot.
				hist.counts[0] = sched.timeToRun.underflow.Load()
				for i := range sched.timeToRun.counts {
					hist.counts[i+1] = sched.timeToRun.counts[i].Load()
				}
				hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
			},
		},
	}

	// Register a zero-valued counter for every non-opaque GODEBUG setting.
	// internal/godebug later replaces compute0 with a real reader via
	// godebug_registerMetric.
	for _, info := range godebugs.All {
		if !info.Opaque {
			metrics["/godebug/non-default-behavior/"+info.Name+":events"] = metricData{compute: compute0}
		}
	}

	metricsInit = true
}

// compute0 is a compute function that always reports 0. It serves as the
// placeholder for GODEBUG metrics until godebug_registerMetric installs a
// real reader.
func
compute0(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = 0
}

// metricReader adapts a plain uint64-returning read function into a
// metricData compute function.
type metricReader func() uint64

// compute samples the metric by calling the underlying read function.
func (f metricReader) compute(_ *statAggregate, out *metricValue) {
	out.kind = metricKindUint64
	out.scalar = f()
}

// godebug_registerMetric installs read as the compute function for an
// already-registered GODEBUG metric. The metric must have been created by
// initMetrics (from godebugs.All); registering an unknown name throws.
//
//go:linkname godebug_registerMetric internal/godebug.registerMetric
func godebug_registerMetric(name string, read func() uint64) {
	metricsLock()
	initMetrics()
	d, ok := metrics[name]
	if !ok {
		throw("runtime: unexpected metric registration for " + name)
	}
	d.compute = metricReader(read).compute
	metrics[name] = d
	metricsUnlock()
}

// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	numStatsDeps
)

// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [1]uint64

// makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet {
	var s statDepSet
	for _, d := range deps {
		s[d/64] |= 1 << (d % 64)
	}
	return s
}

// difference returns set difference of s from b as a new set.
func (s statDepSet) difference(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] &^ b[i]
	}
	return c
}

// union returns the union of the two sets as a new set.
func (s statDepSet) union(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] | b[i]
	}
	return c
}

// empty returns true if there are no dependencies in the set.
func (s *statDepSet) empty() bool {
	for _, c := range s {
		if c != 0 {
			return false
		}
	}
	return true
}

// has returns true if the set contains a given statDep.
func (s *statDepSet) has(d statDep) bool {
	return s[d/64]&(1<<(d%64)) != 0
}

// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by objects.
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}

// compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute() {
	memstats.heapStats.read(&a.heapStatsDelta)

	// Calculate derived stats. Start from the large-object totals and
	// fold in the per-size-class small-object counts.
	a.totalAllocs = a.largeAllocCount
	a.totalFrees = a.largeFreeCount
	a.totalAllocated = a.largeAlloc
	a.totalFreed = a.largeFree
	for i := range a.smallAllocCount {
		na := a.smallAllocCount[i]
		nf := a.smallFreeCount[i]
		a.totalAllocs += na
		a.totalFrees += nf
		a.totalAllocated += na * uint64(class_to_size[i])
		a.totalFreed += nf * uint64(class_to_size[i])
	}
	a.inObjects = a.totalAllocated - a.totalFreed
	a.numObjects = a.totalAllocs - a.totalFrees
}

// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}

// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	// The mspan/mcache in-use counts live behind the heap lock; take it
	// on the system stack, as required for acquiring mheap_.lock.
	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}

// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}

// compute populates the cpuStatsAggregate with values from the runtime.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
}

// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	return float64(ns) / 1e9
}

// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
}

// ensure populates statistics aggregates determined by deps if they
// haven't yet been populated.
func (a *statAggregate) ensure(deps *statDepSet) {
	// Only compute the aggregates that are requested but not yet ensured.
	missing := deps.difference(a.ensured)
	if missing.empty() {
		return
	}
	for i := statDep(0); i < numStatsDeps; i++ {
		if !missing.has(i) {
			continue
		}
		switch i {
		case heapStatsDep:
			a.heapStats.compute()
		case sysStatsDep:
			a.sysStats.compute()
		case cpuStatsDep:
			a.cpuStats.compute()
		}
	}
	a.ensured = a.ensured.union(missing)
}

// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)

// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}

// metricValue is a runtime copy of runtime/metrics.Value (the value part
// of runtime/metrics.Sample) and must be kept structurally identical to
// that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}

// float64HistOrInit tries to pull out an existing float64Histogram
// from the value, but if none exists, then it allocates one with
// the given buckets.
func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
	var hist *metricFloat64Histogram
	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
		// Reuse the histogram the caller already holds.
		hist = (*metricFloat64Histogram)(v.pointer)
	} else {
		v.kind = metricKindFloat64Histogram
		hist = new(metricFloat64Histogram)
		v.pointer = unsafe.Pointer(hist)
	}
	hist.buckets = buckets
	// A histogram of N boundaries has N-1 counts; reallocate only if the
	// existing counts slice doesn't match.
	if len(hist.counts) != len(hist.buckets)-1 {
		hist.counts = make([]uint64, len(buckets)-1)
	}
	return hist
}

// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}

// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate

type metricName struct {
	name string
	kind metricKind
}

// readMetricNames is the implementation of runtime/metrics.readMetricNames,
// used by the runtime/metrics test and otherwise unreferenced.
//
//go:linkname readMetricNames runtime/metrics_test.runtime_readMetricNames
func readMetricNames() []string {
	metricsLock()
	initMetrics()
	n := len(metrics)
	metricsUnlock()

	// Allocate outside the lock; n is only a capacity hint, append below
	// handles any change in the map's size in between.
	list := make([]string, 0, n)

	metricsLock()
	for name := range metrics {
		list = append(list, name)
	}
	metricsUnlock()

	return list
}

// readMetrics is the implementation of runtime/metrics.Read.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))

	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Clear agg defensively.
	agg = statAggregate{}

	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			// Unknown metric name: mark the sample bad rather than failing.
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)

		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}

	metricsUnlock()
}