github.com/mtsmfm/go/src@v0.0.0-20221020090648-44bdcb9f8fde/runtime/metrics.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// Metrics implementation exported to runtime/metrics.

import (
	"unsafe"
)

var (
	// metrics is a map of runtime/metrics keys to data used by the runtime
	// to sample each metric's value. metricsInit indicates it has been
	// initialized.
	//
	// These fields are protected by metricsSema which should be
	// locked/unlocked with metricsLock() / metricsUnlock().
	metricsSema uint32 = 1
	metricsInit bool
	metrics     map[string]metricData

	sizeClassBuckets []float64
	timeHistBuckets  []float64
)

type metricData struct {
	// deps is the set of runtime statistics that this metric
	// depends on. Before compute is called, the statAggregate
	// which will be passed must ensure() these dependencies.
	deps statDepSet

	// compute is a function that populates a metricValue
	// given a populated statAggregate structure.
	compute func(in *statAggregate, out *metricValue)
}

func metricsLock() {
	// Acquire the metricsSema but with handoff. Operations are typically
	// expensive enough that queueing up goroutines and handing off between
	// them will be noticeably better-behaved.
	semacquire1(&metricsSema, true, 0, 0, waitReasonSemacquire)
	if raceenabled {
		raceacquire(unsafe.Pointer(&metricsSema))
	}
}

func metricsUnlock() {
	if raceenabled {
		racerelease(unsafe.Pointer(&metricsSema))
	}
	semrelease(&metricsSema)
}

// initMetrics initializes the metrics map if it hasn't been yet.
//
// metricsSema must be held.
func initMetrics() {
	if metricsInit {
		return
	}

	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
	// Skip size class 0 which is a stand-in for large objects, but large
	// objects are tracked separately (and they actually get placed in
	// the last bucket, not the first).
	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
	for i := 1; i < _NumSizeClasses; i++ {
		// Size classes have an inclusive upper-bound
		// and exclusive lower bound (e.g. 48-byte size class is
		// (32, 48]) whereas we want an inclusive lower-bound
		// and exclusive upper-bound (e.g. 48-byte size class is
		// [33, 49). We can achieve this by shifting all bucket
		// boundaries up by 1.
		//
		// Also, a float64 can precisely represent integers with
		// value up to 2^53 and size classes are relatively small
		// (nowhere near 2^48 even) so this will give us exact
		// boundaries.
		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
	}
	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
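	// Editor's sketch (not part of the original source): with the usual
	// size classes (8, 16, 24, 32, 48, ... bytes) the boundaries above come
	// out as [1, 9, 17, 25, 33, 49, ..., +Inf), and the bucket index for a
	// byte size v could be recovered with a binary search over the
	// half-open intervals [buckets[i], buckets[i+1]), e.g. via a
	// hypothetical helper (assuming "sort" were importable here):
	//
	//	func bucketFor(buckets []float64, v float64) int {
	//		return sort.Search(len(buckets), func(i int) bool {
	//			return buckets[i] > v
	//		}) - 1
	//	}
	//
	// A 40-byte allocation falls in the 48-byte size class and would be
	// counted in the bucket [33, 49).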
	timeHistBuckets = timeHistogramMetricsBuckets()
	metrics = map[string]metricData{
		"/cgo/go-to-c-calls:calls": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(NumCgoCall())
			},
		},
		"/cpu/classes/gc/mark/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcAssistTime))
			},
		},
		"/cpu/classes/gc/mark/dedicated:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcDedicatedTime))
			},
		},
		"/cpu/classes/gc/mark/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcIdleTime))
			},
		},
		"/cpu/classes/gc/pause:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcPauseTime))
			},
		},
		"/cpu/classes/gc/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.gcTotalTime))
			},
		},
		"/cpu/classes/idle:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.idleTime))
			},
		},
		"/cpu/classes/scavenge/assist:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeAssistTime))
			},
		},
		"/cpu/classes/scavenge/background:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeBgTime))
			},
		},
		"/cpu/classes/scavenge/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.scavengeTotalTime))
			},
		},
		"/cpu/classes/total:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.totalTime))
			},
		},
		"/cpu/classes/user:cpu-seconds": {
			deps: makeStatDepSet(cpuStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(in.cpuStats.userTime))
			},
		},
		"/gc/cycles/automatic:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone - in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/forced:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesForced
			},
		},
		"/gc/cycles/total:gc-cycles": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.gcCyclesDone
			},
		},
		"/gc/heap/allocs-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeAllocCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallAllocCount[1:] {
					hist.counts[i] = uint64(count)
				}
			},
		},
		"/gc/heap/allocs:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocated
			},
		},
		"/gc/heap/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalAllocs
			},
		},
		"/gc/heap/frees-by-size:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(sizeClassBuckets)
				hist.counts[len(hist.counts)-1] = uint64(in.heapStats.largeFreeCount)
				// Cut off the first index which is ostensibly for size class 0,
				// but large objects are tracked separately so it's actually unused.
				for i, count := range in.heapStats.smallFreeCount[1:] {
					hist.counts[i] = uint64(count)
				}
			},
		},
		"/gc/heap/frees:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFreed
			},
		},
		"/gc/heap/frees:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.totalFrees
			},
		},
		"/gc/heap/goal:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.heapGoal
			},
		},
		"/gc/heap/objects:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.numObjects
			},
		},
		"/gc/heap/tiny/allocs:objects": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.tinyAllocCount)
			},
		},
		"/gc/limiter/last-enabled:gc-cycle": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcCPULimiter.lastEnabledCycle.Load())
			},
		},
		"/gc/pauses:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(timeHistBuckets)
				// The bottom-most bucket, containing negative values, is
				// tracked separately as underflow, so fill that in manually
				// and then iterate over the rest.
				hist.counts[0] = memstats.gcPauseDist.underflow.Load()
				for i := range memstats.gcPauseDist.counts {
					hist.counts[i+1] = memstats.gcPauseDist.counts[i].Load()
				}
				hist.counts[len(hist.counts)-1] = memstats.gcPauseDist.overflow.Load()
			},
		},
		"/gc/stack/starting-size:bytes": {
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(startingStackSize)
			},
		},
		"/memory/classes/heap/free:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed - in.heapStats.inHeap -
					in.heapStats.inStacks - in.heapStats.inWorkBufs -
					in.heapStats.inPtrScalarBits)
			},
		},
		"/memory/classes/heap/objects:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.heapStats.inObjects
			},
		},
		"/memory/classes/heap/released:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.released)
			},
		},
		"/memory/classes/heap/stacks:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inStacks)
			},
		},
		"/memory/classes/heap/unused:bytes": {
			deps: makeStatDepSet(heapStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inHeap) - in.heapStats.inObjects
			},
		},
		"/memory/classes/metadata/mcache/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheSys - in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mcache/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mCacheInUse
			},
		},
		"/memory/classes/metadata/mspan/free:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanSys - in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/mspan/inuse:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.mSpanInUse
			},
		},
		"/memory/classes/metadata/other:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.inWorkBufs+in.heapStats.inPtrScalarBits) + in.sysStats.gcMiscSys
			},
		},
		"/memory/classes/os-stacks:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.stacksSys
			},
		},
		"/memory/classes/other:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.otherSys
			},
		},
		"/memory/classes/profiling/buckets:bytes": {
			deps: makeStatDepSet(sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = in.sysStats.buckHashSys
			},
		},
		"/memory/classes/total:bytes": {
			deps: makeStatDepSet(heapStatsDep, sysStatsDep),
			compute: func(in *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(in.heapStats.committed+in.heapStats.released) +
					in.sysStats.stacksSys + in.sysStats.mSpanSys +
					in.sysStats.mCacheSys + in.sysStats.buckHashSys +
					in.sysStats.gcMiscSys + in.sysStats.otherSys
			},
		},
		"/sched/gomaxprocs:threads": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gomaxprocs)
			},
		},
		"/sched/goroutines:goroutines": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindUint64
				out.scalar = uint64(gcount())
			},
		},
		"/sched/latencies:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				hist := out.float64HistOrInit(timeHistBuckets)
				hist.counts[0] = sched.timeToRun.underflow.Load()
				for i := range sched.timeToRun.counts {
					hist.counts[i+1] = sched.timeToRun.counts[i].Load()
				}
				hist.counts[len(hist.counts)-1] = sched.timeToRun.overflow.Load()
			},
		},
		"/sync/mutex/wait/total:seconds": {
			compute: func(_ *statAggregate, out *metricValue) {
				out.kind = metricKindFloat64
				out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load()))
			},
		},
	}
	metricsInit = true
}

// statDep is a dependency on a group of statistics
// that a metric might have.
type statDep uint

const (
	heapStatsDep statDep = iota // corresponds to heapStatsAggregate
	sysStatsDep                 // corresponds to sysStatsAggregate
	cpuStatsDep                 // corresponds to cpuStatsAggregate
	numStatsDeps
)

// statDepSet represents a set of statDeps.
//
// Under the hood, it's a bitmap.
type statDepSet [1]uint64

// makeStatDepSet creates a new statDepSet from a list of statDeps.
func makeStatDepSet(deps ...statDep) statDepSet {
	var s statDepSet
	for _, d := range deps {
		s[d/64] |= 1 << (d % 64)
	}
	return s
}

// difference returns the set difference of s from b as a new set.
func (s statDepSet) difference(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] &^ b[i]
	}
	return c
}

// union returns the union of the two sets as a new set.
func (s statDepSet) union(b statDepSet) statDepSet {
	var c statDepSet
	for i := range s {
		c[i] = s[i] | b[i]
	}
	return c
}

// empty returns true if there are no dependencies in the set.
func (s *statDepSet) empty() bool {
	for _, c := range s {
		if c != 0 {
			return false
		}
	}
	return true
}

// has returns true if the set contains a given statDep.
func (s *statDepSet) has(d statDep) bool {
	return s[d/64]&(1<<(d%64)) != 0
}
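// Editor's sketch (not part of the original source): the set operations
// above are ordinary bitmap manipulations. For example:
//
//	s := makeStatDepSet(heapStatsDep, cpuStatsDep) // bits 0 and 2 set
//	s.has(sysStatsDep)                             // false
//	s.difference(makeStatDepSet(cpuStatsDep))      // only heapStatsDep remains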
// heapStatsAggregate represents memory stats obtained from the
// runtime. This set of stats is grouped together because they
// depend on each other in some way to make sense of the runtime's
// current heap memory use. They're also sharded across Ps, so it
// makes sense to grab them all at once.
type heapStatsAggregate struct {
	heapStatsDelta

	// Derived from values in heapStatsDelta.

	// inObjects is the bytes of memory occupied by objects.
	inObjects uint64

	// numObjects is the number of live objects in the heap.
	numObjects uint64

	// totalAllocated is the total bytes of heap objects allocated
	// over the lifetime of the program.
	totalAllocated uint64

	// totalFreed is the total bytes of heap objects freed
	// over the lifetime of the program.
	totalFreed uint64

	// totalAllocs is the number of heap objects allocated over
	// the lifetime of the program.
	totalAllocs uint64

	// totalFrees is the number of heap objects freed over
	// the lifetime of the program.
	totalFrees uint64
}

// compute populates the heapStatsAggregate with values from the runtime.
func (a *heapStatsAggregate) compute() {
	memstats.heapStats.read(&a.heapStatsDelta)

	// Calculate derived stats.
	a.totalAllocs = a.largeAllocCount
	a.totalFrees = a.largeFreeCount
	a.totalAllocated = a.largeAlloc
	a.totalFreed = a.largeFree
	for i := range a.smallAllocCount {
		na := a.smallAllocCount[i]
		nf := a.smallFreeCount[i]
		a.totalAllocs += na
		a.totalFrees += nf
		a.totalAllocated += na * uint64(class_to_size[i])
		a.totalFreed += nf * uint64(class_to_size[i])
	}
	a.inObjects = a.totalAllocated - a.totalFreed
	a.numObjects = a.totalAllocs - a.totalFrees
}
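// Editor's note (not part of the original source): as a concrete example of
// the derivation above, suppose one large allocation of 102400 bytes and
// three allocations in the 48-byte size class, one of which was freed:
//
//	totalAllocs    = 1 + 3         // 4 objects
//	totalFrees     = 0 + 1         // 1 object
//	totalAllocated = 102400 + 3*48 // 102544 bytes
//	totalFreed     = 0 + 1*48      // 48 bytes
//	inObjects      = 102544 - 48   // 102496 bytes of live objects
//	numObjects     = 4 - 1         // 3 live objects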
// sysStatsAggregate represents system memory stats obtained
// from the runtime. This set of stats is grouped together because
// they're all relatively cheap to acquire and generally independent
// of one another and other runtime memory stats. The fact that they
// may be acquired at different times, especially with respect to
// heapStatsAggregate, means there could be some skew, but because
// these stats are independent, there's no real consistency issue here.
type sysStatsAggregate struct {
	stacksSys      uint64
	mSpanSys       uint64
	mSpanInUse     uint64
	mCacheSys      uint64
	mCacheInUse    uint64
	buckHashSys    uint64
	gcMiscSys      uint64
	otherSys       uint64
	heapGoal       uint64
	gcCyclesDone   uint64
	gcCyclesForced uint64
}

// compute populates the sysStatsAggregate with values from the runtime.
func (a *sysStatsAggregate) compute() {
	a.stacksSys = memstats.stacks_sys.load()
	a.buckHashSys = memstats.buckhash_sys.load()
	a.gcMiscSys = memstats.gcMiscSys.load()
	a.otherSys = memstats.other_sys.load()
	a.heapGoal = gcController.heapGoal()
	a.gcCyclesDone = uint64(memstats.numgc)
	a.gcCyclesForced = uint64(memstats.numforcedgc)

	systemstack(func() {
		lock(&mheap_.lock)
		a.mSpanSys = memstats.mspan_sys.load()
		a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
		a.mCacheSys = memstats.mcache_sys.load()
		a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
		unlock(&mheap_.lock)
	})
}

// cpuStatsAggregate represents CPU stats obtained from the runtime
// acquired together to avoid skew and inconsistencies.
type cpuStatsAggregate struct {
	cpuStats
}

// compute populates the cpuStatsAggregate with values from the runtime.
func (a *cpuStatsAggregate) compute() {
	a.cpuStats = work.cpuStats
}

// nsToSec takes a duration in nanoseconds and converts it to seconds as
// a float64.
func nsToSec(ns int64) float64 {
	return float64(ns) / 1e9
}

// statAggregate is the main driver of the metrics implementation.
//
// It contains multiple aggregates of runtime statistics, as well
// as a set of these aggregates that it has populated. The aggregates
// are populated lazily by its ensure method.
type statAggregate struct {
	ensured   statDepSet
	heapStats heapStatsAggregate
	sysStats  sysStatsAggregate
	cpuStats  cpuStatsAggregate
}

// ensure populates statistics aggregates determined by deps if they
// haven't yet been populated.
func (a *statAggregate) ensure(deps *statDepSet) {
	missing := deps.difference(a.ensured)
	if missing.empty() {
		return
	}
	for i := statDep(0); i < numStatsDeps; i++ {
		if !missing.has(i) {
			continue
		}
		switch i {
		case heapStatsDep:
			a.heapStats.compute()
		case sysStatsDep:
			a.sysStats.compute()
		case cpuStatsDep:
			a.cpuStats.compute()
		}
	}
	a.ensured = a.ensured.union(missing)
}

// metricKind is a runtime copy of runtime/metrics.ValueKind and
// must be kept structurally identical to that type.
type metricKind int

const (
	// These values must be kept identical to their corresponding Kind* values
	// in the runtime/metrics package.
	metricKindBad metricKind = iota
	metricKindUint64
	metricKindFloat64
	metricKindFloat64Histogram
)

// metricSample is a runtime copy of runtime/metrics.Sample and
// must be kept structurally identical to that type.
type metricSample struct {
	name  string
	value metricValue
}

// metricValue is a runtime copy of runtime/metrics.Value and
// must be kept structurally identical to that type.
type metricValue struct {
	kind    metricKind
	scalar  uint64         // contains scalar values for scalar Kinds.
	pointer unsafe.Pointer // contains non-scalar values.
}

// float64HistOrInit tries to pull out an existing float64Histogram
// from the value, but if none exists, then it allocates one with
// the given buckets.
func (v *metricValue) float64HistOrInit(buckets []float64) *metricFloat64Histogram {
	var hist *metricFloat64Histogram
	if v.kind == metricKindFloat64Histogram && v.pointer != nil {
		hist = (*metricFloat64Histogram)(v.pointer)
	} else {
		v.kind = metricKindFloat64Histogram
		hist = new(metricFloat64Histogram)
		v.pointer = unsafe.Pointer(hist)
	}
	hist.buckets = buckets
	if len(hist.counts) != len(hist.buckets)-1 {
		hist.counts = make([]uint64, len(buckets)-1)
	}
	return hist
}

// metricFloat64Histogram is a runtime copy of runtime/metrics.Float64Histogram
// and must be kept structurally identical to that type.
type metricFloat64Histogram struct {
	counts  []uint64
	buckets []float64
}

// agg is used by readMetrics, and is protected by metricsSema.
//
// Managed as a global variable because its pointer will be
// an argument to a dynamically-defined function, and we'd
// like to avoid it escaping to the heap.
var agg statAggregate
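// Editor's sketch (not part of the original source): because
// float64HistOrInit reuses an existing histogram, repeated reads into the
// same metricValue do not reallocate:
//
//	var v metricValue
//	h1 := v.float64HistOrInit(timeHistBuckets) // allocates
//	h2 := v.float64HistOrInit(timeHistBuckets) // reuses: h1 == h2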
// readMetrics is the implementation of runtime/metrics.Read.
//
//go:linkname readMetrics runtime/metrics.runtime_readMetrics
func readMetrics(samplesp unsafe.Pointer, len int, cap int) {
	// Construct a slice from the args.
	sl := slice{samplesp, len, cap}
	samples := *(*[]metricSample)(unsafe.Pointer(&sl))

	metricsLock()

	// Ensure the map is initialized.
	initMetrics()

	// Clear agg defensively.
	agg = statAggregate{}

	// Sample.
	for i := range samples {
		sample := &samples[i]
		data, ok := metrics[sample.name]
		if !ok {
			sample.value.kind = metricKindBad
			continue
		}
		// Ensure we have all the stats we need.
		// agg is populated lazily.
		agg.ensure(&data.deps)

		// Compute the value based on the stats we have.
		data.compute(&agg, &sample.value)
	}

	metricsUnlock()
}
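// Editor's sketch (not part of the original source): from user code this
// entry point is reached through the public runtime/metrics API, e.g.:
//
//	import (
//		"fmt"
//		"runtime/metrics"
//	)
//
//	samples := []metrics.Sample{{Name: "/sched/goroutines:goroutines"}}
//	metrics.Read(samples)
//	if samples[0].Value.Kind() == metrics.KindUint64 {
//		fmt.Println(samples[0].Value.Uint64())
//	}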