gvisor.dev/gvisor@v0.0.0-20240520182842-f9d4d51c7e0f/pkg/sentry/platform/systrap/metrics.go

// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package systrap

import (
	"time"

	"gvisor.dev/gvisor/pkg/atomicbitops"
	"gvisor.dev/gvisor/pkg/hostarch"
)

// This file contains all logic related to context switch latency metrics.
//
// Latency metrics are the main method by which the fastpath for both stub
// threads and the sentry is enabled and disabled. We measure latency in CPU
// cycles.
//
// The high level overview of metric collection looks like this:
// 1a) When a context is switched from the sentry to the stub, the sentry
//     records the time it was put into the context queue.
// 1b) When a stub thread picks up the context from the context queue, the stub
//     thread records the time when it's about to switch back to user code.
// Getting the diff between these timestamps gives us the stub-bound latency.
//
// 2a) When a stub thread gives back a context to the sentry for handling,
//     it records the time just before notifying the sentry task goroutine.
// 2b) When the task goroutine sees that it has been notified, it records the
//     time.
// Getting the diff between these timestamps gives us the sentry-bound latency.
//
// 3) Both latencies are recorded at once via recordLatency(). This means
//    there is a delay in getting stubBoundLatencies. In practice this should
//    not matter that much due to our relatively large latency measurement
//    periods.
//
// There is a bucket array for each latency type, where each bucket is of size
// `bucketIncrements`. Latencies are collected in time periods of length
// `recordingPeriod`, and measurements for the current period are stored
// in the `latencies` variable.

type latencyBuckets [numLatencyBuckets]atomicbitops.Uint64
type cpuTicks uint64

const (
	numLatencyBuckets = 80
	bucketIncrements  = 2048

	// minNecessaryRecordings defines the minimum number of recordings we
	// want to see in latencyBuckets in order to get a reasonable median.
	minNecessaryRecordings = 5
)

// neverEnableFastPath is used for completely disabling the fast path.
// It is set once, so it doesn't need any synchronization.
var neverEnableFastPath bool

// latencyRecorder is used to collect latency metrics.
type latencyRecorder struct {
	stubBound   latencyBuckets
	sentryBound latencyBuckets
}

// latencies stores the latency counts for the current measurement period.
var latencies latencyRecorder

// record increments the bucket assigned to the given latency l.
//
//go:nosplit
func (b *latencyBuckets) record(l cpuTicks) {
	bucket := l / bucketIncrements
	if bucket >= numLatencyBuckets {
		bucket = numLatencyBuckets - 1
	}
	b[bucket].Add(1)
}
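// An illustrative sketch (not part of the original file) of the bucketing
// arithmetic: a latency of 5000 cycles lands in bucket 5000/2048 = 2, and
// anything at or above numLatencyBuckets*bucketIncrements (163840 cycles) is
// clamped into the last bucket, so medians are only resolved at
// bucketIncrements granularity:
//
//	var b latencyBuckets
//	b.record(5000)     // increments b[2]
//	b.record(200000)   // clamped: increments b[numLatencyBuckets-1]
//	m := b.getMedian() // 0 here: fewer than minNecessaryRecordings samples
//	_ = m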
// getMedian returns a latency measure in the range of
// [bucketIncrements, numLatencyBuckets * bucketIncrements], or 0 if unable to
// find a median in the latencyBuckets.
func (b *latencyBuckets) getMedian() cpuTicks {
	i := 0
	j := numLatencyBuckets - 1
	var totalForwards, totalBackwards uint64
	for i <= j {
		if totalForwards < totalBackwards {
			totalForwards += b[i].Load()
			i++
		} else {
			totalBackwards += b[j].Load()
			j--
		}
	}
	if totalForwards+totalBackwards < minNecessaryRecordings {
		return 0
	}
	return cpuTicks(max(uint64(i), 1) * bucketIncrements)
}

// merge combines two latencyBuckets instances.
func (b *latencyBuckets) merge(other *latencyBuckets) {
	for i := 0; i < numLatencyBuckets; i++ {
		b[i].Add(other[i].Load())
	}
}

// reset zeroes all buckets.
func (b *latencyBuckets) reset() {
	for i := 0; i < numLatencyBuckets; i++ {
		b[i].Store(0)
	}
}

// recordLatency records the latency of both the sentry->stub and the
// stub->sentry context switches.
// For the stub->sentry context switch, the final timestamp is taken by this
// function.
// Preconditions:
//   - ctx.isAcked() is true.
//
//go:nosplit
func (sc *sharedContext) recordLatency() {
	// Record stub->sentry latency.
	sentryBoundLatency := sc.getStateChangedTimeDiff()
	if sentryBoundLatency != 0 {
		latencies.sentryBound.record(sentryBoundLatency)
	}

	// Record sentry->stub latency.
	stubBoundLatency := sc.getAckedTimeDiff()
	if stubBoundLatency != 0 {
		latencies.stubBound.record(stubBoundLatency)
	}

	updateDebugMetrics(stubBoundLatency, sentryBoundLatency)
}
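// To make the measurement flow above concrete, here is a hypothetical
// timeline (the cycle counts are made up for illustration):
//
//	t0 = 1000: sentry enqueues the context        (step 1a)
//	t1 = 4000: stub thread acks and resumes user  (step 1b) -> stub-bound = 3000
//	t2 = 9000: stub notifies the sentry           (step 2a)
//	t3 = 9500: task goroutine observes the change (step 2b) -> sentry-bound = 500
//
// recordLatency() runs at t3, so both diffs are recorded together: the
// stub-bound sample is simply published one hop later than it was measured.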
// When a measurement period ends, the latencies are used to determine the fast
// path state. Fastpath is independently enabled for both the sentry and stub
// threads, and is modeled as the following state machine:
//
//	   +----------StubFPOff,SentryFPOff-------+
//	   |          ^                ^          |
//	   V          |                |          V
//	+-->StubFPOn,SentryFPOff    StubFPOff,SentryFPOn<--+
//	|      |  ^                     |  ^               |
//	|      V  |                     V  |               |
//	|  StubFPOn,SentryFPOn      StubFPOn,SentryFPOn    |
//	|  LastEnabledSentryFP      LastEnabledStubFP      |
//	|      |                        |                  |
//	|      |                        |                  |
//	|      +------> StubFPOn,SentryFPOn <------+       |
//	|              |                  |                |
//	|______________|                  |________________|
//
// The default state is to have both stub and sentry fastpath OFF.
// A state transition to enable one fastpath is done when
// fpState.(stub|sentry)FPBackoff reaches 0. (stub|sentry)FPBackoff is
// decremented every recording period that the corresponding fastpath is
// disabled.
// A state transition to disable one fastpath is decided through the predicates
// shouldDisableStubFP or shouldDisableSentryFP, and activated with
// disableStubFP or disableSentryFP.
//
// Why are there three states in which both FPs are ON? Fastpaths are
// interdependent: enabling one fastpath can degrade the latency metrics of the
// other when there are not enough CPUs to run both. So it's very possible that
// the system finds itself in a state where it's beneficial to run one fastpath
// but not the other, based on the workload it's doing. For this case, we need
// to remember what the last stable state was to return to, because the metrics
// will likely be bad enough for both sides to be eligible for being disabled.
//
// Once the system establishes that having both the stub and sentry fastpath ON
// is acceptable, it prioritizes disabling the stub fastpath over disabling the
// sentry fastpath, because the sentry fastpath takes at most one spinning
// thread.

const (
	recordingPeriod                = 400 * time.Microsecond
	fastPathBackoffMin             = 2
	maxRecentFPFailures            = 9
	numConsecutiveFailsToDisableFP = 2
)

// fastPathState is used to keep track of long term metrics that span beyond
// one measurement period.
type fastPathState struct {
	// stubBoundBaselineLatency and sentryBoundBaselineLatency record all
	// latency measures recorded during periods when their respective
	// fastpath was OFF.
	stubBoundBaselineLatency   latencyBuckets
	sentryBoundBaselineLatency latencyBuckets

	// stubFPBackoff and sentryFPBackoff are the periods remaining until
	// the system attempts to use the fastpath again.
	stubFPBackoff   int
	sentryFPBackoff int

	// stubFPRecentFailures and sentryFPRecentFailures are counters in the
	// range [0, maxRecentFPFailures] that are incremented by
	// disable(Stub|Sentry)FP and decremented by (stub|sentry)FPSuccess.
	// They are used to set the backoffs.
	stubFPRecentFailures   int
	sentryFPRecentFailures int

	consecutiveStubFPFailures   int
	consecutiveSentryFPFailures int

	_ [hostarch.CacheLineSize]byte
	// stubFastPathEnabled is a global flag referenced in other parts of
	// systrap to determine if the stub fast path is enabled or not.
	stubFastPathEnabled atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// sentryFastPathEnabled is a global flag referenced in other parts of
	// systrap to determine if the sentry fastpath is enabled or not.
	sentryFastPathEnabled atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// nrMaxAwakeStubThreads is the maximum number of awake stub threads
	// over all subprocesses at this moment.
	nrMaxAwakeStubThreads atomicbitops.Uint32

	// usedStubFastPath and usedSentryFastPath are reset every recording
	// period, and are populated in case the system actually used the
	// fastpath (i.e. the stub or dispatcher spun for some time without
	// work).
	_ [hostarch.CacheLineSize]byte
	usedStubFastPath atomicbitops.Bool
	_ [hostarch.CacheLineSize]byte
	usedSentryFastPath atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// curState is the current fastpath state function, which is called at
	// the end of every recording period.
	curState func(*fastPathState)
}

var (
	fastpath = fastPathState{
		stubFPBackoff:   fastPathBackoffMin,
		sentryFPBackoff: fastPathBackoffMin,
		curState:        sentryOffStubOff,
	}

	// fastPathContextLimit is the maximum number of contexts after which the
	// fast path in stub threads is disabled. Its value can be higher than the
	// number of CPUs, because the Sentry runs with higher priority than stub
	// threads and deepSleepTimeout is much shorter than the Linux scheduler
	// timeslice; the only thing that matters here is whether the Sentry
	// handles syscalls faster than the overhead of scheduling another stub
	// thread.
	//
	// It is set after maxSysmsgThreads is initialized.
	fastPathContextLimit = uint32(0)
)
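// A hypothetical sketch of the limit in action (the value 16 is made up for
// illustration): with maxSysmsgThreads = 16, controlFastPath() sets
// fastPathContextLimit = 32, so stubFastPath() reports the stub fastpath as
// usable only while nrMaxAwakeStubThreads <= 32, even while
// stubFastPathEnabled remains true:
//
//	fastpath.stubFastPathEnabled.Store(true)
//	fastpath.nrMaxAwakeStubThreads.Store(40)
//	_ = fastpath.stubFastPath() // false: too many awake stub threads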
// controlFastPath is spawned as a goroutine when the Systrap platform is
// created; it re-evaluates the fastpath state at the end of every recording
// period.
func controlFastPath() {
	fastPathContextLimit = uint32(maxSysmsgThreads * 2)

	for {
		time.Sleep(recordingPeriod)

		fastpath.curState(&fastpath)
		// Reset FP trackers.
		fastpath.usedStubFastPath.Store(false)
		fastpath.usedSentryFastPath.Store(false)
	}
}

// getBackoff returns the number of recording periods that fastpath should
// remain disabled for, based on the number of recent failures.
func getBackoff(recentFailures int) int {
	return 1 << recentFailures
}

//go:nosplit
func (s *fastPathState) sentryFastPath() bool {
	return s.sentryFastPathEnabled.Load()
}

//go:nosplit
func (s *fastPathState) stubFastPath() bool {
	return s.stubFastPathEnabled.Load() && (s.nrMaxAwakeStubThreads.Load() <= fastPathContextLimit)
}

// enableSentryFP is a wrapper to unconditionally enable sentry FP and
// increment a debug metric.
func (s *fastPathState) enableSentryFP() {
	s.sentryFastPathEnabled.Store(true)
	numTimesSentryFastPathEnabled.Increment()
}

// disableSentryFP returns true if the sentry fastpath was able to be disabled.
//
// It takes two calls to disableSentryFP without any calls to sentryFPSuccess
// in between to disable the sentry fastpath. This is done in order to mitigate
// the effects of outlier measures due to rdtsc inaccuracies.
func (s *fastPathState) disableSentryFP() bool {
	s.consecutiveSentryFPFailures++
	if s.consecutiveSentryFPFailures < numConsecutiveFailsToDisableFP {
		return false
	}
	s.consecutiveSentryFPFailures = 0
	s.sentryFastPathEnabled.Store(false)
	numTimesSentryFastPathDisabled.Increment()

	s.sentryFPBackoff = getBackoff(s.sentryFPRecentFailures)
	s.sentryFPRecentFailures = min(maxRecentFPFailures, s.sentryFPRecentFailures+1)
	return true
}
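// Worked example of the disable hysteresis and backoff (the failure pattern
// is made up for illustration): disabling requires
// numConsecutiveFailsToDisableFP = 2 failing periods in a row, and each
// successful disable sets the backoff to getBackoff(recentFailures) before
// incrementing recentFailures:
//
//	fail, success     -> consecutive counter resets; the FP stays enabled
//	fail, fail        -> disabled; backoff = 1<<0 = 1 period
//	repeated disables -> backoffs of 1, 2, 4, ... periods; recentFailures is
//	                     capped at maxRecentFPFailures = 9, so the backoff
//	                     tops out at 512 periods (~205ms at the 400us
//	                     recordingPeriod).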
// enableStubFP is a wrapper to unconditionally enable stub FP and increment
// a debug metric.
func (s *fastPathState) enableStubFP() {
	s.stubFastPathEnabled.Store(true)
	numTimesStubFastPathEnabled.Increment()
}

// disableStubFP returns true if the stub fastpath was able to be disabled.
//
// It takes two calls to disableStubFP without any calls to stubFPSuccess in
// between to disable the stub fastpath. This is done in order to mitigate the
// effects of outlier measures due to rdtsc inaccuracies.
func (s *fastPathState) disableStubFP() bool {
	s.consecutiveStubFPFailures++
	if s.consecutiveStubFPFailures < numConsecutiveFailsToDisableFP {
		return false
	}
	s.consecutiveStubFPFailures = 0
	s.stubFastPathEnabled.Store(false)
	numTimesStubFastPathDisabled.Increment()

	s.stubFPBackoff = getBackoff(s.stubFPRecentFailures)
	s.stubFPRecentFailures = min(maxRecentFPFailures, s.stubFPRecentFailures+1)
	return true
}

func (s *fastPathState) sentryFPSuccess() {
	s.sentryFPRecentFailures = max(0, s.sentryFPRecentFailures-1)
	s.consecutiveSentryFPFailures = 0
}

func (s *fastPathState) stubFPSuccess() {
	s.stubFPRecentFailures = max(0, s.stubFPRecentFailures-1)
	s.consecutiveStubFPFailures = 0
}

// shouldDisableSentryFP returns true if the metrics indicate the sentry
// fastpath should be disabled.
func (s *fastPathState) shouldDisableSentryFP(stubMedian, sentryMedian cpuTicks) bool {
	if !s.usedSentryFastPath.Load() {
		return false
	}
	stubBaseline := s.stubBoundBaselineLatency.getMedian()
	sentryBaseline := s.sentryBoundBaselineLatency.getMedian()
	if sentryMedian < sentryBaseline {
		// Assume the number of productive stubs is the core count on
		// the system, not counting the 1 core taken by the dispatcher
		// for the fast path.
		n := cpuTicks(maxSysmsgThreads - 1)
		// If the sentry fastpath is causing the stub latency to be
		// higher than normal, the point at which it's considered to be
		// too high is when the time saved via the sentry fastpath is
		// less than the time lost via higher stub latency (with some
		// error margin). Assume that all possible stub threads are
		// active for this comparison.
		diff := (sentryBaseline - sentryMedian) * n
		errorMargin := stubBaseline / 8
		return (stubMedian > stubBaseline) && (stubMedian-stubBaseline) > (diff+errorMargin)
	}
	// Running the fastpath resulted in higher sentry latency than the
	// baseline? This does not happen often, but it is an indication that
	// the fastpath wasn't used to full effect: for example, the dispatcher
	// kept changing, and there was not enough CPU to place a new dispatcher
	// fast enough.
	//
	// If there isn't enough CPU we will most likely see large stub latency
	// regressions, and should disable the fastpath.
	return stubMedian > (stubBaseline + stubBaseline/2)
}
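// Worked example for the cost/benefit branch above (all numbers are made up
// for illustration): with maxSysmsgThreads = 8, sentryBaseline = 4096 and
// sentryMedian = 3072, the sentry fastpath saves (4096-3072)*7 = 7168 cycles
// across the assumed stub threads. With stubBaseline = 8192 the error margin
// is 8192/8 = 1024, so the fastpath is disabled only if stubMedian exceeds
// the baseline by more than 7168+1024 = 8192 cycles, i.e. stubMedian > 16384.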
// shouldDisableStubFP returns true if the metrics indicate the stub fastpath
// should be disabled.
func (s *fastPathState) shouldDisableStubFP(stubMedian, sentryMedian cpuTicks) bool {
	if !s.usedStubFastPath.Load() {
		return false
	}
	stubBaseline := s.stubBoundBaselineLatency.getMedian()
	sentryBaseline := s.sentryBoundBaselineLatency.getMedian()
	if stubMedian < stubBaseline {
		// If the stub fastpath is causing the sentry latency to be
		// higher than normal, the point at which it's considered to be
		// too high is when the time saved via the stub fastpath is
		// less than the time lost via higher sentry latency (with some
		// error margin). Unlike the stub latency, the sentry latency is
		// largely dependent on one thread (the dispatcher).
		diff := stubBaseline - stubMedian
		errorMargin := sentryBaseline / 8
		return (sentryMedian > sentryBaseline) && (sentryMedian-sentryBaseline) > (diff+errorMargin)
	}
	// Running the fastpath resulted in higher stub latency than the
	// baseline? This is an indication either that there isn't enough CPU to
	// schedule stub threads to run the fastpath, or that the user workload
	// has changed such that it returns to the sentry less often.
	//
	// If there isn't enough CPU we will most likely see large sentry
	// latency regressions, and should disable the fastpath.
	return sentryMedian > (sentryBaseline + sentryBaseline/2)
}

// The following functions are used for state transitions in the sentry/stub
// fastpath state machine described above.

func sentryOffStubOff(s *fastPathState) {
	if neverEnableFastPath {
		return
	}
	periodStubBoundMedian := latencies.stubBound.getMedian()
	s.stubBoundBaselineLatency.merge(&latencies.stubBound)
	latencies.stubBound.reset()
	if periodStubBoundMedian != 0 {
		s.stubFPBackoff = max(s.stubFPBackoff-1, 0)
	}

	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	s.sentryBoundBaselineLatency.merge(&latencies.sentryBound)
	latencies.sentryBound.reset()
	if periodSentryBoundMedian != 0 {
		s.sentryFPBackoff = max(s.sentryFPBackoff-1, 0)
	}

	if s.sentryFPBackoff == 0 {
		s.enableSentryFP()
		s.curState = sentryOnStubOff
	} else if s.stubFPBackoff == 0 {
		s.enableStubFP()
		s.curState = sentryOffStubOn
	}
}
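// A hypothetical enable trajectory through the state functions below
// (assuming the fastPathBackoffMin = 2 defaults and steady traffic every
// period):
//
//	periods 1-2: sentryOffStubOff decrements both backoffs from 2; when
//	             sentryFPBackoff hits 0 the sentry FP is enabled first
//	             -> sentryOnStubOff.
//	period 3:    sentryOnStubOff keeps the sentry FP (sentryFPSuccess) and,
//	             once stubFPBackoff reaches 0, enables the stub FP
//	             -> sentryOnStubOnLastEnabledStub.
//	period 4+:   if the stub metrics hold up, the state settles into
//	             sentryOnStubOn; otherwise it falls back to the last stable
//	             state, sentryOnStubOff.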
func sentryOnStubOff(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	if s.shouldDisableSentryFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableSentryFP() {
			s.curState = sentryOffStubOff
		}
	} else {
		s.sentryFPSuccess()
		// If we are going to keep the sentry FP on, that means stub
		// latency was fine; update the baseline.
		s.stubBoundBaselineLatency.merge(&latencies.stubBound)
		latencies.stubBound.reset()
		s.stubFPBackoff = max(s.stubFPBackoff-1, 0)
		if s.stubFPBackoff == 0 {
			s.enableStubFP()
			s.curState = sentryOnStubOnLastEnabledStub
		}
	}
	latencies.sentryBound.reset()
}

func sentryOffStubOn(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableStubFP() {
			s.curState = sentryOffStubOff
		}
	} else {
		s.stubFPSuccess()

		s.sentryBoundBaselineLatency.merge(&latencies.sentryBound)
		latencies.sentryBound.reset()
		s.sentryFPBackoff = max(s.sentryFPBackoff-1, 0)
		if s.sentryFPBackoff == 0 {
			s.enableSentryFP()
			s.curState = sentryOnStubOnLastEnabledSentry
		}
	}
	latencies.stubBound.reset()
}

func sentryOnStubOnLastEnabledSentry(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	latencies.stubBound.reset()
	latencies.sentryBound.reset()

	if s.shouldDisableSentryFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableSentryFP() {
			s.curState = sentryOffStubOn
		}
	} else {
		s.curState = sentryOnStubOn
		s.sentryFPSuccess()
		s.stubFPSuccess()
	}
}

func sentryOnStubOnLastEnabledStub(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	latencies.stubBound.reset()
	latencies.sentryBound.reset()

	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableStubFP() {
			s.curState = sentryOnStubOff
		}
	} else {
		s.curState = sentryOnStubOn
		s.sentryFPSuccess()
		s.stubFPSuccess()
	}
}

func sentryOnStubOn(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	latencies.stubBound.reset()
	latencies.sentryBound.reset()

	// Prioritize disabling stub fastpath over sentry fastpath, since sentry
	// only spins with one thread.
	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableStubFP() {
			s.curState = sentryOnStubOff
		}
	} else if s.shouldDisableSentryFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableSentryFP() {
			s.curState = sentryOffStubOn
		}
	} else {
		s.sentryFPSuccess()
		s.stubFPSuccess()
	}
}
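// A hypothetical sketch (not part of the original file) of how the state
// functions above are driven: controlFastPath() calls the current state
// function once per recording period, and a transition is simply a swap of
// the curState function pointer:
//
//	s := &fastPathState{
//		stubFPBackoff:   fastPathBackoffMin,
//		sentryFPBackoff: fastPathBackoffMin,
//		curState:        sentryOffStubOff,
//	}
//	s.curState(s) // may replace s.curState with e.g. sentryOnStubOff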
// Profiling metrics intended for debugging purposes.
var (
	numTimesSentryFastPathDisabled = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesSentryFastPathDisabled", false, "")
	numTimesSentryFastPathEnabled  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesSentryFastPathEnabled", false, "")
	numTimesStubFastPathDisabled   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubFastPathDisabled", false, "")
	numTimesStubFastPathEnabled    = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubFastPathEnabled", false, "")
	numTimesStubKicked             = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubKicked", false, "")

	stubLatWithin1kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin1kUS", false, "")
	stubLatWithin5kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin5kUS", false, "")
	stubLatWithin10kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin10kUS", false, "")
	stubLatWithin20kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin20kUS", false, "")
	stubLatWithin40kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin40kUS", false, "")
	stubLatGreater40kUS = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatGreater40kUS", false, "")

	sentryLatWithin1kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin1kUS", false, "")
	sentryLatWithin5kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin5kUS", false, "")
	sentryLatWithin10kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin10kUS", false, "")
	sentryLatWithin20kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin20kUS", false, "")
	sentryLatWithin40kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin40kUS", false, "")
	sentryLatGreater40kUS = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatGreater40kUS", false, "")
)