github.com/metacubex/gvisor@v0.0.0-20240320004321-933faba989ec/pkg/sentry/platform/systrap/metrics.go (about)

     1  // Copyright 2023 The gVisor Authors.
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package systrap
    16  
    17  import (
    18  	"time"
    19  
    20  	"github.com/metacubex/gvisor/pkg/atomicbitops"
    21  	"github.com/metacubex/gvisor/pkg/hostarch"
    22  )
    23  
    24  // This file contains all logic related to context switch latency metrics.
    25  //
    26  // Latency metrics are the main method by which fastpath for both stub threads
    27  // and the sentry is enabled and disabled. We measure latency in CPU cycles.
    28  //
    29  // The high level overview of metric collection looks like this:
    30  //   1a) When a context is switched from the sentry to the stub, the sentry
    31  //   records the time it was put into the context queue.
    32  //   1b) When a stub thread picks up the context from the context queue, the stub
    33  //   thread records the time when it's about to switch back to user code.
    34  //   Getting the diff between these timestamps gives us the stub-bound latency.
    35  //
    36  //   2a) When a stub thread gives back a context to the sentry for handling,
    37  //   it records the time just before notifying the sentry task goroutine.
    38  //   2b) When the task goroutine sees that it has been notified, it records the
    39  //   time.
    40  //   Getting the diff between these timestamps gives us the sentry-bound latency.
    41  //
    42  //   3) Both latencies are recorded at once via recordLatency(). This means
    43  //   there is a delay on getting stubBoundLatencies. In practice this should not
    44  //   matter that much due to our relatively large latency measurement periods.
    45  //
    46  //   There is a bucket array for each latency type, where each bucket is of size
    47  //   `bucketIncrements`. Latencies are collected in time periods of length
    48  //   `recordingPeriod`, and  measurements for the current period are stored
    49  //   in the `latencies` variable.
    50  
// latencyBuckets is a histogram of latency measurements: bucket i counts
// latencies in [i*bucketIncrements, (i+1)*bucketIncrements) CPU cycles, with
// the last bucket also absorbing all larger values (see record()).
type latencyBuckets [numLatencyBuckets]atomicbitops.Uint64

// cpuTicks is a latency measurement expressed in CPU cycles.
type cpuTicks uint64
    53  
const (
	// numLatencyBuckets is the number of buckets in a latencyBuckets array.
	numLatencyBuckets = 80
	// bucketIncrements is the width, in CPU cycles, of each latency bucket.
	bucketIncrements = 2048

	// minNecessaryRecordings defines the minimum amount of recordings we
	// want to see in latencyBuckets in order to get a reasonable median.
	minNecessaryRecordings = 5
)
    62  
// latencyRecorder is used to collect latency metrics.
type latencyRecorder struct {
	// stubBound collects sentry->stub context switch latencies.
	stubBound latencyBuckets
	// sentryBound collects stub->sentry context switch latencies.
	sentryBound latencyBuckets
}
    68  
// latencies stores the latency counts for the current measurement period.
// It is drained (merged into baselines and/or reset) by the fastpath state
// functions at the end of each recording period.
var latencies latencyRecorder
    71  
    72  // record increments the correct bucket assigned to the given latency l.
    73  //
    74  //go:nosplit
    75  func (b *latencyBuckets) record(l cpuTicks) {
    76  	bucket := l / bucketIncrements
    77  	if bucket >= numLatencyBuckets {
    78  		bucket = numLatencyBuckets - 1
    79  	}
    80  	b[bucket].Add(1)
    81  }
    82  
// getMedian returns a latency measure in the range of
// [bucketIncrements, numLatencyBuckets * bucketIncrements], or 0 if unable to
// find a median in the latencyBuckets.
//
// The median is approximated with a two-pointer walk: i climbs from the lowest
// bucket and j descends from the highest, always advancing whichever side has
// accumulated the smaller count. When the pointers cross, bucket i sits near
// the midpoint of the distribution. Buckets may be updated concurrently while
// this runs, so the result is only an approximation.
func (b *latencyBuckets) getMedian() cpuTicks {
	i := 0
	j := numLatencyBuckets - 1
	var totalForwards, totalBackwards uint64
	for i <= j {
		if totalForwards < totalBackwards {
			totalForwards += b[i].Load()
			i++
		} else {
			totalBackwards += b[j].Load()
			j--
		}
	}
	// Too few samples make the median meaningless; report "no median".
	if totalForwards+totalBackwards < minNecessaryRecordings {
		return 0
	}
	// Report the upper bound of the median bucket, never less than one
	// bucket increment.
	return cpuTicks(max(uint64(i), 1) * bucketIncrements)
}
   104  
   105  // merge combines two latencyBuckets instances.
   106  func (b *latencyBuckets) merge(other *latencyBuckets) {
   107  	for i := 0; i < numLatencyBuckets; i++ {
   108  		b[i].Add(other[i].Load())
   109  	}
   110  }
   111  
   112  // reset zeroes all buckets.
   113  func (b *latencyBuckets) reset() {
   114  	for i := 0; i < numLatencyBuckets; i++ {
   115  		b[i].Store(0)
   116  	}
   117  }
   118  
// recordLatency records the latency of both the sentry->stub and the
// stub->sentry context switches.
// For the stub->sentry context switch, the final timestamp is taken by this
// function.
// Preconditions:
//   - ctx.isAcked() is true.
//
//go:nosplit
func (sc *sharedContext) recordLatency() {
	// Record stub->sentry latency.
	sentryBoundLatency := sc.getStateChangedTimeDiff()
	// A zero diff indicates no usable measurement; skip recording it.
	if sentryBoundLatency != 0 {
		latencies.sentryBound.record(sentryBoundLatency)
	}

	// Record sentry->stub latency.
	stubBoundLatency := sc.getAckedTimeDiff()
	if stubBoundLatency != 0 {
		latencies.stubBound.record(stubBoundLatency)
	}

	updateDebugMetrics(stubBoundLatency, sentryBoundLatency)
}
   142  
   143  // When a measurement period ends, the latencies are used to determine the fast
   144  // path state. Fastpath is independently enabled for both the sentry and stub
   145  // threads, and is modeled as the following state machine:
   146  //
   147  //                  +----------StubFPOff,SentryFPOff-------+
   148  //                  |          ^                  ^        |
   149  //                  V          |                  |        V
   150  //      +-->StubFPOn,SentryFPOff                StubFPOff,SentryFPOn<--+
   151  //      |        |     ^                                 |     ^       |
   152  //      |        V     |                                 V     |       |
   153  //      |   StubFPOn,SentryFPOn                 StubFPOn,SentryFPOn    |
   154  //      |   LastEnabledSentryFP                   LastEnabledStubFP    |
   155  //      |           |                                       |          |
   156  //      |           |                                       |          |
   157  //      |           +---------> StubFPOn,SentryFPOn <-------+          |
   158  //      |                              |   |                           |
   159  //      |______________________________|   |___________________________|
   160  //
   161  // The default state is to have both stub and sentry fastpath OFF.
   162  // A state transition to enable one fastpath is done when
   163  // fpState.(stub|sentry)FPBackoff reaches 0. (stub|sentry)FPBackoff is
   164  // decremented every recording period that the corresponding fastpath is
   165  // disabled.
   166  // A state transition to disable one fastpath is decided through the predicates
   167  // shouldDisableStubFP or shouldDisableSentryFP, and activated with
   168  // disableStubFP or disableSentryFP.
   169  //
   170  // Why have 3 states for both FPs being ON? The logic behind that is to do with
   171  // the fact that fastpaths are interdependent. Enabling one fastpath can have
   172  // negative effects on the latency metrics of the other in the event that there
   173  // are not enough CPUs to run the fastpath. So it's very possible that the system
   174  // finds itself in a state where it's beneficial to run one fastpath but not the
   175  // other based on the workload it's doing. For this case, we need to remember
   176  // what the last stable state was to return to, because the metrics will likely
   177  // be bad enough for both sides to be eligible for being disabled.
   178  //
   179  // Once the system establishes that having both the stub and sentry fastpath ON
   180  // is acceptable, it does prioritize disabling stub fastpath over disabling
   181  // sentry fastpath, because the sentry fastpath at most takes one thread to spin.
   182  
const (
	// recordingPeriod is the length of one latency measurement period.
	recordingPeriod = 400 * time.Microsecond
	// fastPathBackoffMin is the initial number of periods a disabled
	// fastpath waits before being tried again.
	fastPathBackoffMin = 2
	// maxRecentFPFailures caps the (stub|sentry)FPRecentFailures counters,
	// which bounds the exponential backoff produced by getBackoff.
	maxRecentFPFailures = 9
	// numConsecutiveFailsToDisableFP is how many consecutive
	// disable(Stub|Sentry)FP calls (with no interleaved success) are
	// needed to actually disable a fastpath.
	numConsecutiveFailsToDisableFP = 2
)
   189  
// fastPathState is used to keep track of long term metrics that span beyond
// one measurement period.
type fastPathState struct {
	// stubBoundBaselineLatency and sentryBoundBaselineLatency record all
	// latency measures recorded during periods when their respective
	// fastpath was OFF.
	stubBoundBaselineLatency   latencyBuckets
	sentryBoundBaselineLatency latencyBuckets

	// stubFPBackoff and sentryFPBackoff are the periods remaining until
	// the system attempts to use the fastpath again.
	stubFPBackoff   int
	sentryFPBackoff int

	// stubFPRecentFailures and sentryFPRecentFailures are counters in the
	// range [0, maxRecentFPFailures] that are incremented by
	// disable(Stub|Sentry)FP and decremented by (stub|sentry)FPSuccess.
	// They are used to set the backoffs.
	stubFPRecentFailures   int
	sentryFPRecentFailures int

	// consecutiveStubFPFailures and consecutiveSentryFPFailures count
	// disable(Stub|Sentry)FP calls not interleaved with a success; a
	// fastpath is only turned off once the count reaches
	// numConsecutiveFailsToDisableFP.
	consecutiveStubFPFailures   int
	consecutiveSentryFPFailures int

	// The cache-line-sized padding fields below keep each atomic flag on
	// its own cache line to avoid false sharing between CPUs that poll
	// them concurrently.
	_ [hostarch.CacheLineSize]byte
	// stubFastPathEnabled is a global flag referenced in other parts of
	// systrap to determine if the stub fast path is enabled or not.
	stubFastPathEnabled atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// sentryFastPathEnabled is a global flag referenced in other parts of
	// systrap to determine if the sentry fastpath is enabled or not.
	sentryFastPathEnabled atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// nrMaxAwakeStubThreads is the maximum number of awake stub threads over
	// all subprocesses at this moment.
	nrMaxAwakeStubThreads atomicbitops.Uint32

	// usedStubFastPath and usedSentryFastPath are reset every recording
	// period, and are populated in case the system actually used the
	// fastpath (i.e. stub or dispatcher spun for some time without work).
	_                  [hostarch.CacheLineSize]byte
	usedStubFastPath   atomicbitops.Bool
	_                  [hostarch.CacheLineSize]byte
	usedSentryFastPath atomicbitops.Bool

	_ [hostarch.CacheLineSize]byte
	// curState is the current fastpath state function, which is called at
	// the end of every recording period.
	curState func(*fastPathState)
}
   242  
var (
	// fastpath is the singleton fastpath state machine instance; it starts
	// in the state where both fastpaths are disabled.
	fastpath = fastPathState{
		stubFPBackoff:   fastPathBackoffMin,
		sentryFPBackoff: fastPathBackoffMin,
		curState:        sentryOffStubOff,
	}

	// fastPathContextLimit is the maximum number of contexts after which the fast
	// path in stub threads is disabled. Its value can be higher than the number of
	// CPU-s, because the Sentry is running with higher priority than stub threads,
	// deepSleepTimeout is much shorter than the Linux scheduler timeslice, so the
	// only thing that matters here is whether the Sentry handles syscall faster
	// than the overhead of scheduling another stub thread.
	fastPathContextLimit = uint32(maxSysmsgThreads * 2)
)
   258  
// controlFastPath is used to spawn a goroutine when creating the Systrap
// platform. It drives the fastpath state machine: once per recordingPeriod it
// invokes the current state function and then clears the per-period
// fastpath-usage trackers. It never returns.
func controlFastPath() {
	for {
		time.Sleep(recordingPeriod)

		fastpath.curState(&fastpath)
		// Reset FP trackers.
		fastpath.usedStubFastPath.Store(false)
		fastpath.usedSentryFastPath.Store(false)
	}
}
   271  
   272  // getBackoff returns the number of recording periods that fastpath should remain
   273  // disabled for, based on the num of recentFailures.
   274  func getBackoff(recentFailures int) int {
   275  	return 1 << recentFailures
   276  }
   277  
// sentryFastPath reports whether the sentry fastpath is currently enabled.
//
//go:nosplit
func (s *fastPathState) sentryFastPath() bool {
	return s.sentryFastPathEnabled.Load()
}
   282  
// stubFastPath reports whether the stub fastpath is enabled AND the current
// number of awake stub threads is still within fastPathContextLimit; beyond
// that limit spinning stubs would only waste CPU.
//
//go:nosplit
func (s *fastPathState) stubFastPath() bool {
	return s.stubFastPathEnabled.Load() && (s.nrMaxAwakeStubThreads.Load() <= fastPathContextLimit)
}
   287  
// enableSentryFP is a wrapper to unconditionally enable sentry FP and increment
// a debug metric.
func (s *fastPathState) enableSentryFP() {
	s.sentryFastPathEnabled.Store(true)
	numTimesSentryFastPathEnabled.Increment()
}
   294  
// disableSentryFP returns true if the sentry fastpath was able to be disabled.
//
// It takes two calls to disableSentryFP without any calls to sentryFPSuccess in
// between to disable the sentry fastpath. This is done in order to mitigate the
// effects of outlier measures due to rdtsc inaccuracies.
func (s *fastPathState) disableSentryFP() bool {
	s.consecutiveSentryFPFailures++
	if s.consecutiveSentryFPFailures < numConsecutiveFailsToDisableFP {
		// Not enough consecutive failures yet; keep the fastpath on.
		return false
	}
	s.consecutiveSentryFPFailures = 0
	s.sentryFastPathEnabled.Store(false)
	numTimesSentryFastPathDisabled.Increment()

	// Back off exponentially in the number of recent failures before the
	// fastpath is attempted again.
	s.sentryFPBackoff = getBackoff(s.sentryFPRecentFailures)
	s.sentryFPRecentFailures = min(maxRecentFPFailures, s.sentryFPRecentFailures+1)
	return true
}
   313  
// enableStubFP is a wrapper to unconditionally enable stub FP and increment
// a debug metric.
func (s *fastPathState) enableStubFP() {
	s.stubFastPathEnabled.Store(true)
	numTimesStubFastPathEnabled.Increment()
}
   320  
// disableStubFP returns true if the stub fastpath was able to be disabled.
//
// It takes two calls to disableStubFP without any calls to stubFPSuccess in
// between to disable the stub fastpath. This is done in order to mitigate the
// effects of outlier measures due to rdtsc inaccuracies.
func (s *fastPathState) disableStubFP() bool {
	s.consecutiveStubFPFailures++
	if s.consecutiveStubFPFailures < numConsecutiveFailsToDisableFP {
		// Not enough consecutive failures yet; keep the fastpath on.
		return false
	}
	s.consecutiveStubFPFailures = 0
	s.stubFastPathEnabled.Store(false)
	numTimesStubFastPathDisabled.Increment()

	// Back off exponentially in the number of recent failures before the
	// fastpath is attempted again.
	s.stubFPBackoff = getBackoff(s.stubFPRecentFailures)
	s.stubFPRecentFailures = min(maxRecentFPFailures, s.stubFPRecentFailures+1)
	return true
}
   339  
   340  func (s *fastPathState) sentryFPSuccess() {
   341  	s.sentryFPRecentFailures = max(0, s.sentryFPRecentFailures-1)
   342  	s.consecutiveSentryFPFailures = 0
   343  }
   344  
   345  func (s *fastPathState) stubFPSuccess() {
   346  	s.stubFPRecentFailures = max(0, s.stubFPRecentFailures-1)
   347  	s.consecutiveStubFPFailures = 0
   348  }
   349  
// shouldDisableSentryFP returns true if the metrics indicate sentry fastpath
// should be disabled.
func (s *fastPathState) shouldDisableSentryFP(stubMedian, sentryMedian cpuTicks) bool {
	// Without the fastpath having actually run this period, there is no
	// signal to act upon.
	if !s.usedSentryFastPath.Load() {
		return false
	}
	stubBaseline := s.stubBoundBaselineLatency.getMedian()
	sentryBaseline := s.sentryBoundBaselineLatency.getMedian()
	if sentryMedian < sentryBaseline {
		// Assume the number of productive stubs is the core count on the
		// system, not counting the 1 core taken by the dispatcher for
		// the fast path.
		n := cpuTicks(maxSysmsgThreads - 1)
		// If the sentry fastpath is causing the stub latency to be
		// higher than normal, the point at which it's considered to be
		// too high is when the time saved via the sentry fastpath is
		// less than the time lost via higher stub latency (with some
		// error margin). Assume that all possible stub threads are
		// active for this comparison.
		diff := (sentryBaseline - sentryMedian) * n
		errorMargin := stubBaseline / 8
		return (stubMedian > stubBaseline) && (stubMedian-stubBaseline) > (diff+errorMargin)
	}
	// Running the fastpath resulted in higher sentry latency than baseline?
	// This does not happen often, but it is an indication that the fastpath
	// wasn't used to full effect: for example the dispatcher kept changing,
	// and that there was not enough CPU to place a new dispatcher fast
	// enough.
	//
	// If there isn't enough CPU we will most likely see large stub latency
	// regressions, and should disable the fastpath.
	return stubMedian > (stubBaseline + stubBaseline/2)
}
   383  
// shouldDisableStubFP returns true if the metrics indicate stub fastpath should
// be disabled.
func (s *fastPathState) shouldDisableStubFP(stubMedian, sentryMedian cpuTicks) bool {
	// Without the fastpath having actually run this period, there is no
	// signal to act upon.
	if !s.usedStubFastPath.Load() {
		return false
	}
	stubBaseline := s.stubBoundBaselineLatency.getMedian()
	sentryBaseline := s.sentryBoundBaselineLatency.getMedian()
	if stubMedian < stubBaseline {
		// If the stub fastpath is causing the sentry latency to be
		// higher than normal, the point at which it's considered to be
		// too high is when the time saved via the stub fastpath is
		// less than the time lost via higher sentry latency (with some
		// error margin). Unlike the stub latency, the sentry latency is
		// largely dependent on one thread (the dispatcher).
		diff := stubBaseline - stubMedian
		errorMargin := sentryBaseline / 8
		return (sentryMedian > sentryBaseline) && (sentryMedian-sentryBaseline) > (diff+errorMargin)
	}
	// Running the fastpath resulted in higher stub latency than baseline?
	// This is either an indication that there isn't enough CPU to schedule
	// stub threads to run the fastpath, or the user workload has changed to
	// be such that it returns less often to the sentry.
	//
	// If there isn't enough CPU we will most likely see large sentry latency
	// regressions, and should disable the fastpath.
	return sentryMedian > (sentryBaseline + sentryBaseline/2)
}
   412  
   413  // The following functions are used for state transitions in the sentry/stub
   414  // fastpath state machine described above.
   415  
// sentryOffStubOff is the state function for when both fastpaths are OFF.
// It folds this period's latencies into the baselines, counts down the
// backoffs, and enables one fastpath (preferring the sentry's) once its
// backoff reaches zero.
func sentryOffStubOff(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	s.stubBoundBaselineLatency.merge(&latencies.stubBound)
	latencies.stubBound.reset()
	// Only count down the backoff on periods that had enough recordings
	// to produce a median.
	if periodStubBoundMedian != 0 {
		s.stubFPBackoff = max(s.stubFPBackoff-1, 0)
	}

	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	s.sentryBoundBaselineLatency.merge(&latencies.sentryBound)
	latencies.sentryBound.reset()
	if periodSentryBoundMedian != 0 {
		s.sentryFPBackoff = max(s.sentryFPBackoff-1, 0)
	}

	// Enable at most one fastpath per period; sentry takes priority.
	if s.sentryFPBackoff == 0 {
		s.enableSentryFP()
		s.curState = sentryOnStubOff
	} else if s.stubFPBackoff == 0 {
		s.enableStubFP()
		s.curState = sentryOffStubOn
	}
}
   439  
// sentryOnStubOff is the state function for when only the sentry fastpath is
// ON. It either disables the sentry fastpath after repeated bad periods, or
// records a success and possibly enables the stub fastpath as well.
func sentryOnStubOff(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	// Skip periods without enough recordings to make a decision.
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	if s.shouldDisableSentryFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableSentryFP() {
			s.curState = sentryOffStubOff
		}
	} else {
		s.sentryFPSuccess()
		// If we are going to keep sentry FP on that means stub latency
		// was fine; update the baseline.
		s.stubBoundBaselineLatency.merge(&latencies.stubBound)
		latencies.stubBound.reset()
		s.stubFPBackoff = max(s.stubFPBackoff-1, 0)
		if s.stubFPBackoff == 0 {
			s.enableStubFP()
			s.curState = sentryOnStubOnLastEnabledStub
		}
	}
	// Sentry-bound latencies measured while the sentry fastpath is on do
	// not feed the baseline; always discard them.
	latencies.sentryBound.reset()
}
   465  
// sentryOffStubOn is the state function for when only the stub fastpath is
// ON. It either disables the stub fastpath after repeated bad periods, or
// records a success and possibly enables the sentry fastpath as well.
func sentryOffStubOn(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	// Skip periods without enough recordings to make a decision.
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableStubFP() {
			s.curState = sentryOffStubOff
		}
	} else {
		s.stubFPSuccess()

		// Keeping stub FP on means sentry latency was fine; fold it
		// into the baseline.
		s.sentryBoundBaselineLatency.merge(&latencies.sentryBound)
		latencies.sentryBound.reset()
		s.sentryFPBackoff = max(s.sentryFPBackoff-1, 0)
		if s.sentryFPBackoff == 0 {
			s.enableSentryFP()
			s.curState = sentryOnStubOnLastEnabledSentry
		}
	}
	// Stub-bound latencies measured while the stub fastpath is on do not
	// feed the baseline; always discard them.
	latencies.stubBound.reset()
}
   490  
// sentryOnStubOnLastEnabledSentry handles the case where both fastpaths are
// ON and the sentry fastpath was the most recently enabled. If enabling it
// regressed the metrics, it is turned back off (returning to the last stable
// state, sentryOffStubOn); otherwise the system settles into the stable
// sentryOnStubOn state.
func sentryOnStubOnLastEnabledSentry(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	// Skip periods without enough recordings to make a decision.
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	// Neither latency feeds a baseline while both fastpaths are on.
	latencies.stubBound.reset()
	latencies.sentryBound.reset()

	if s.shouldDisableSentryFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableSentryFP() {
			s.curState = sentryOffStubOn
		}
	} else {
		s.curState = sentryOnStubOn
		s.sentryFPSuccess()
		s.stubFPSuccess()
	}
}
   511  
// sentryOnStubOnLastEnabledStub handles the case where both fastpaths are ON
// and the stub fastpath was the most recently enabled. If enabling it
// regressed the metrics, it is turned back off (returning to the last stable
// state, sentryOnStubOff); otherwise the system settles into the stable
// sentryOnStubOn state.
func sentryOnStubOnLastEnabledStub(s *fastPathState) {
	periodStubBoundMedian := latencies.stubBound.getMedian()
	periodSentryBoundMedian := latencies.sentryBound.getMedian()
	// Skip periods without enough recordings to make a decision.
	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
		return
	}

	// Neither latency feeds a baseline while both fastpaths are on.
	latencies.stubBound.reset()
	latencies.sentryBound.reset()

	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
		if s.disableStubFP() {
			s.curState = sentryOnStubOff
		}
	} else {
		s.curState = sentryOnStubOn
		s.sentryFPSuccess()
		s.stubFPSuccess()
	}
}
   532  
   533  func sentryOnStubOn(s *fastPathState) {
   534  	periodStubBoundMedian := latencies.stubBound.getMedian()
   535  	periodSentryBoundMedian := latencies.sentryBound.getMedian()
   536  	if periodStubBoundMedian == 0 || periodSentryBoundMedian == 0 {
   537  		return
   538  	}
   539  
   540  	latencies.stubBound.reset()
   541  	latencies.sentryBound.reset()
   542  
   543  	// Prioritize disabling stub fastpath over sentry fastpath, since sentry
   544  	// only spins with one thread.
   545  	if s.shouldDisableStubFP(periodStubBoundMedian, periodSentryBoundMedian) {
   546  		if s.disableStubFP() {
   547  			s.curState = sentryOnStubOff
   548  		}
   549  	} else if s.shouldDisableSentryFP(latencies.stubBound.getMedian(), latencies.sentryBound.getMedian()) {
   550  		if s.disableSentryFP() {
   551  			s.curState = sentryOffStubOn
   552  		}
   553  	} else {
   554  		s.sentryFPSuccess()
   555  		s.stubFPSuccess()
   556  	}
   557  }
   558  
// Profiling metrics intended for debugging purposes.
var (
	// Counters of fastpath enable/disable transitions and stub kicks.
	numTimesSentryFastPathDisabled = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesSentryFastPathDisabled", false, "")
	numTimesSentryFastPathEnabled  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesSentryFastPathEnabled", false, "")
	numTimesStubFastPathDisabled   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubFastPathDisabled", false, "")
	numTimesStubFastPathEnabled    = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubFastPathEnabled", false, "")
	numTimesStubKicked             = SystrapProfiling.MustCreateNewUint64Metric("/systrap/numTimesStubKicked", false, "")

	// Coarse histograms of stub-bound latencies, bucketed by magnitude.
	// NOTE(review): presumably populated by updateDebugMetrics (called from
	// recordLatency) with "kUS" meaning thousands of microseconds — confirm
	// against updateDebugMetrics' definition.
	stubLatWithin1kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin1kUS", false, "")
	stubLatWithin5kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin5kUS", false, "")
	stubLatWithin10kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin10kUS", false, "")
	stubLatWithin20kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin20kUS", false, "")
	stubLatWithin40kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatWithin40kUS", false, "")
	stubLatGreater40kUS = SystrapProfiling.MustCreateNewUint64Metric("/systrap/stubLatGreater40kUS", false, "")

	// Coarse histograms of sentry-bound latencies, same bucketing as above.
	sentryLatWithin1kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin1kUS", false, "")
	sentryLatWithin5kUS   = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin5kUS", false, "")
	sentryLatWithin10kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin10kUS", false, "")
	sentryLatWithin20kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin20kUS", false, "")
	sentryLatWithin40kUS  = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatWithin40kUS", false, "")
	sentryLatGreater40kUS = SystrapProfiling.MustCreateNewUint64Metric("/systrap/sentryLatGreater40kUS", false, "")
)