github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/metrics_test.go

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"reflect"
	"runtime"
	"runtime/metrics"
	"sort"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"
)

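// prepareAllMetricsSamples returns a table of descriptions keyed by metric
// name and a sample slice with one entry for every supported metric, ready
// to be passed to metrics.Read.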
func prepareAllMetricsSamples() (map[string]metrics.Description, []metrics.Sample) {
	all := metrics.All()
	samples := make([]metrics.Sample, len(all))
	descs := make(map[string]metrics.Description)
	for i := range all {
		samples[i].Name = all[i].Name
		descs[all[i].Name] = all[i]
	}
	return descs, samples
}

func TestReadMetrics(t *testing.T) {
	// Tests whether readMetrics produces values aligning
	// with ReadMemStats while the world is stopped.
	var mstats runtime.MemStats
	_, samples := prepareAllMetricsSamples()
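	// ReadMetricsSlow is a test-only export that takes both readings with the
	// world stopped (see the comment above), so the MemStats and metrics
	// snapshots should be mutually consistent.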
	runtime.ReadMetricsSlow(&mstats, unsafe.Pointer(&samples[0]), len(samples), cap(samples))

	checkUint64 := func(t *testing.T, m string, got, want uint64) {
		t.Helper()
		if got != want {
			t.Errorf("metric %q: got %d, want %d", m, got, want)
		}
	}

	// Check to make sure the values we read line up with other values we read.
	var allocsBySize *metrics.Float64Histogram
	var tinyAllocs uint64
	var mallocs, frees uint64
	for i := range samples {
		switch name := samples[i].Name; name {
		case "/cgo/go-to-c-calls:calls":
			checkUint64(t, name, samples[i].Value.Uint64(), uint64(runtime.NumCgoCall()))
		case "/memory/classes/heap/free:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapIdle-mstats.HeapReleased)
		case "/memory/classes/heap/released:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapReleased)
		case "/memory/classes/heap/objects:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapAlloc)
		case "/memory/classes/heap/unused:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapInuse-mstats.HeapAlloc)
		case "/memory/classes/heap/stacks:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.StackInuse)
		case "/memory/classes/metadata/mcache/free:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MCacheSys-mstats.MCacheInuse)
		case "/memory/classes/metadata/mcache/inuse:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MCacheInuse)
		case "/memory/classes/metadata/mspan/free:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanSys-mstats.MSpanInuse)
		case "/memory/classes/metadata/mspan/inuse:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.MSpanInuse)
		case "/memory/classes/metadata/other:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.GCSys)
		case "/memory/classes/os-stacks:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.StackSys-mstats.StackInuse)
		case "/memory/classes/other:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.OtherSys)
		case "/memory/classes/profiling/buckets:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.BuckHashSys)
		case "/memory/classes/total:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.Sys)
		case "/gc/heap/allocs-by-size:bytes":
			hist := samples[i].Value.Float64Histogram()
			// Skip size class 0 in BySize, because it's always empty and not represented
			// in the histogram.
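			// Each remaining size class sc should line up with the bucket whose upper
			// boundary is sc.Size+1: size classes have inclusive upper bounds, while
			// histogram buckets use exclusive upper bounds, hence the +1 below.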
			for i, sc := range mstats.BySize[1:] {
				if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
					t.Errorf("bucket does not match size class: got %f, want %f", b, s)
					// The rest of the checks aren't expected to work anyway.
					continue
				}
				if c, m := hist.Counts[i], sc.Mallocs; c != m {
					t.Errorf("histogram counts do not match BySize for class %d: got %d, want %d", i, c, m)
				}
			}
			allocsBySize = hist
		case "/gc/heap/allocs:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.TotalAlloc)
		case "/gc/heap/frees-by-size:bytes":
			hist := samples[i].Value.Float64Histogram()
			// Skip size class 0 in BySize, because it's always empty and not represented
			// in the histogram.
			for i, sc := range mstats.BySize[1:] {
				if b, s := hist.Buckets[i+1], float64(sc.Size+1); b != s {
					t.Errorf("bucket does not match size class: got %f, want %f", b, s)
					// The rest of the checks aren't expected to work anyway.
					continue
				}
				if c, f := hist.Counts[i], sc.Frees; c != f {
					t.Errorf("histogram counts do not match BySize for class %d: got %d, want %d", i, c, f)
				}
			}
		case "/gc/heap/frees:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.TotalAlloc-mstats.HeapAlloc)
		case "/gc/heap/tiny/allocs:objects":
			// Currently, MemStats adds the tiny alloc count to both Mallocs AND Frees.
			// This is because MemStats couldn't be extended at the time, but there was
			// a desire to have Mallocs at least be a little more representative,
			// while having Mallocs - Frees still represent a live object count.
			// Unfortunately, MemStats doesn't actually export a large allocation count,
			// so it's impossible to pull this number out directly.
			//
			// Check the tiny allocation count outside of this loop, by using the
			// allocs-by-size histogram to figure out how many large objects there are.
			tinyAllocs = samples[i].Value.Uint64()
			// The next two metrics are checked against Mallocs and Frees, so they
			// can't be verified directly here either: we first have to account for
			// the tiny allocations folded into Mallocs and Frees.
		case "/gc/heap/allocs:objects":
			mallocs = samples[i].Value.Uint64()
		case "/gc/heap/frees:objects":
			frees = samples[i].Value.Uint64()
		case "/gc/heap/objects:objects":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.HeapObjects)
		case "/gc/heap/goal:bytes":
			checkUint64(t, name, samples[i].Value.Uint64(), mstats.NextGC)
		case "/gc/cycles/automatic:gc-cycles":
			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC-mstats.NumForcedGC))
		case "/gc/cycles/forced:gc-cycles":
			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumForcedGC))
		case "/gc/cycles/total:gc-cycles":
			checkUint64(t, name, samples[i].Value.Uint64(), uint64(mstats.NumGC))
		}
	}

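	// The allocs-by-size histogram counts every non-tiny heap allocation, so
	// subtracting its total from MemStats.Mallocs (which includes tiny allocs)
	// leaves the tiny allocation count.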
	// Check tinyAllocs.
	nonTinyAllocs := uint64(0)
	for _, c := range allocsBySize.Counts {
		nonTinyAllocs += c
	}
	checkUint64(t, "/gc/heap/tiny/allocs:objects", tinyAllocs, mstats.Mallocs-nonTinyAllocs)

	// Check allocation and free counts.
	checkUint64(t, "/gc/heap/allocs:objects", mallocs, mstats.Mallocs-tinyAllocs)
	checkUint64(t, "/gc/heap/frees:objects", frees, mstats.Frees-tinyAllocs)
}

func TestReadMetricsConsistency(t *testing.T) {
	// Tests whether readMetrics produces consistent, sensible values.
	// The values are read concurrently with the runtime doing other
	// things (e.g. allocating) so what we read can't reasonably be compared
	// to other runtime values (e.g. MemStats).

	// Run a few GC cycles to get some of the stats to be non-zero.
	runtime.GC()
	runtime.GC()
	runtime.GC()

	// Set GOMAXPROCS high then sleep briefly to ensure we generate
	// some idle time.
	oldmaxprocs := runtime.GOMAXPROCS(10)
	time.Sleep(time.Millisecond)
	runtime.GOMAXPROCS(oldmaxprocs)

	// Read all the supported metrics through the metrics package.
	descs, samples := prepareAllMetricsSamples()
	metrics.Read(samples)

	// Check to make sure the values we read make sense.
	var totalVirtual struct {
		got, want uint64
	}
	var objects struct {
		alloc, free             *metrics.Float64Histogram
		allocs, frees           uint64
		allocdBytes, freedBytes uint64
		total, totalBytes       uint64
	}
	var gc struct {
		numGC  uint64
		pauses uint64
	}
	var cpu struct {
		gcAssist    float64
		gcDedicated float64
		gcIdle      float64
		gcPause     float64
		gcTotal     float64

		idle float64
		user float64

		scavengeAssist float64
		scavengeBg     float64
		scavengeTotal  float64

		total float64
	}
	for i := range samples {
		kind := samples[i].Value.Kind()
		if want := descs[samples[i].Name].Kind; kind != want {
			t.Errorf("supported metric %q has unexpected kind: got %d, want %d", samples[i].Name, kind, want)
			continue
		}
		if samples[i].Name != "/memory/classes/total:bytes" && strings.HasPrefix(samples[i].Name, "/memory/classes") {
			v := samples[i].Value.Uint64()
			totalVirtual.want += v

			// None of these stats should ever get this big.
			// If they do, there's probably overflow involved,
			// usually due to bad accounting.
			if int64(v) < 0 {
				t.Errorf("%q has high/negative value: %d", samples[i].Name, v)
			}
		}
		switch samples[i].Name {
		case "/cpu/classes/gc/mark/assist:cpu-seconds":
			cpu.gcAssist = samples[i].Value.Float64()
		case "/cpu/classes/gc/mark/dedicated:cpu-seconds":
			cpu.gcDedicated = samples[i].Value.Float64()
		case "/cpu/classes/gc/mark/idle:cpu-seconds":
			cpu.gcIdle = samples[i].Value.Float64()
		case "/cpu/classes/gc/pause:cpu-seconds":
			cpu.gcPause = samples[i].Value.Float64()
		case "/cpu/classes/gc/total:cpu-seconds":
			cpu.gcTotal = samples[i].Value.Float64()
		case "/cpu/classes/idle:cpu-seconds":
			cpu.idle = samples[i].Value.Float64()
		case "/cpu/classes/scavenge/assist:cpu-seconds":
			cpu.scavengeAssist = samples[i].Value.Float64()
		case "/cpu/classes/scavenge/background:cpu-seconds":
			cpu.scavengeBg = samples[i].Value.Float64()
		case "/cpu/classes/scavenge/total:cpu-seconds":
			cpu.scavengeTotal = samples[i].Value.Float64()
		case "/cpu/classes/total:cpu-seconds":
			cpu.total = samples[i].Value.Float64()
		case "/cpu/classes/user:cpu-seconds":
			cpu.user = samples[i].Value.Float64()
		case "/memory/classes/total:bytes":
			totalVirtual.got = samples[i].Value.Uint64()
		case "/memory/classes/heap/objects:bytes":
			objects.totalBytes = samples[i].Value.Uint64()
		case "/gc/heap/objects:objects":
			objects.total = samples[i].Value.Uint64()
		case "/gc/heap/allocs:bytes":
			objects.allocdBytes = samples[i].Value.Uint64()
		case "/gc/heap/allocs:objects":
			objects.allocs = samples[i].Value.Uint64()
		case "/gc/heap/allocs-by-size:bytes":
			objects.alloc = samples[i].Value.Float64Histogram()
		case "/gc/heap/frees:bytes":
			objects.freedBytes = samples[i].Value.Uint64()
		case "/gc/heap/frees:objects":
			objects.frees = samples[i].Value.Uint64()
		case "/gc/heap/frees-by-size:bytes":
			objects.free = samples[i].Value.Float64Histogram()
		case "/gc/cycles:gc-cycles":
			gc.numGC = samples[i].Value.Uint64()
		case "/gc/pauses:seconds":
			h := samples[i].Value.Float64Histogram()
			gc.pauses = 0
			for i := range h.Counts {
				gc.pauses += h.Counts[i]
			}
		case "/sched/gomaxprocs:threads":
			if got, want := samples[i].Value.Uint64(), uint64(runtime.GOMAXPROCS(-1)); got != want {
				t.Errorf("gomaxprocs doesn't match runtime.GOMAXPROCS: got %d, want %d", got, want)
			}
		case "/sched/goroutines:goroutines":
			if samples[i].Value.Uint64() < 1 {
				t.Error("number of goroutines is less than one")
			}
		}
	}
	// Only check this on Linux where we can be reasonably sure we have a high-resolution timer.
	if runtime.GOOS == "linux" {
		if cpu.gcDedicated <= 0 && cpu.gcAssist <= 0 && cpu.gcIdle <= 0 {
			t.Errorf("found no time spent on GC work: %#v", cpu)
		}
		if cpu.gcPause <= 0 {
			t.Errorf("found no GC pauses: %f", cpu.gcPause)
		}
		if cpu.idle <= 0 {
			t.Errorf("found no idle time: %f", cpu.idle)
		}
		if total := cpu.gcDedicated + cpu.gcAssist + cpu.gcIdle + cpu.gcPause; !withinEpsilon(cpu.gcTotal, total, 0.01) {
			t.Errorf("calculated total GC CPU not within 1%% of sampled total: %f vs. %f", total, cpu.gcTotal)
		}
		if total := cpu.scavengeAssist + cpu.scavengeBg; !withinEpsilon(cpu.scavengeTotal, total, 0.01) {
			t.Errorf("calculated total scavenge CPU not within 1%% of sampled total: %f vs. %f", total, cpu.scavengeTotal)
		}
		if cpu.total <= 0 {
			t.Errorf("found no total CPU time passed")
		}
		if cpu.user <= 0 {
			t.Errorf("found no user time passed")
		}
		if total := cpu.gcTotal + cpu.scavengeTotal + cpu.user + cpu.idle; !withinEpsilon(cpu.total, total, 0.02) {
			t.Errorf("calculated total CPU not within 2%% of sampled total: %f vs. %f", total, cpu.total)
		}
	}
	if totalVirtual.got != totalVirtual.want {
		t.Errorf(`"/memory/classes/total:bytes" does not match sum of /memory/classes/**: got %d, want %d`, totalVirtual.got, totalVirtual.want)
	}
	if got, want := objects.allocs-objects.frees, objects.total; got != want {
		t.Errorf("mismatch between object alloc/free tallies and total: got %d, want %d", got, want)
	}
	if got, want := objects.allocdBytes-objects.freedBytes, objects.totalBytes; got != want {
		t.Errorf("mismatch between byte alloc/free tallies and total: got %d, want %d", got, want)
	}
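	// A Float64Histogram always has one more bucket boundary than counts:
	// Counts[i] is the weight of the range [Buckets[i], Buckets[i+1]).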
	if b, c := len(objects.alloc.Buckets), len(objects.alloc.Counts); b != c+1 {
		t.Errorf("allocs-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
	}
	if b, c := len(objects.free.Buckets), len(objects.free.Counts); b != c+1 {
		t.Errorf("frees-by-size has wrong bucket or counts length: %d buckets, %d counts", b, c)
	}
	if len(objects.alloc.Buckets) != len(objects.free.Buckets) {
		t.Error("allocs-by-size and frees-by-size buckets don't match in length")
	} else if len(objects.alloc.Counts) != len(objects.free.Counts) {
		t.Error("allocs-by-size and frees-by-size counts don't match in length")
	} else {
		for i := range objects.alloc.Buckets {
			ba := objects.alloc.Buckets[i]
			bf := objects.free.Buckets[i]
			if ba != bf {
				t.Errorf("bucket %d is different for alloc and free hists: %f != %f", i, ba, bf)
			}
		}
		if !t.Failed() {
			var gotAlloc, gotFree uint64
			want := objects.total
			for i := range objects.alloc.Counts {
				if objects.alloc.Counts[i] < objects.free.Counts[i] {
					t.Errorf("found more frees than allocs in object dist bucket %d", i)
					continue
				}
				gotAlloc += objects.alloc.Counts[i]
				gotFree += objects.free.Counts[i]
			}
			if got := gotAlloc - gotFree; got != want {
				t.Errorf("object distribution counts don't match count of live objects: got %d, want %d", got, want)
			}
			if gotAlloc != objects.allocs {
				t.Errorf("object distribution counts don't match total allocs: got %d, want %d", gotAlloc, objects.allocs)
			}
			if gotFree != objects.frees {
				t.Errorf("object distribution counts don't match total frees: got %d, want %d", gotFree, objects.frees)
			}
		}
	}
	// The current GC has at least 2 pauses per cycle (sweep termination and
	// mark termination). Check to see if that value makes sense.
	if gc.pauses < gc.numGC*2 {
		t.Errorf("fewer pauses than expected: got %d, want at least %d", gc.pauses, gc.numGC*2)
	}
}

func BenchmarkReadMetricsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Collect latencies into a pre-allocated slice.
	latencies := make([]time.Duration, 0, 1024)
	_, samples := prepareAllMetricsSamples()

	// Hit metrics.Read continuously and measure.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		start := time.Now()
		metrics.Read(samples)
		latencies = append(latencies, time.Since(start))
	}
	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op is just an average and isn't meaningful here; the latency
	// percentiles reported below are what we actually care about.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

var readMetricsSink [1024]interface{}

func TestReadMetricsCumulative(t *testing.T) {
	// Set up the set of metrics marked cumulative.
	descs := metrics.All()
	var samples [2][]metrics.Sample
	samples[0] = make([]metrics.Sample, len(descs))
	samples[1] = make([]metrics.Sample, len(descs))
	total := 0
	for i := range samples[0] {
		if !descs[i].Cumulative {
			continue
		}
		samples[0][total].Name = descs[i].Name
		total++
	}
	samples[0] = samples[0][:total]
	samples[1] = samples[1][:total]
	copy(samples[1], samples[0])
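	// The copy carries the metric names over so that both generations sample
	// the same set of cumulative metrics.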

	// Start some noise in the background.
	var wg sync.WaitGroup
	wg.Add(1)
	done := make(chan struct{})
	go func() {
		defer wg.Done()
		for {
			// Add more things here that could influence metrics.
			for i := 0; i < len(readMetricsSink); i++ {
				readMetricsSink[i] = make([]byte, 1024)
				select {
				case <-done:
					return
				default:
				}
			}
			runtime.GC()
		}
	}()

	sum := func(us []uint64) uint64 {
		total := uint64(0)
		for _, u := range us {
			total += u
		}
		return total
	}

	// Populate the first generation.
	metrics.Read(samples[0])

	// Check to make sure that these metrics only grow monotonically.
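	// samples[gen%2] receives the new reading while samples[1-(gen%2)] still
	// holds the previous generation, so each value can be compared against
	// its predecessor.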
	for gen := 1; gen < 10; gen++ {
		metrics.Read(samples[gen%2])
		for i := range samples[gen%2] {
			name := samples[gen%2][i].Name
			vNew, vOld := samples[gen%2][i].Value, samples[1-(gen%2)][i].Value

			switch vNew.Kind() {
			case metrics.KindUint64:
				new := vNew.Uint64()
				old := vOld.Uint64()
				if new < old {
					t.Errorf("%s decreased: %d < %d", name, new, old)
				}
			case metrics.KindFloat64:
				new := vNew.Float64()
				old := vOld.Float64()
				if new < old {
					t.Errorf("%s decreased: %f < %f", name, new, old)
				}
			case metrics.KindFloat64Histogram:
				new := sum(vNew.Float64Histogram().Counts)
				old := sum(vOld.Float64Histogram().Counts)
				if new < old {
					t.Errorf("%s counts decreased: %d < %d", name, new, old)
				}
			}
		}
	}
	close(done)

	wg.Wait()
}

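// withinEpsilon reports whether v1 is within a relative error of e of v2.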
func withinEpsilon(v1, v2, e float64) bool {
	return v2-v2*e <= v1 && v1 <= v2+v2*e
}

func TestMutexWaitTimeMetric(t *testing.T) {
	var sample [1]metrics.Sample
	sample[0].Name = "/sync/mutex/wait/total:seconds"

	locks := []locker2{
		new(mutex),
		new(rwmutexWrite),
		new(rwmutexReadWrite),
		new(rwmutexWriteRead),
	}
	for _, lock := range locks {
		t.Run(reflect.TypeOf(lock).Elem().Name(), func(t *testing.T) {
			metrics.Read(sample[:])
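			// The metric reports a float64 number of seconds; multiplying by
			// 1e9 converts it to nanoseconds for time.Duration.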
			before := time.Duration(sample[0].Value.Float64() * 1e9)

			minMutexWaitTime := generateMutexWaitTime(lock)

			metrics.Read(sample[:])
			after := time.Duration(sample[0].Value.Float64() * 1e9)

			if wt := after - before; wt < minMutexWaitTime {
				t.Errorf("too little mutex wait time: got %s, want %s", wt, minMutexWaitTime)
			}
		})
	}
}

// locker2 represents an API surface of two concurrent goroutines
// locking the same resource, but through different APIs. It's intended
// to abstract over the relationship of two Lock calls or an RLock
// and a Lock call.
type locker2 interface {
	Lock1()
	Unlock1()
	Lock2()
	Unlock2()
}

type mutex struct {
	mu sync.Mutex
}

func (m *mutex) Lock1()   { m.mu.Lock() }
func (m *mutex) Unlock1() { m.mu.Unlock() }
func (m *mutex) Lock2()   { m.mu.Lock() }
func (m *mutex) Unlock2() { m.mu.Unlock() }

type rwmutexWrite struct {
	mu sync.RWMutex
}

func (m *rwmutexWrite) Lock1()   { m.mu.Lock() }
func (m *rwmutexWrite) Unlock1() { m.mu.Unlock() }
func (m *rwmutexWrite) Lock2()   { m.mu.Lock() }
func (m *rwmutexWrite) Unlock2() { m.mu.Unlock() }

type rwmutexReadWrite struct {
	mu sync.RWMutex
}

func (m *rwmutexReadWrite) Lock1()   { m.mu.RLock() }
func (m *rwmutexReadWrite) Unlock1() { m.mu.RUnlock() }
func (m *rwmutexReadWrite) Lock2()   { m.mu.Lock() }
func (m *rwmutexReadWrite) Unlock2() { m.mu.Unlock() }

type rwmutexWriteRead struct {
	mu sync.RWMutex
}

func (m *rwmutexWriteRead) Lock1()   { m.mu.Lock() }
func (m *rwmutexWriteRead) Unlock1() { m.mu.Unlock() }
func (m *rwmutexWriteRead) Lock2()   { m.mu.RLock() }
func (m *rwmutexWriteRead) Unlock2() { m.mu.RUnlock() }

// generateMutexWaitTime causes a goroutine to block for a while on the
// provided lock, returning the minimum amount of time that should become
// visible in the /sync/mutex/wait/total:seconds metric.
func generateMutexWaitTime(mu locker2) time.Duration {
	// Set up the runtime to always track casgstatus transitions for metrics.
	*runtime.CasGStatusAlwaysTrack = true

	mu.Lock1()

	// Start up a goroutine to wait on the lock.
	gc := make(chan *runtime.G)
	done := make(chan bool)
	go func() {
		gc <- runtime.Getg()

		for {
			mu.Lock2()
			mu.Unlock2()
			if <-done {
				return
			}
		}
	}()
	gp := <-gc

	// Set the block time high enough so that it will always show up, even
	// on systems with coarse timer granularity.
	const blockTime = 100 * time.Millisecond

	// Make sure the goroutine spawned above actually blocks on the lock.
	for {
		if runtime.GIsWaitingOnMutex(gp) {
			break
		}
		runtime.Gosched()
	}

	// Let some amount of time pass.
	time.Sleep(blockTime)

	// Let the other goroutine acquire the lock.
	mu.Unlock1()
	done <- true

	// Reset flag.
	*runtime.CasGStatusAlwaysTrack = false
	return blockTime
}