github.com/c12o16h1/go/src@v0.0.0-20200114212001-5a151c0f00ed/runtime/pprof/pprof_test.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build !js
     6  
     7  package pprof
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"github.com/c12o16h1/go/src/internal/testenv"
    14  	"io"
    15  	"io/ioutil"
    16  	"math/big"
    17  	"os"
    18  	"os/exec"
    19  	"regexp"
    20  	"runtime"
    21  	"runtime/pprof/internal/profile"
    22  	"strings"
    23  	"sync"
    24  	"sync/atomic"
    25  	"testing"
    26  	"time"
    27  )
    28  
    29  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
    30  	// We only need to get one 100 Hz clock tick, so we've got
    31  	// a large safety buffer.
    32  	// But do at least 500 iterations (which should take about 100ms),
    33  	// otherwise TestCPUProfileMultithreaded can fail if only one
    34  	// thread is scheduled during the testing period.
    35  	t0 := time.Now()
    36  	accum := *y
    37  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
    38  		accum = f(accum)
    39  	}
    40  	*y = accum
    41  }
    42  
    43  var (
    44  	salt1 = 0
    45  	salt2 = 0
    46  )
    47  
    48  // The actual CPU hogging function.
     49  // Must not call other functions nor access heap/globals in the loop;
     50  // otherwise, under the race detector, the samples will land in the race runtime.
    51  func cpuHog1(x int) int {
    52  	return cpuHog0(x, 1e5)
    53  }
    54  
    55  func cpuHog0(x, n int) int {
    56  	foo := x
    57  	for i := 0; i < n; i++ {
    58  		if foo > 0 {
    59  			foo *= foo
    60  		} else {
    61  			foo *= foo + 1
    62  		}
    63  	}
    64  	return foo
    65  }
    66  
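         // cpuHog2 is a second CPU-hogging function like cpuHog1; having two
         // distinct hot functions lets TestCPUProfileMultithreaded check that
         // samples show up in both.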
    67  func cpuHog2(x int) int {
    68  	foo := x
    69  	for i := 0; i < 1e5; i++ {
    70  		if foo > 0 {
    71  			foo *= foo
    72  		} else {
    73  			foo *= foo + 2
    74  		}
    75  	}
    76  	return foo
    77  }
    78  
    79  // Return a list of functions that we don't want to ever appear in CPU
    80  // profiles. For gccgo, that list includes the sigprof handler itself.
    81  func avoidFunctions() []string {
    82  	if runtime.Compiler == "gccgo" {
    83  		return []string{"runtime.sigprof"}
    84  	}
    85  	return nil
    86  }
    87  
    88  func TestCPUProfile(t *testing.T) {
    89  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
    90  		cpuHogger(cpuHog1, &salt1, dur)
    91  	})
    92  }
    93  
    94  func TestCPUProfileMultithreaded(t *testing.T) {
    95  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    96  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
    97  		c := make(chan int)
    98  		go func() {
    99  			cpuHogger(cpuHog1, &salt1, dur)
   100  			c <- 1
   101  		}()
   102  		cpuHogger(cpuHog2, &salt2, dur)
   103  		<-c
   104  	})
   105  }
   106  
   107  // containsInlinedCall reports whether the function body for the function f is
   108  // known to contain an inlined function call within the first maxBytes bytes.
   109  func containsInlinedCall(f interface{}, maxBytes int) bool {
   110  	_, found := findInlinedCall(f, maxBytes)
   111  	return found
   112  }
   113  
   114  // findInlinedCall returns the PC of an inlined function call within
   115  // the function body for the function f if any.
   116  func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
   117  	fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
   118  	if fFunc == nil || fFunc.Entry() == 0 {
   119  		panic("failed to locate function entry")
   120  	}
   121  
   122  	for offset := 0; offset < maxBytes; offset++ {
   123  		innerPC := fFunc.Entry() + uintptr(offset)
   124  		inner := runtime.FuncForPC(innerPC)
   125  		if inner == nil {
   126  			// No function known for this PC value.
   127  			// It might simply be misaligned, so keep searching.
   128  			continue
   129  		}
   130  		if inner.Entry() != fFunc.Entry() {
   131  			// Scanned past f and didn't find any inlined functions.
   132  			break
   133  		}
   134  		if inner.Name() != fFunc.Name() {
   135  			// This PC has f as its entry-point, but is not f. Therefore, it must be a
   136  			// function inlined into f.
   137  			return uint64(innerPC), true
   138  		}
   139  	}
   140  
   141  	return 0, false
   142  }
   143  
   144  func TestCPUProfileInlining(t *testing.T) {
   145  	if !containsInlinedCall(inlinedCaller, 4<<10) {
   146  		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
   147  	}
   148  
   149  	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
   150  		cpuHogger(inlinedCaller, &salt1, dur)
   151  	})
   152  
    153  	// Check that inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller frames should be in one Location.
   154  	for _, loc := range p.Location {
   155  		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
   156  		for _, line := range loc.Line {
   157  			if line.Function.Name == "runtime/pprof.inlinedCallee" {
   158  				hasInlinedCallee = true
   159  			}
   160  			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
   161  				hasInlinedCallerAfterInlinedCallee = true
   162  			}
   163  		}
   164  		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
   165  			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
   166  		}
   167  	}
   168  }
   169  
   170  func inlinedCaller(x int) int {
   171  	x = inlinedCallee(x, 1e5)
   172  	return x
   173  }
   174  
   175  func inlinedCallee(x, n int) int {
   176  	return cpuHog0(x, n)
   177  }
   178  
   179  func TestCPUProfileRecursion(t *testing.T) {
   180  	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
   181  		cpuHogger(recursionCaller, &salt1, dur)
   182  	})
   183  
    184  	// Check that the Location encoding was not confused by recursive calls.
   185  	for i, loc := range p.Location {
   186  		recursionFunc := 0
   187  		for _, line := range loc.Line {
   188  			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
   189  				recursionFunc++
   190  			}
   191  		}
   192  		if recursionFunc > 1 {
   193  			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
   194  		}
   195  	}
   196  }
   197  
   198  func recursionCaller(x int) int {
   199  	y := recursionCallee(3, x)
   200  	return y
   201  }
   202  
   203  func recursionCallee(n, x int) int {
   204  	if n == 0 {
   205  		return 1
   206  	}
   207  	y := inlinedCallee(x, 1e4)
   208  	return y * recursionCallee(n-1, x)
   209  }
   210  
   211  func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
   212  	p, err := profile.Parse(bytes.NewReader(valBytes))
   213  	if err != nil {
   214  		t.Fatal(err)
   215  	}
   216  	for _, sample := range p.Sample {
   217  		count := uintptr(sample.Value[0])
   218  		f(count, sample.Location, sample.Label)
   219  	}
   220  	return p
   221  }
   222  
   223  // testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
   224  // as interpreted by matches, and returns the parsed profile.
   225  func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
   226  	switch runtime.GOOS {
   227  	case "darwin":
   228  		switch runtime.GOARCH {
   229  		case "arm", "arm64":
   230  			// nothing
   231  		default:
   232  			out, err := exec.Command("uname", "-a").CombinedOutput()
   233  			if err != nil {
   234  				t.Fatal(err)
   235  			}
   236  			vers := string(out)
   237  			t.Logf("uname -a: %v", vers)
   238  		}
   239  	case "plan9":
   240  		t.Skip("skipping on plan9")
   241  	}
   242  
   243  	broken := false
   244  	switch runtime.GOOS {
   245  	case "darwin", "dragonfly", "netbsd", "illumos", "solaris":
   246  		broken = true
   247  	case "openbsd":
   248  		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
   249  			broken = true
   250  		}
   251  	}
   252  
   253  	maxDuration := 5 * time.Second
   254  	if testing.Short() && broken {
   255  		// If it's expected to be broken, no point waiting around.
   256  		maxDuration /= 10
   257  	}
   258  
   259  	// If we're running a long test, start with a long duration
   260  	// for tests that try to make sure something *doesn't* happen.
   261  	duration := 5 * time.Second
   262  	if testing.Short() {
   263  		duration = 100 * time.Millisecond
   264  	}
   265  
   266  	// Profiling tests are inherently flaky, especially on a
   267  	// loaded system, such as when this test is running with
   268  	// several others under go test std. If a test fails in a way
   269  	// that could mean it just didn't run long enough, try with a
   270  	// longer duration.
   271  	for duration <= maxDuration {
   272  		var prof bytes.Buffer
   273  		if err := StartCPUProfile(&prof); err != nil {
   274  			t.Fatal(err)
   275  		}
   276  		f(duration)
   277  		StopCPUProfile()
   278  
   279  		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
   280  			return p
   281  		}
   282  
   283  		duration *= 2
   284  		if duration <= maxDuration {
   285  			t.Logf("retrying with %s duration", duration)
   286  		}
   287  	}
   288  
   289  	if broken {
   290  		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
   291  	}
   292  
    293  	// Ignore the failure if the tests are running in a QEMU-based emulator;
    294  	// QEMU is not perfect at emulating everything.
    295  	// The IN_QEMU environment variable is set by some of the Go builders.
   296  	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
   297  	if os.Getenv("IN_QEMU") == "1" {
   298  		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
   299  	}
   300  	t.FailNow()
   301  	return nil
   302  }
   303  
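         // contains reports whether slice contains the string s.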
   304  func contains(slice []string, s string) bool {
   305  	for i := range slice {
   306  		if slice[i] == s {
   307  			return true
   308  		}
   309  	}
   310  	return false
   311  }
   312  
   313  // stackContains matches if a function named spec appears anywhere in the stack trace.
   314  func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   315  	for _, loc := range stk {
   316  		for _, line := range loc.Line {
   317  			if strings.Contains(line.Function.Name, spec) {
   318  				return true
   319  			}
   320  		}
   321  	}
   322  	return false
   323  }
   324  
   325  type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
   326  
   327  func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
   328  	ok = true
   329  
   330  	// Check that profile is well formed, contains 'need', and does not contain
   331  	// anything from 'avoid'.
   332  	have := make([]uintptr, len(need))
   333  	avoidSamples := make([]uintptr, len(avoid))
   334  	var samples uintptr
   335  	var buf bytes.Buffer
   336  	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
   337  		fmt.Fprintf(&buf, "%d:", count)
   338  		fprintStack(&buf, stk)
   339  		samples += count
   340  		for i, spec := range need {
   341  			if matches(spec, count, stk, labels) {
   342  				have[i] += count
   343  			}
   344  		}
   345  		for i, name := range avoid {
   346  			for _, loc := range stk {
   347  				for _, line := range loc.Line {
   348  					if strings.Contains(line.Function.Name, name) {
   349  						avoidSamples[i] += count
   350  					}
   351  				}
   352  			}
   353  		}
   354  		fmt.Fprintf(&buf, "\n")
   355  	})
   356  	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())
   357  
   358  	if samples < 10 && runtime.GOOS == "windows" {
   359  		// On some windows machines we end up with
   360  		// not enough samples due to coarse timer
   361  		// resolution. Let it go.
   362  		t.Log("too few samples on Windows (golang.org/issue/10842)")
   363  		return p, false
   364  	}
   365  
   366  	// Check that we got a reasonable number of samples.
   367  	// We used to always require at least ideal/4 samples,
   368  	// but that is too hard to guarantee on a loaded system.
   369  	// Now we accept 10 or more samples, which we take to be
   370  	// enough to show that at least some profiling is occurring.
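         	// For example, with the default 100 Hz sampling rate and a 5s run,
         	// ideal is 500; the check below then fails only if fewer than 10
         	// samples were collected (anything >= 10, or >= ideal/4, passes).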
   371  	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
   372  		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
   373  		ok = false
   374  	}
   375  
   376  	for i, name := range avoid {
   377  		bad := avoidSamples[i]
   378  		if bad != 0 {
   379  			t.Logf("found %d samples in avoid-function %s\n", bad, name)
   380  			ok = false
   381  		}
   382  	}
   383  
   384  	if len(need) == 0 {
   385  		return p, ok
   386  	}
   387  
   388  	var total uintptr
   389  	for i, name := range need {
   390  		total += have[i]
   391  		t.Logf("%s: %d\n", name, have[i])
   392  	}
   393  	if total == 0 {
   394  		t.Logf("no samples in expected functions")
   395  		ok = false
   396  	}
   397  	// We'd like to check a reasonable minimum, like
   398  	// total / len(have) / smallconstant, but this test is
   399  	// pretty flaky (see bug 7095).  So we'll just test to
   400  	// make sure we got at least one sample.
   401  	min := uintptr(1)
   402  	for i, name := range need {
   403  		if have[i] < min {
   404  			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
   405  			ok = false
   406  		}
   407  	}
   408  	return p, ok
   409  }
   410  
   411  // Fork can hang if preempted with signals frequently enough (see issue 5517).
   412  // Ensure that we do not do this.
   413  func TestCPUProfileWithFork(t *testing.T) {
   414  	testenv.MustHaveExec(t)
   415  
   416  	heap := 1 << 30
   417  	if runtime.GOOS == "android" {
   418  		// Use smaller size for Android to avoid crash.
   419  		heap = 100 << 20
   420  	}
   421  	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
   422  		// Use smaller heap for Windows/ARM to avoid crash.
   423  		heap = 100 << 20
   424  	}
   425  	if testing.Short() {
   426  		heap = 100 << 20
   427  	}
   428  	// This makes fork slower.
   429  	garbage := make([]byte, heap)
   430  	// Need to touch the slice, otherwise it won't be paged in.
   431  	done := make(chan bool)
   432  	go func() {
   433  		for i := range garbage {
   434  			garbage[i] = 42
   435  		}
   436  		done <- true
   437  	}()
   438  	<-done
   439  
   440  	var prof bytes.Buffer
   441  	if err := StartCPUProfile(&prof); err != nil {
   442  		t.Fatal(err)
   443  	}
   444  	defer StopCPUProfile()
   445  
   446  	for i := 0; i < 10; i++ {
   447  		exec.Command(os.Args[0], "-h").CombinedOutput()
   448  	}
   449  }
   450  
    451  // Test that the profiler does not observe runtime.gogo as "user" goroutine execution.
   452  // If it did, it would see inconsistent state and would either record an incorrect stack
   453  // or crash because the stack was malformed.
   454  func TestGoroutineSwitch(t *testing.T) {
   455  	if runtime.Compiler == "gccgo" {
   456  		t.Skip("not applicable for gccgo")
   457  	}
    458  	// How much to try. These defaults take about 1 second
    459  	// on a 2012 MacBook Pro. The ones in short mode take
    460  	// about 0.1 seconds.
   461  	tries := 10
   462  	count := 1000000
   463  	if testing.Short() {
   464  		tries = 1
   465  	}
   466  	for try := 0; try < tries; try++ {
   467  		var prof bytes.Buffer
   468  		if err := StartCPUProfile(&prof); err != nil {
   469  			t.Fatal(err)
   470  		}
   471  		for i := 0; i < count; i++ {
   472  			runtime.Gosched()
   473  		}
   474  		StopCPUProfile()
   475  
    476  		// Read the profile to look for entries for runtime.gogo with an attempt at a traceback.
    477  		// Special entries recording a PC without a traceback (handled below) are allowed.
   478  		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
   479  			// An entry with two frames with 'System' in its top frame
   480  			// exists to record a PC without a traceback. Those are okay.
   481  			if len(stk) == 2 {
   482  				name := stk[1].Line[0].Function.Name
   483  				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
   484  					return
   485  				}
   486  			}
   487  
   488  			// Otherwise, should not see runtime.gogo.
    489  			// The place we'd see it would be the innermost frame.
   490  			name := stk[0].Line[0].Function.Name
   491  			if name == "runtime.gogo" {
   492  				var buf bytes.Buffer
   493  				fprintStack(&buf, stk)
   494  				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
   495  			}
   496  		})
   497  	}
   498  }
   499  
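         // fprintStack writes stk to w as a single line: for each location, its
         // address followed by the "(function:line ...)" pairs for its frames.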
   500  func fprintStack(w io.Writer, stk []*profile.Location) {
   501  	for _, loc := range stk {
   502  		fmt.Fprintf(w, " %#x", loc.Address)
   503  		fmt.Fprintf(w, " (")
   504  		for i, line := range loc.Line {
   505  			if i > 0 {
   506  				fmt.Fprintf(w, " ")
   507  			}
   508  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
   509  		}
   510  		fmt.Fprintf(w, ")")
   511  	}
   512  	fmt.Fprintf(w, "\n")
   513  }
   514  
   515  // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
   516  func TestMathBigDivide(t *testing.T) {
   517  	testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
   518  		t := time.After(duration)
   519  		pi := new(big.Int)
   520  		for {
   521  			for i := 0; i < 100; i++ {
   522  				n := big.NewInt(2646693125139304345)
   523  				d := big.NewInt(842468587426513207)
   524  				pi.Div(n, d)
   525  			}
   526  			select {
   527  			case <-t:
   528  				return
   529  			default:
   530  			}
   531  		}
   532  	})
   533  }
   534  
   535  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
   536  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   537  	for _, f := range strings.Split(spec, ",") {
   538  		if !stackContains(f, count, stk, labels) {
   539  			return false
   540  		}
   541  	}
   542  	return true
   543  }
   544  
   545  func TestMorestack(t *testing.T) {
   546  	testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
   547  		t := time.After(duration)
   548  		c := make(chan bool)
   549  		for {
   550  			go func() {
   551  				growstack1()
   552  				c <- true
   553  			}()
   554  			select {
   555  			case <-t:
   556  				return
   557  			case <-c:
   558  			}
   559  		}
   560  	})
   561  }
   562  
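         // growstack1 and growstack below exist only to force stack growth:
         // growstack's 8 KiB local buffer exceeds a goroutine's initial stack,
         // so each call from a fresh goroutine should pass through
         // runtime.newstack, the frame TestMorestack above looks for.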
   563  //go:noinline
   564  func growstack1() {
   565  	growstack()
   566  }
   567  
   568  //go:noinline
   569  func growstack() {
   570  	var buf [8 << 10]byte
   571  	use(buf)
   572  }
   573  
   574  //go:noinline
   575  func use(x [8 << 10]byte) {}
   576  
   577  func TestBlockProfile(t *testing.T) {
   578  	type TestCase struct {
   579  		name string
   580  		f    func()
   581  		stk  []string
   582  		re   string
   583  	}
   584  	tests := [...]TestCase{
   585  		{
   586  			name: "chan recv",
   587  			f:    blockChanRecv,
   588  			stk: []string{
   589  				"runtime.chanrecv1",
   590  				"runtime/pprof.blockChanRecv",
   591  				"runtime/pprof.TestBlockProfile",
   592  			},
   593  			re: `
   594  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   595  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   596  #	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   597  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   598  `},
   599  		{
   600  			name: "chan send",
   601  			f:    blockChanSend,
   602  			stk: []string{
   603  				"runtime.chansend1",
   604  				"runtime/pprof.blockChanSend",
   605  				"runtime/pprof.TestBlockProfile",
   606  			},
   607  			re: `
   608  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   609  #	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   610  #	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   611  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   612  `},
   613  		{
   614  			name: "chan close",
   615  			f:    blockChanClose,
   616  			stk: []string{
   617  				"runtime.chanrecv1",
   618  				"runtime/pprof.blockChanClose",
   619  				"runtime/pprof.TestBlockProfile",
   620  			},
   621  			re: `
   622  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   623  #	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
   624  #	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   625  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   626  `},
   627  		{
   628  			name: "select recv async",
   629  			f:    blockSelectRecvAsync,
   630  			stk: []string{
   631  				"runtime.selectgo",
   632  				"runtime/pprof.blockSelectRecvAsync",
   633  				"runtime/pprof.TestBlockProfile",
   634  			},
   635  			re: `
   636  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   637  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
   638  #	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   639  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   640  `},
   641  		{
   642  			name: "select send sync",
   643  			f:    blockSelectSendSync,
   644  			stk: []string{
   645  				"runtime.selectgo",
   646  				"runtime/pprof.blockSelectSendSync",
   647  				"runtime/pprof.TestBlockProfile",
   648  			},
   649  			re: `
   650  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   651  #	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
   652  #	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   653  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   654  `},
   655  		{
   656  			name: "mutex",
   657  			f:    blockMutex,
   658  			stk: []string{
   659  				"sync.(*Mutex).Lock",
   660  				"runtime/pprof.blockMutex",
   661  				"runtime/pprof.TestBlockProfile",
   662  			},
   663  			re: `
   664  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   665  #	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
   666  #	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   667  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   668  `},
   669  		{
   670  			name: "cond",
   671  			f:    blockCond,
   672  			stk: []string{
   673  				"sync.(*Cond).Wait",
   674  				"runtime/pprof.blockCond",
   675  				"runtime/pprof.TestBlockProfile",
   676  			},
   677  			re: `
   678  [0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
   679  #	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
   680  #	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   681  #	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
   682  `},
   683  	}
   684  
   685  	// Generate block profile
   686  	runtime.SetBlockProfileRate(1)
   687  	defer runtime.SetBlockProfileRate(0)
   688  	for _, test := range tests {
   689  		test.f()
   690  	}
   691  
   692  	t.Run("debug=1", func(t *testing.T) {
   693  		var w bytes.Buffer
   694  		Lookup("block").WriteTo(&w, 1)
   695  		prof := w.String()
   696  
   697  		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
   698  			t.Fatalf("Bad profile header:\n%v", prof)
   699  		}
   700  
   701  		if strings.HasSuffix(prof, "#\t0x0\n\n") {
   702  			t.Errorf("Useless 0 suffix:\n%v", prof)
   703  		}
   704  
   705  		for _, test := range tests {
   706  			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
   707  				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
   708  			}
   709  		}
   710  	})
   711  
   712  	t.Run("proto", func(t *testing.T) {
   713  		// proto format
   714  		var w bytes.Buffer
   715  		Lookup("block").WriteTo(&w, 0)
   716  		p, err := profile.Parse(&w)
   717  		if err != nil {
   718  			t.Fatalf("failed to parse profile: %v", err)
   719  		}
   720  		t.Logf("parsed proto: %s", p)
   721  		if err := p.CheckValid(); err != nil {
   722  			t.Fatalf("invalid profile: %v", err)
   723  		}
   724  
   725  		stks := stacks(p)
   726  		for _, test := range tests {
   727  			if !containsStack(stks, test.stk) {
   728  				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
   729  			}
   730  		}
   731  	})
   732  
   733  }
   734  
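         // stacks flattens the profile into one string slice per sample, listing
         // the function names of each frame from innermost to outermost.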
   735  func stacks(p *profile.Profile) (res [][]string) {
   736  	for _, s := range p.Sample {
   737  		var stk []string
   738  		for _, l := range s.Location {
   739  			for _, line := range l.Line {
   740  				stk = append(stk, line.Function.Name)
   741  			}
   742  		}
   743  		res = append(res, stk)
   744  	}
   745  	return res
   746  }
   747  
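         // containsStack reports whether want is a leading prefix of any of the
         // stacks in got.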
   748  func containsStack(got [][]string, want []string) bool {
   749  	for _, stk := range got {
   750  		if len(stk) < len(want) {
   751  			continue
   752  		}
   753  		for i, f := range want {
   754  			if f != stk[i] {
   755  				break
   756  			}
   757  			if i == len(want)-1 {
   758  				return true
   759  			}
   760  		}
   761  	}
   762  	return false
   763  }
   764  
   765  const blockDelay = 10 * time.Millisecond
   766  
   767  func blockChanRecv() {
   768  	c := make(chan bool)
   769  	go func() {
   770  		time.Sleep(blockDelay)
   771  		c <- true
   772  	}()
   773  	<-c
   774  }
   775  
   776  func blockChanSend() {
   777  	c := make(chan bool)
   778  	go func() {
   779  		time.Sleep(blockDelay)
   780  		<-c
   781  	}()
   782  	c <- true
   783  }
   784  
   785  func blockChanClose() {
   786  	c := make(chan bool)
   787  	go func() {
   788  		time.Sleep(blockDelay)
   789  		close(c)
   790  	}()
   791  	<-c
   792  }
   793  
   794  func blockSelectRecvAsync() {
   795  	const numTries = 3
   796  	c := make(chan bool, 1)
   797  	c2 := make(chan bool, 1)
   798  	go func() {
   799  		for i := 0; i < numTries; i++ {
   800  			time.Sleep(blockDelay)
   801  			c <- true
   802  		}
   803  	}()
   804  	for i := 0; i < numTries; i++ {
   805  		select {
   806  		case <-c:
   807  		case <-c2:
   808  		}
   809  	}
   810  }
   811  
   812  func blockSelectSendSync() {
   813  	c := make(chan bool)
   814  	c2 := make(chan bool)
   815  	go func() {
   816  		time.Sleep(blockDelay)
   817  		<-c
   818  	}()
   819  	select {
   820  	case c <- true:
   821  	case c2 <- true:
   822  	}
   823  }
   824  
   825  func blockMutex() {
   826  	var mu sync.Mutex
   827  	mu.Lock()
   828  	go func() {
   829  		time.Sleep(blockDelay)
   830  		mu.Unlock()
   831  	}()
   832  	// Note: Unlock releases mu before recording the mutex event,
   833  	// so it's theoretically possible for this to proceed and
   834  	// capture the profile before the event is recorded. As long
   835  	// as this is blocked before the unlock happens, it's okay.
   836  	mu.Lock()
   837  }
   838  
   839  func blockCond() {
   840  	var mu sync.Mutex
   841  	c := sync.NewCond(&mu)
   842  	mu.Lock()
   843  	go func() {
   844  		time.Sleep(blockDelay)
   845  		mu.Lock()
   846  		c.Signal()
   847  		mu.Unlock()
   848  	}()
   849  	c.Wait()
   850  	mu.Unlock()
   851  }
   852  
   853  func TestMutexProfile(t *testing.T) {
   854  	// Generate mutex profile
   855  
   856  	old := runtime.SetMutexProfileFraction(1)
   857  	defer runtime.SetMutexProfileFraction(old)
   858  	if old != 0 {
   859  		t.Fatalf("need MutexProfileRate 0, got %d", old)
   860  	}
   861  
   862  	blockMutex()
   863  
   864  	t.Run("debug=1", func(t *testing.T) {
   865  		var w bytes.Buffer
   866  		Lookup("mutex").WriteTo(&w, 1)
   867  		prof := w.String()
   868  		t.Logf("received profile: %v", prof)
   869  
   870  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
   871  			t.Errorf("Bad profile header:\n%v", prof)
   872  		}
   873  		prof = strings.Trim(prof, "\n")
   874  		lines := strings.Split(prof, "\n")
   875  		if len(lines) != 6 {
   876  			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
   877  		}
   878  		if len(lines) < 6 {
   879  			return
   880  		}
   881  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
   882  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
   883  		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
   884  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
   885  			t.Errorf("%q didn't match %q", lines[3], r2)
   886  		}
   887  		r3 := "^#.*runtime/pprof.blockMutex.*$"
   888  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
   889  			t.Errorf("%q didn't match %q", lines[5], r3)
   890  		}
   891  		t.Logf(prof)
   892  	})
   893  	t.Run("proto", func(t *testing.T) {
   894  		// proto format
   895  		var w bytes.Buffer
   896  		Lookup("mutex").WriteTo(&w, 0)
   897  		p, err := profile.Parse(&w)
   898  		if err != nil {
   899  			t.Fatalf("failed to parse profile: %v", err)
   900  		}
   901  		t.Logf("parsed proto: %s", p)
   902  		if err := p.CheckValid(); err != nil {
   903  			t.Fatalf("invalid profile: %v", err)
   904  		}
   905  
   906  		stks := stacks(p)
   907  		for _, want := range [][]string{
   908  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
   909  		} {
   910  			if !containsStack(stks, want) {
   911  				t.Errorf("No matching stack entry for %+v", want)
   912  			}
   913  		}
   914  	})
   915  }
   916  
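         // func1 through func4 are distinct functions that simply block on a
         // channel receive, so the goroutine-profile tests can count goroutines
         // by their stacks.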
   917  func func1(c chan int) { <-c }
   918  func func2(c chan int) { <-c }
   919  func func3(c chan int) { <-c }
   920  func func4(c chan int) { <-c }
   921  
   922  func TestGoroutineCounts(t *testing.T) {
   923  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
   924  	// desired blocking point.
   925  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   926  
   927  	c := make(chan int)
   928  	for i := 0; i < 100; i++ {
   929  		switch {
   930  		case i%10 == 0:
   931  			go func1(c)
   932  		case i%2 == 0:
   933  			go func2(c)
   934  		default:
   935  			go func3(c)
   936  		}
   937  		// Let goroutines block on channel
   938  		for j := 0; j < 5; j++ {
   939  			runtime.Gosched()
   940  		}
   941  	}
   942  
   943  	var w bytes.Buffer
   944  	goroutineProf := Lookup("goroutine")
   945  
   946  	// Check debug profile
   947  	goroutineProf.WriteTo(&w, 1)
   948  	prof := w.String()
   949  
   950  	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
   951  		t.Errorf("expected sorted goroutine counts:\n%s", prof)
   952  	}
   953  
   954  	// Check proto profile
   955  	w.Reset()
   956  	goroutineProf.WriteTo(&w, 0)
   957  	p, err := profile.Parse(&w)
   958  	if err != nil {
   959  		t.Errorf("error parsing protobuf profile: %v", err)
   960  	}
   961  	if err := p.CheckValid(); err != nil {
   962  		t.Errorf("protobuf profile is invalid: %v", err)
   963  	}
   964  	if !containsCounts(p, []int64{50, 40, 10, 1}) {
   965  		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
   966  			[]int64{50, 40, 10, 1}, p)
   967  	}
   968  
   969  	close(c)
   970  
   971  	time.Sleep(10 * time.Millisecond) // let goroutines exit
   972  }
   973  
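         // containsInOrder reports whether the strings in all appear in s in the
         // given order, each match starting after the end of the previous one.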
   974  func containsInOrder(s string, all ...string) bool {
   975  	for _, t := range all {
   976  		i := strings.Index(s, t)
   977  		if i < 0 {
   978  			return false
   979  		}
   980  		s = s[i+len(t):]
   981  	}
   982  	return true
   983  }
   984  
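         // containsCounts reports whether prof has a sample for each value in
         // counts, treating counts as a multiset: repeated values must be
         // matched by distinct samples.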
   985  func containsCounts(prof *profile.Profile, counts []int64) bool {
   986  	m := make(map[int64]int)
   987  	for _, c := range counts {
   988  		m[c]++
   989  	}
   990  	for _, s := range prof.Sample {
   991  		// The count is the single value in the sample
   992  		if len(s.Value) != 1 {
   993  			return false
   994  		}
   995  		m[s.Value[0]]--
   996  	}
   997  	for _, n := range m {
   998  		if n > 0 {
   999  			return false
  1000  		}
  1001  	}
  1002  	return true
  1003  }
  1004  
  1005  var emptyCallStackTestRun int64
  1006  
  1007  // Issue 18836.
  1008  func TestEmptyCallStack(t *testing.T) {
  1009  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1010  	emptyCallStackTestRun++
  1011  
  1012  	t.Parallel()
  1013  	var buf bytes.Buffer
  1014  	p := NewProfile(name)
  1015  
  1016  	p.Add("foo", 47674)
  1017  	p.WriteTo(&buf, 1)
  1018  	p.Remove("foo")
  1019  	got := buf.String()
  1020  	prefix := name + " profile: total 1\n"
  1021  	if !strings.HasPrefix(got, prefix) {
  1022  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1023  	}
  1024  	lostevent := "lostProfileEvent"
  1025  	if !strings.Contains(got, lostevent) {
  1026  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1027  	}
  1028  }
  1029  
  1030  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
  1031  // and value and has funcname somewhere in the stack.
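         // For example, the spec "runtime/pprof.cpuHogger;key=value" matches a
         // sample only if its label set maps "key" to a list containing "value"
         // and some frame's function name contains "runtime/pprof.cpuHogger".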
  1032  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1033  	semi := strings.Index(spec, ";")
  1034  	if semi == -1 {
  1035  		panic("no semicolon in key/value spec")
  1036  	}
  1037  	kv := strings.SplitN(spec[semi+1:], "=", 2)
  1038  	if len(kv) != 2 {
  1039  		panic("missing = in key/value spec")
  1040  	}
  1041  	if !contains(labels[kv[0]], kv[1]) {
  1042  		return false
  1043  	}
  1044  	return stackContains(spec[:semi], count, stk, labels)
  1045  }
  1046  
  1047  func TestCPUProfileLabel(t *testing.T) {
  1048  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
  1049  		Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1050  			cpuHogger(cpuHog1, &salt1, dur)
  1051  		})
  1052  	})
  1053  }
  1054  
  1055  func TestLabelRace(t *testing.T) {
  1056  	// Test the race detector annotations for synchronization
   1057  	// between setting labels and consuming them from the
  1058  	// profile.
  1059  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
  1060  		start := time.Now()
  1061  		var wg sync.WaitGroup
  1062  		for time.Since(start) < dur {
  1063  			var salts [10]int
  1064  			for i := 0; i < 10; i++ {
  1065  				wg.Add(1)
  1066  				go func(j int) {
  1067  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1068  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
  1069  					})
  1070  					wg.Done()
  1071  				}(i)
  1072  			}
  1073  			wg.Wait()
  1074  		}
  1075  	})
  1076  }
  1077  
  1078  // Check that there is no deadlock when the program receives SIGPROF while in
   1079  // the critical section of a 64-bit atomic operation. Used to happen on mips{,le}. See #20146.
  1080  func TestAtomicLoadStore64(t *testing.T) {
  1081  	f, err := ioutil.TempFile("", "profatomic")
  1082  	if err != nil {
  1083  		t.Fatalf("TempFile: %v", err)
  1084  	}
  1085  	defer os.Remove(f.Name())
  1086  	defer f.Close()
  1087  
  1088  	if err := StartCPUProfile(f); err != nil {
  1089  		t.Fatal(err)
  1090  	}
  1091  	defer StopCPUProfile()
  1092  
  1093  	var flag uint64
  1094  	done := make(chan bool, 1)
  1095  
  1096  	go func() {
  1097  		for atomic.LoadUint64(&flag) == 0 {
  1098  			runtime.Gosched()
  1099  		}
  1100  		done <- true
  1101  	}()
  1102  	time.Sleep(50 * time.Millisecond)
  1103  	atomic.StoreUint64(&flag, 1)
  1104  	<-done
  1105  }
  1106  
  1107  func TestTracebackAll(t *testing.T) {
  1108  	// With gccgo, if a profiling signal arrives at the wrong time
  1109  	// during traceback, it may crash or hang. See issue #29448.
  1110  	f, err := ioutil.TempFile("", "proftraceback")
  1111  	if err != nil {
  1112  		t.Fatalf("TempFile: %v", err)
  1113  	}
  1114  	defer os.Remove(f.Name())
  1115  	defer f.Close()
  1116  
  1117  	if err := StartCPUProfile(f); err != nil {
  1118  		t.Fatal(err)
  1119  	}
  1120  	defer StopCPUProfile()
  1121  
  1122  	ch := make(chan int)
  1123  	defer close(ch)
  1124  
  1125  	count := 10
  1126  	for i := 0; i < count; i++ {
  1127  		go func() {
  1128  			<-ch // block
  1129  		}()
  1130  	}
  1131  
  1132  	N := 10000
  1133  	if testing.Short() {
  1134  		N = 500
  1135  	}
  1136  	buf := make([]byte, 10*1024)
  1137  	for i := 0; i < N; i++ {
  1138  		runtime.Stack(buf, true)
  1139  	}
  1140  }
  1141  
   1142  // TestTryAdd tests cases that are hard to exercise with real program execution.
   1143  // For example, the current Go compilers may not inline functions involved in recursion,
   1144  // but that may change in future compilers. This test covers such cases by
   1145  // using fake call sequences and building the profile with
   1146  // translateCPUProfile, defined in proto_test.go.
  1147  func TestTryAdd(t *testing.T) {
  1148  	inlinedCallerPtr := uint64(funcPC(inlinedCaller)) + 1
  1149  	inlinedCalleePtr, found := findInlinedCall(inlinedCaller, 4<<10)
  1150  	if !found {
  1151  		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
  1152  	}
  1153  	inlinedCalleePtr += 1 // +1 to be safely inside of the function body.
  1154  
  1155  	period := int64(2000 * 1000) // 1/500*1e9 nanosec.
  1156  
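         	// Each input below starts with a header record {3, 0, hz}. The remaining
         	// records appear to follow the layout consumed by profileBuilder.addCPUData:
         	// {record length, timestamp (0 here), sample count, stack PCs...}.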
  1157  	testCases := []struct {
  1158  		name        string
  1159  		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
  1160  		wantLocs    [][]string        // ordered location entries with function names.
  1161  		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
  1162  	}{{
  1163  		name: "bug35538",
  1164  		input: []uint64{
  1165  			3, 0, 500, // hz = 500. Must match the period.
  1166  			7, 0, 10, inlinedCalleePtr, inlinedCallerPtr, inlinedCalleePtr, inlinedCallerPtr,
  1167  			5, 0, 20, inlinedCalleePtr, inlinedCallerPtr,
  1168  		},
  1169  		wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
  1170  		wantSamples: []*profile.Sample{
  1171  			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  1172  			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
  1173  		},
  1174  	}, {
  1175  		name: "recursive_inlined_funcs",
  1176  		input: []uint64{
  1177  			3, 0, 500, // hz = 500. Must match the period.
  1178  			5, 0, 30, inlinedCalleePtr, inlinedCalleePtr,
  1179  			4, 0, 40, inlinedCalleePtr,
  1180  		},
  1181  		wantLocs: [][]string{{"runtime/pprof.inlinedCallee"}},
  1182  		wantSamples: []*profile.Sample{
  1183  			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
  1184  			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}}},
  1185  		},
  1186  	}, {
  1187  		name: "truncated_stack_trace_later",
  1188  		input: []uint64{
  1189  			3, 0, 500, // hz = 500. Must match the period.
  1190  			5, 0, 50, inlinedCalleePtr, inlinedCallerPtr,
  1191  			4, 0, 60, inlinedCalleePtr,
  1192  		},
  1193  		wantLocs: [][]string{{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}},
  1194  		wantSamples: []*profile.Sample{
  1195  			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
  1196  			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
  1197  		},
  1198  	}, {
  1199  		name: "truncated_stack_trace_first",
  1200  		input: []uint64{
  1201  			3, 0, 500, // hz = 500. Must match the period.
  1202  			4, 0, 70, inlinedCalleePtr,
  1203  			5, 0, 80, inlinedCalleePtr, inlinedCallerPtr,
  1204  		},
  1205  		wantLocs: [][]string{ // the inline info is screwed up, but better than a crash.
  1206  			{"runtime/pprof.inlinedCallee"},
  1207  			{"runtime/pprof.inlinedCaller"}},
  1208  		wantSamples: []*profile.Sample{
  1209  			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
  1210  			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
  1211  		},
  1212  	}}
  1213  
  1214  	for _, tc := range testCases {
  1215  		t.Run(tc.name, func(t *testing.T) {
  1216  			p, err := translateCPUProfile(tc.input)
  1217  			if err != nil {
  1218  				t.Fatalf("translating profile: %v", err)
  1219  			}
  1220  			t.Logf("Profile: %v\n", p)
  1221  
  1222  			// One location entry with all inlined functions.
  1223  			var gotLoc [][]string
  1224  			for _, loc := range p.Location {
  1225  				var names []string
  1226  				for _, line := range loc.Line {
  1227  					names = append(names, line.Function.Name)
  1228  				}
  1229  				gotLoc = append(gotLoc, names)
  1230  			}
  1231  			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
  1232  				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
  1233  			}
  1234  			// All samples should point to one location.
  1235  			var gotSamples []*profile.Sample
  1236  			for _, sample := range p.Sample {
  1237  				var locs []*profile.Location
  1238  				for _, loc := range sample.Location {
  1239  					locs = append(locs, &profile.Location{ID: loc.ID})
  1240  				}
  1241  				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
  1242  			}
  1243  			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
  1244  				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
  1245  			}
  1246  		})
  1247  	}
  1248  }