github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/pprof/pprof_test.go (about)

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // +build !js
     6  
     7  package pprof
     8  
     9  import (
    10  	"bytes"
    11  	"context"
    12  	"fmt"
    13  	"internal/profile"
    14  	"internal/testenv"
    15  	"io"
    16  	"math/big"
    17  	"os"
    18  	"os/exec"
    19  	"regexp"
    20  	"runtime"
    21  	"strings"
    22  	"sync"
    23  	"sync/atomic"
    24  	"testing"
    25  	"time"
    26  )
    27  
    28  func cpuHogger(f func(x int) int, y *int, dur time.Duration) {
    29  	// We only need to get one 100 Hz clock tick, so we've got
    30  	// a large safety buffer.
    31  	// But do at least 500 iterations (which should take about 100ms),
    32  	// otherwise TestCPUProfileMultithreaded can fail if only one
    33  	// thread is scheduled during the testing period.
    34  	t0 := time.Now()
    35  	accum := *y
    36  	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
    37  		accum = f(accum)
    38  	}
    39  	*y = accum
    40  }
    41  
// salt1 and salt2 are package-level sinks for the CPU hog results;
// cpuHogger stores its accumulator back through them so the hot loops
// have an observable side effect (NOTE(review): presumably to keep the
// computation from being optimized away — confirm).
var (
	salt1 = 0
	salt2 = 0
)
    46  
// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1(x int) int {
	// Delegates to cpuHog0 with a fixed iteration count of 1e5.
	return cpuHog0(x, 1e5)
}
    53  
    54  func cpuHog0(x, n int) int {
    55  	foo := x
    56  	for i := 0; i < n; i++ {
    57  		if foo > 0 {
    58  			foo *= foo
    59  		} else {
    60  			foo *= foo + 1
    61  		}
    62  	}
    63  	return foo
    64  }
    65  
    66  func cpuHog2(x int) int {
    67  	foo := x
    68  	for i := 0; i < 1e5; i++ {
    69  		if foo > 0 {
    70  			foo *= foo
    71  		} else {
    72  			foo *= foo + 2
    73  		}
    74  	}
    75  	return foo
    76  }
    77  
    78  // Return a list of functions that we don't want to ever appear in CPU
    79  // profiles. For gccgo, that list includes the sigprof handler itself.
    80  func avoidFunctions() []string {
    81  	if runtime.Compiler == "gccgo" {
    82  		return []string{"runtime.sigprof"}
    83  	}
    84  	return nil
    85  }
    86  
    87  func TestCPUProfile(t *testing.T) {
    88  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1"}, avoidFunctions(), func(dur time.Duration) {
    89  		cpuHogger(cpuHog1, &salt1, dur)
    90  	})
    91  }
    92  
    93  func TestCPUProfileMultithreaded(t *testing.T) {
    94  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
    95  	testCPUProfile(t, stackContains, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, avoidFunctions(), func(dur time.Duration) {
    96  		c := make(chan int)
    97  		go func() {
    98  			cpuHogger(cpuHog1, &salt1, dur)
    99  			c <- 1
   100  		}()
   101  		cpuHogger(cpuHog2, &salt2, dur)
   102  		<-c
   103  	})
   104  }
   105  
   106  // containsInlinedCall reports whether the function body for the function f is
   107  // known to contain an inlined function call within the first maxBytes bytes.
   108  func containsInlinedCall(f interface{}, maxBytes int) bool {
   109  	_, found := findInlinedCall(f, maxBytes)
   110  	return found
   111  }
   112  
   113  // findInlinedCall returns the PC of an inlined function call within
   114  // the function body for the function f if any.
   115  func findInlinedCall(f interface{}, maxBytes int) (pc uint64, found bool) {
   116  	fFunc := runtime.FuncForPC(uintptr(funcPC(f)))
   117  	if fFunc == nil || fFunc.Entry() == 0 {
   118  		panic("failed to locate function entry")
   119  	}
   120  
   121  	for offset := 0; offset < maxBytes; offset++ {
   122  		innerPC := fFunc.Entry() + uintptr(offset)
   123  		inner := runtime.FuncForPC(innerPC)
   124  		if inner == nil {
   125  			// No function known for this PC value.
   126  			// It might simply be misaligned, so keep searching.
   127  			continue
   128  		}
   129  		if inner.Entry() != fFunc.Entry() {
   130  			// Scanned past f and didn't find any inlined functions.
   131  			break
   132  		}
   133  		if inner.Name() != fFunc.Name() {
   134  			// This PC has f as its entry-point, but is not f. Therefore, it must be a
   135  			// function inlined into f.
   136  			return uint64(innerPC), true
   137  		}
   138  	}
   139  
   140  	return 0, false
   141  }
   142  
// TestCPUProfileInlining checks that inlined frames are encoded into a
// single profile Location together with their caller.
func TestCPUProfileInlining(t *testing.T) {
	// If the build did not actually inline inlinedCallee into
	// inlinedCaller, the Location check below is meaningless, so skip.
	if !containsInlinedCall(inlinedCaller, 4<<10) {
		t.Skip("Can't determine whether inlinedCallee was inlined into inlinedCaller.")
	}

	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, avoidFunctions(), func(dur time.Duration) {
		cpuHogger(inlinedCaller, &salt1, dur)
	})

	// Check if inlined function locations are encoded correctly. The inlinedCallee and inlinedCaller should be in one location.
	for _, loc := range p.Location {
		hasInlinedCallerAfterInlinedCallee, hasInlinedCallee := false, false
		for _, line := range loc.Line {
			if line.Function.Name == "runtime/pprof.inlinedCallee" {
				hasInlinedCallee = true
			}
			// The caller line must appear after the callee line within
			// the same Location (innermost frame first).
			if hasInlinedCallee && line.Function.Name == "runtime/pprof.inlinedCaller" {
				hasInlinedCallerAfterInlinedCallee = true
			}
		}
		// Either both appear together in this Location, or neither does;
		// a lone callee or caller means the inlining was mis-encoded.
		if hasInlinedCallee != hasInlinedCallerAfterInlinedCallee {
			t.Fatalf("want inlinedCallee followed by inlinedCaller, got separate Location entries:\n%v", p)
		}
	}
}
   168  
// inlinedCaller is the outer frame for the inlining tests; it is
// expected to have inlinedCallee inlined into it.
// NOTE(review): the intermediate assignment (rather than returning the
// call directly) is presumably deliberate to keep the call shape stable
// for profiling — confirm before simplifying.
func inlinedCaller(x int) int {
	x = inlinedCallee(x, 1e5)
	return x
}
   173  
// inlinedCallee is a thin wrapper over cpuHog0, intended to be inlined
// into its callers (see TestCPUProfileInlining).
func inlinedCallee(x, n int) int {
	return cpuHog0(x, n)
}
   177  
// dumpCallers records the current call stack into pcs (if non-nil).
// Kept out of line so it contributes a predictable frame.
//go:noinline
func dumpCallers(pcs []uintptr) {
	// Nothing to record into.
	if pcs == nil {
		return
	}

	skip := 2 // Callers and dumpCallers
	runtime.Callers(skip, pcs)
}
   187  
// inlinedCallerDump is a non-inlined outer frame; inlinedCalleeDump is
// presumably inlined into it (NOTE(review): confirm against the tests
// that use this pair).
//go:noinline
func inlinedCallerDump(pcs []uintptr) {
	inlinedCalleeDump(pcs)
}
   192  
// inlinedCalleeDump captures the call stack via dumpCallers; it carries
// no noinline directive, so it is eligible for inlining into its caller.
func inlinedCalleeDump(pcs []uintptr) {
	dumpCallers(pcs)
}
   196  
   197  func TestCPUProfileRecursion(t *testing.T) {
   198  	p := testCPUProfile(t, stackContains, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.recursionCallee", "runtime/pprof.recursionCaller"}, avoidFunctions(), func(dur time.Duration) {
   199  		cpuHogger(recursionCaller, &salt1, dur)
   200  	})
   201  
   202  	// check the Location encoding was not confused by recursive calls.
   203  	for i, loc := range p.Location {
   204  		recursionFunc := 0
   205  		for _, line := range loc.Line {
   206  			if name := line.Function.Name; name == "runtime/pprof.recursionCaller" || name == "runtime/pprof.recursionCallee" {
   207  				recursionFunc++
   208  			}
   209  		}
   210  		if recursionFunc > 1 {
   211  			t.Fatalf("want at most one recursionCaller or recursionCallee in one Location, got a violating Location (index: %d):\n%v", i, p)
   212  		}
   213  	}
   214  }
   215  
// recursionCaller is the entry point for TestCPUProfileRecursion's hog;
// it kicks off a three-level recursion through recursionCallee.
func recursionCaller(x int) int {
	y := recursionCallee(3, x)
	return y
}
   220  
// recursionCallee recurses n levels, burning CPU in inlinedCallee at
// each level, and combines the per-level results multiplicatively.
func recursionCallee(n, x int) int {
	if n == 0 {
		return 1
	}
	y := inlinedCallee(x, 1e4)
	return y * recursionCallee(n-1, x)
}
   228  
// recursionChainTop starts a mutual-recursion chain
// (Top -> Middle -> Bottom -> Top ...) that stops once x goes negative.
func recursionChainTop(x int, pcs []uintptr) {
	if x < 0 {
		return
	}
	recursionChainMiddle(x, pcs)
}
   235  
// recursionChainMiddle is the middle link of the mutual-recursion chain.
func recursionChainMiddle(x int, pcs []uintptr) {
	recursionChainBottom(x, pcs)
}
   239  
// recursionChainBottom records the current call stack into pcs and then
// continues the chain with a decremented counter.
func recursionChainBottom(x int, pcs []uintptr) {
	// This will be called each time, we only care about the last. We
	// can't make this conditional or this function won't be inlined.
	dumpCallers(pcs)

	recursionChainTop(x-1, pcs)
}
   247  
   248  func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) *profile.Profile {
   249  	p, err := profile.Parse(bytes.NewReader(valBytes))
   250  	if err != nil {
   251  		t.Fatal(err)
   252  	}
   253  	for _, sample := range p.Sample {
   254  		count := uintptr(sample.Value[0])
   255  		f(count, sample.Location, sample.Label)
   256  	}
   257  	return p
   258  }
   259  
// testCPUProfile runs f under the CPU profiler, checking for some conditions specified by need,
// as interpreted by matches, and returns the parsed profile.
//
// Profiling tests are flaky on loaded machines, so f is re-run with an
// exponentially increasing duration until the profile passes profileOk
// or maxDuration is exceeded. On platforms with known-broken profiling
// (and under QEMU) a final failure is reported as a skip instead.
func testCPUProfile(t *testing.T, matches matchFunc, need []string, avoid []string, f func(dur time.Duration)) *profile.Profile {
	switch runtime.GOOS {
	case "darwin", "ios":
		switch runtime.GOARCH {
		case "arm64":
			// nothing
		default:
			// Log the kernel/OS version; useful when diagnosing
			// macOS-specific profiling failures.
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	// Platforms where CPU profiling is known to be unreliable; a failure
	// there is downgraded to a skip at the end.
	broken := false
	switch runtime.GOOS {
	case "darwin", "ios", "dragonfly", "netbsd", "illumos", "solaris":
		broken = true
	case "openbsd":
		if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
			broken = true
		}
	case "windows":
		if runtime.GOARCH == "arm" {
			broken = true // See https://golang.org/issues/42862
		}
	}

	maxDuration := 5 * time.Second
	if testing.Short() && broken {
		// If it's expected to be broken, no point waiting around.
		maxDuration /= 10
	}

	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 100 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if p, ok := profileOk(t, matches, need, avoid, prof, duration); ok {
			return p
		}

		// Double the duration and retry.
		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if broken {
		t.Skipf("ignoring failure on %s/%s; see golang.org/issue/13841", runtime.GOOS, runtime.GOARCH)
	}

	// Ignore the failure if the tests are running in a QEMU-based emulator,
	// QEMU is not perfect at emulating everything.
	// IN_QEMU environmental variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
	}
	t.FailNow()
	return nil
}
   344  
   345  func contains(slice []string, s string) bool {
   346  	for i := range slice {
   347  		if slice[i] == s {
   348  			return true
   349  		}
   350  	}
   351  	return false
   352  }
   353  
   354  // stackContains matches if a function named spec appears anywhere in the stack trace.
   355  func stackContains(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   356  	for _, loc := range stk {
   357  		for _, line := range loc.Line {
   358  			if strings.Contains(line.Function.Name, spec) {
   359  				return true
   360  			}
   361  		}
   362  	}
   363  	return false
   364  }
   365  
// matchFunc is the signature of sample-matching predicates such as
// stackContains and stackContainsAll: it reports whether a single
// profile sample (count, stack, labels) satisfies spec.
type matchFunc func(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool
   367  
// profileOk parses prof and checks that it is well formed, that the
// sample count is plausible for duration, that every entry in need is
// matched (per matches), and that no function in avoid collected any
// samples. It returns the parsed profile and whether all checks passed.
func profileOk(t *testing.T, matches matchFunc, need []string, avoid []string, prof bytes.Buffer, duration time.Duration) (_ *profile.Profile, ok bool) {
	ok = true

	// Check that profile is well formed, contains 'need', and does not contain
	// anything from 'avoid'.
	have := make([]uintptr, len(need))
	avoidSamples := make([]uintptr, len(avoid))
	var samples uintptr
	var buf bytes.Buffer
	p := parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		// Accumulate a human-readable dump of every sample for the log.
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		for i, spec := range need {
			if matches(spec, count, stk, labels) {
				have[i] += count
			}
		}
		for i, name := range avoid {
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						avoidSamples[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return p, false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	// Any sample attributed to an avoid-function fails the check, but log
	// them all before deciding.
	for i, name := range avoid {
		bad := avoidSamples[i]
		if bad != 0 {
			t.Logf("found %d samples in avoid-function %s\n", bad, name)
			ok = false
		}
	}

	if len(need) == 0 {
		return p, ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095).  So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return p, ok
}
   451  
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	// A large, touched heap makes fork slower, widening the window in
	// which a SIGPROF could interrupt it.
	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if runtime.GOOS == "windows" && runtime.GOARCH == "arm" {
		// Use smaller heap for Windows/ARM to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	// Exec a child several times while profiling signals are firing.
	// The command's output and error are deliberately ignored: the test
	// only cares that fork/exec completes rather than hanging.
	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}
   491  
// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	if runtime.Compiler == "gccgo" {
		t.Skip("not applicable for gccgo")
	}
	// How much to try. These defaults take about 1 seconds
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		// Force many goroutine switches while the profiler is active.
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt
		// at a traceback. (A special two-frame entry whose top frame is a
		// 'System'-style pseudo-function records a PC without a traceback;
		// see below.)
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the inner most frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}
   540  
   541  func fprintStack(w io.Writer, stk []*profile.Location) {
   542  	for _, loc := range stk {
   543  		fmt.Fprintf(w, " %#x", loc.Address)
   544  		fmt.Fprintf(w, " (")
   545  		for i, line := range loc.Line {
   546  			if i > 0 {
   547  				fmt.Fprintf(w, " ")
   548  			}
   549  			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
   550  		}
   551  		fmt.Fprintf(w, ")")
   552  	}
   553  	fmt.Fprintf(w, "\n")
   554  }
   555  
   556  // Test that profiling of division operations is okay, especially on ARM. See issue 6681.
   557  func TestMathBigDivide(t *testing.T) {
   558  	testCPUProfile(t, nil, nil, nil, func(duration time.Duration) {
   559  		t := time.After(duration)
   560  		pi := new(big.Int)
   561  		for {
   562  			for i := 0; i < 100; i++ {
   563  				n := big.NewInt(2646693125139304345)
   564  				d := big.NewInt(842468587426513207)
   565  				pi.Div(n, d)
   566  			}
   567  			select {
   568  			case <-t:
   569  				return
   570  			default:
   571  			}
   572  		}
   573  	})
   574  }
   575  
   576  // stackContainsAll matches if all functions in spec (comma-separated) appear somewhere in the stack trace.
   577  func stackContainsAll(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
   578  	for _, f := range strings.Split(spec, ",") {
   579  		if !stackContains(f, count, stk, labels) {
   580  			return false
   581  		}
   582  	}
   583  	return true
   584  }
   585  
   586  func TestMorestack(t *testing.T) {
   587  	testCPUProfile(t, stackContainsAll, []string{"runtime.newstack,runtime/pprof.growstack"}, avoidFunctions(), func(duration time.Duration) {
   588  		t := time.After(duration)
   589  		c := make(chan bool)
   590  		for {
   591  			go func() {
   592  				growstack1()
   593  				c <- true
   594  			}()
   595  			select {
   596  			case <-t:
   597  				return
   598  			case <-c:
   599  			}
   600  		}
   601  	})
   602  }
   603  
// growstack1 adds one non-inlined frame between the test goroutine and
// growstack.
//go:noinline
func growstack1() {
	growstack()
}
   608  
// growstack allocates a large (8 KiB) local so calling it from a fresh
// goroutine forces stack growth (TestMorestack expects runtime.newstack
// samples attributed here).
//go:noinline
func growstack() {
	var buf [8 << 10]byte
	use(buf)
}
   614  
// use is an out-of-line sink for growstack's buffer (NOTE(review):
// presumably keeps buf live so it cannot be optimized away — confirm).
//go:noinline
func use(x [8 << 10]byte) {}
   617  
// TestBlockProfile generates contention on channels, select statements,
// mutexes, and condition variables, then checks both the debug=1 text
// format and the proto format of the "block" profile for the expected
// stacks.
func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string   // subtest label
		f    func()   // helper that produces one blocking event
		stk  []string // expected stack prefix in the proto profile
		re   string   // expected entry in the debug=1 text profile
	}
	tests := [...]TestCase{
		{
			name: "chan recv",
			f:    blockChanRecv,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanRecv",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan send",
			f:    blockChanSend,
			stk: []string{
				"runtime.chansend1",
				"runtime/pprof.blockChanSend",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "chan close",
			f:    blockChanClose,
			stk: []string{
				"runtime.chanrecv1",
				"runtime/pprof.blockChanClose",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select recv async",
			f:    blockSelectRecvAsync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectRecvAsync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "select send sync",
			f:    blockSelectSendSync,
			stk: []string{
				"runtime.selectgo",
				"runtime/pprof.blockSelectSendSync",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "mutex",
			f:    blockMutex,
			stk: []string{
				"sync.(*Mutex).Lock",
				"runtime/pprof.blockMutex",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{
			name: "cond",
			f:    blockCond,
			stk: []string{
				"sync.(*Cond).Wait",
				"runtime/pprof.blockCond",
				"runtime/pprof.TestBlockProfile",
			},
			re: `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	// Generate block profile
	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}

	t.Run("debug=1", func(t *testing.T) {
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 1)
		prof := w.String()

		if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
			t.Fatalf("Bad profile header:\n%v", prof)
		}

		if strings.HasSuffix(prof, "#\t0x0\n\n") {
			t.Errorf("Useless 0 suffix:\n%v", prof)
		}

		for _, test := range tests {
			// The debug output aligns columns with runs of tabs, so widen
			// each literal tab in the expected pattern to "one or more".
			if !regexp.MustCompile(strings.ReplaceAll(test.re, "\t", "\t+")).MatchString(prof) {
				t.Errorf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
			}
		}
	})

	t.Run("proto", func(t *testing.T) {
		// proto format
		var w bytes.Buffer
		Lookup("block").WriteTo(&w, 0)
		p, err := profile.Parse(&w)
		if err != nil {
			t.Fatalf("failed to parse profile: %v", err)
		}
		t.Logf("parsed proto: %s", p)
		if err := p.CheckValid(); err != nil {
			t.Fatalf("invalid profile: %v", err)
		}

		stks := stacks(p)
		for _, test := range tests {
			if !containsStack(stks, test.stk) {
				t.Errorf("No matching stack entry for %v, want %+v", test.name, test.stk)
			}
		}
	})

}
   775  
   776  func stacks(p *profile.Profile) (res [][]string) {
   777  	for _, s := range p.Sample {
   778  		var stk []string
   779  		for _, l := range s.Location {
   780  			for _, line := range l.Line {
   781  				stk = append(stk, line.Function.Name)
   782  			}
   783  		}
   784  		res = append(res, stk)
   785  	}
   786  	return res
   787  }
   788  
   789  func containsStack(got [][]string, want []string) bool {
   790  	for _, stk := range got {
   791  		if len(stk) < len(want) {
   792  			continue
   793  		}
   794  		for i, f := range want {
   795  			if f != stk[i] {
   796  				break
   797  			}
   798  			if i == len(want)-1 {
   799  				return true
   800  			}
   801  		}
   802  	}
   803  	return false
   804  }
   805  
// blockDelay is how long each block* helper's background goroutine
// sleeps before releasing the main goroutine, so the main goroutine
// measurably blocks.
const blockDelay = 10 * time.Millisecond
   807  
   808  func blockChanRecv() {
   809  	c := make(chan bool)
   810  	go func() {
   811  		time.Sleep(blockDelay)
   812  		c <- true
   813  	}()
   814  	<-c
   815  }
   816  
   817  func blockChanSend() {
   818  	c := make(chan bool)
   819  	go func() {
   820  		time.Sleep(blockDelay)
   821  		<-c
   822  	}()
   823  	c <- true
   824  }
   825  
   826  func blockChanClose() {
   827  	c := make(chan bool)
   828  	go func() {
   829  		time.Sleep(blockDelay)
   830  		close(c)
   831  	}()
   832  	<-c
   833  }
   834  
   835  func blockSelectRecvAsync() {
   836  	const numTries = 3
   837  	c := make(chan bool, 1)
   838  	c2 := make(chan bool, 1)
   839  	go func() {
   840  		for i := 0; i < numTries; i++ {
   841  			time.Sleep(blockDelay)
   842  			c <- true
   843  		}
   844  	}()
   845  	for i := 0; i < numTries; i++ {
   846  		select {
   847  		case <-c:
   848  		case <-c2:
   849  		}
   850  	}
   851  }
   852  
   853  func blockSelectSendSync() {
   854  	c := make(chan bool)
   855  	c2 := make(chan bool)
   856  	go func() {
   857  		time.Sleep(blockDelay)
   858  		<-c
   859  	}()
   860  	select {
   861  	case c <- true:
   862  	case c2 <- true:
   863  	}
   864  }
   865  
// blockMutex blocks in sync.(*Mutex).Lock until a helper goroutine
// unlocks the mutex after blockDelay.
func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}
   879  
   880  func blockCond() {
   881  	var mu sync.Mutex
   882  	c := sync.NewCond(&mu)
   883  	mu.Lock()
   884  	go func() {
   885  		time.Sleep(blockDelay)
   886  		mu.Lock()
   887  		c.Signal()
   888  		mu.Unlock()
   889  	}()
   890  	c.Wait()
   891  	mu.Unlock()
   892  }
   893  
   894  func TestMutexProfile(t *testing.T) {
   895  	// Generate mutex profile
   896  
   897  	old := runtime.SetMutexProfileFraction(1)
   898  	defer runtime.SetMutexProfileFraction(old)
   899  	if old != 0 {
   900  		t.Fatalf("need MutexProfileRate 0, got %d", old)
   901  	}
   902  
   903  	blockMutex()
   904  
   905  	t.Run("debug=1", func(t *testing.T) {
   906  		var w bytes.Buffer
   907  		Lookup("mutex").WriteTo(&w, 1)
   908  		prof := w.String()
   909  		t.Logf("received profile: %v", prof)
   910  
   911  		if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
   912  			t.Errorf("Bad profile header:\n%v", prof)
   913  		}
   914  		prof = strings.Trim(prof, "\n")
   915  		lines := strings.Split(prof, "\n")
   916  		if len(lines) != 6 {
   917  			t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
   918  		}
   919  		if len(lines) < 6 {
   920  			return
   921  		}
   922  		// checking that the line is like "35258904 1 @ 0x48288d 0x47cd28 0x458931"
   923  		r2 := `^\d+ \d+ @(?: 0x[[:xdigit:]]+)+`
   924  		//r2 := "^[0-9]+ 1 @ 0x[0-9a-f x]+$"
   925  		if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
   926  			t.Errorf("%q didn't match %q", lines[3], r2)
   927  		}
   928  		r3 := "^#.*runtime/pprof.blockMutex.*$"
   929  		if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
   930  			t.Errorf("%q didn't match %q", lines[5], r3)
   931  		}
   932  		t.Logf(prof)
   933  	})
   934  	t.Run("proto", func(t *testing.T) {
   935  		// proto format
   936  		var w bytes.Buffer
   937  		Lookup("mutex").WriteTo(&w, 0)
   938  		p, err := profile.Parse(&w)
   939  		if err != nil {
   940  			t.Fatalf("failed to parse profile: %v", err)
   941  		}
   942  		t.Logf("parsed proto: %s", p)
   943  		if err := p.CheckValid(); err != nil {
   944  			t.Fatalf("invalid profile: %v", err)
   945  		}
   946  
   947  		stks := stacks(p)
   948  		for _, want := range [][]string{
   949  			{"sync.(*Mutex).Unlock", "runtime/pprof.blockMutex.func1"},
   950  		} {
   951  			if !containsStack(stks, want) {
   952  				t.Errorf("No matching stack entry for %+v", want)
   953  			}
   954  		}
   955  	})
   956  }
   957  
   958  func func1(c chan int) { <-c }
   959  func func2(c chan int) { <-c }
   960  func func3(c chan int) { <-c }
   961  func func4(c chan int) { <-c }
   962  
   963  func TestGoroutineCounts(t *testing.T) {
   964  	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
   965  	// desired blocking point.
   966  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   967  
   968  	c := make(chan int)
   969  	for i := 0; i < 100; i++ {
   970  		switch {
   971  		case i%10 == 0:
   972  			go func1(c)
   973  		case i%2 == 0:
   974  			go func2(c)
   975  		default:
   976  			go func3(c)
   977  		}
   978  		// Let goroutines block on channel
   979  		for j := 0; j < 5; j++ {
   980  			runtime.Gosched()
   981  		}
   982  	}
   983  	ctx := context.Background()
   984  
   985  	// ... and again, with labels this time (just with fewer iterations to keep
   986  	// sorting deterministic).
   987  	Do(ctx, Labels("label", "value"), func(context.Context) {
   988  		for i := 0; i < 89; i++ {
   989  			switch {
   990  			case i%10 == 0:
   991  				go func1(c)
   992  			case i%2 == 0:
   993  				go func2(c)
   994  			default:
   995  				go func3(c)
   996  			}
   997  			// Let goroutines block on channel
   998  			for j := 0; j < 5; j++ {
   999  				runtime.Gosched()
  1000  			}
  1001  		}
  1002  	})
  1003  
  1004  	var w bytes.Buffer
  1005  	goroutineProf := Lookup("goroutine")
  1006  
  1007  	// Check debug profile
  1008  	goroutineProf.WriteTo(&w, 1)
  1009  	prof := w.String()
  1010  
  1011  	labels := labelMap{"label": "value"}
  1012  	labelStr := "\n# labels: " + labels.String()
  1013  	if !containsInOrder(prof, "\n50 @ ", "\n44 @", labelStr,
  1014  		"\n40 @", "\n36 @", labelStr, "\n10 @", "\n9 @", labelStr, "\n1 @") {
  1015  		t.Errorf("expected sorted goroutine counts with Labels:\n%s", prof)
  1016  	}
  1017  
  1018  	// Check proto profile
  1019  	w.Reset()
  1020  	goroutineProf.WriteTo(&w, 0)
  1021  	p, err := profile.Parse(&w)
  1022  	if err != nil {
  1023  		t.Errorf("error parsing protobuf profile: %v", err)
  1024  	}
  1025  	if err := p.CheckValid(); err != nil {
  1026  		t.Errorf("protobuf profile is invalid: %v", err)
  1027  	}
  1028  	expectedLabels := map[int64]map[string]string{
  1029  		50: map[string]string{},
  1030  		44: map[string]string{"label": "value"},
  1031  		40: map[string]string{},
  1032  		36: map[string]string{"label": "value"},
  1033  		10: map[string]string{},
  1034  		9:  map[string]string{"label": "value"},
  1035  		1:  map[string]string{},
  1036  	}
  1037  	if !containsCountsLabels(p, expectedLabels) {
  1038  		t.Errorf("expected count profile to contain goroutines with counts and labels %v, got %v",
  1039  			expectedLabels, p)
  1040  	}
  1041  
  1042  	close(c)
  1043  
  1044  	time.Sleep(10 * time.Millisecond) // let goroutines exit
  1045  }
  1046  
  1047  func containsInOrder(s string, all ...string) bool {
  1048  	for _, t := range all {
  1049  		i := strings.Index(s, t)
  1050  		if i < 0 {
  1051  			return false
  1052  		}
  1053  		s = s[i+len(t):]
  1054  	}
  1055  	return true
  1056  }
  1057  
  1058  func containsCountsLabels(prof *profile.Profile, countLabels map[int64]map[string]string) bool {
  1059  	m := make(map[int64]int)
  1060  	type nkey struct {
  1061  		count    int64
  1062  		key, val string
  1063  	}
  1064  	n := make(map[nkey]int)
  1065  	for c, kv := range countLabels {
  1066  		m[c]++
  1067  		for k, v := range kv {
  1068  			n[nkey{
  1069  				count: c,
  1070  				key:   k,
  1071  				val:   v,
  1072  			}]++
  1073  
  1074  		}
  1075  	}
  1076  	for _, s := range prof.Sample {
  1077  		// The count is the single value in the sample
  1078  		if len(s.Value) != 1 {
  1079  			return false
  1080  		}
  1081  		m[s.Value[0]]--
  1082  		for k, vs := range s.Label {
  1083  			for _, v := range vs {
  1084  				n[nkey{
  1085  					count: s.Value[0],
  1086  					key:   k,
  1087  					val:   v,
  1088  				}]--
  1089  			}
  1090  		}
  1091  	}
  1092  	for _, n := range m {
  1093  		if n > 0 {
  1094  			return false
  1095  		}
  1096  	}
  1097  	for _, ncnt := range n {
  1098  		if ncnt != 0 {
  1099  			return false
  1100  		}
  1101  	}
  1102  	return true
  1103  }
  1104  
// emptyCallStackTestRun counts invocations of TestEmptyCallStack so that
// each run registers its custom profile under a unique name.
var emptyCallStackTestRun int64
  1106  
  1107  // Issue 18836.
  1108  func TestEmptyCallStack(t *testing.T) {
  1109  	name := fmt.Sprintf("test18836_%d", emptyCallStackTestRun)
  1110  	emptyCallStackTestRun++
  1111  
  1112  	t.Parallel()
  1113  	var buf bytes.Buffer
  1114  	p := NewProfile(name)
  1115  
  1116  	p.Add("foo", 47674)
  1117  	p.WriteTo(&buf, 1)
  1118  	p.Remove("foo")
  1119  	got := buf.String()
  1120  	prefix := name + " profile: total 1\n"
  1121  	if !strings.HasPrefix(got, prefix) {
  1122  		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
  1123  	}
  1124  	lostevent := "lostProfileEvent"
  1125  	if !strings.Contains(got, lostevent) {
  1126  		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
  1127  	}
  1128  }
  1129  
  1130  // stackContainsLabeled takes a spec like funcname;key=value and matches if the stack has that key
  1131  // and value and has funcname somewhere in the stack.
  1132  func stackContainsLabeled(spec string, count uintptr, stk []*profile.Location, labels map[string][]string) bool {
  1133  	semi := strings.Index(spec, ";")
  1134  	if semi == -1 {
  1135  		panic("no semicolon in key/value spec")
  1136  	}
  1137  	kv := strings.SplitN(spec[semi+1:], "=", 2)
  1138  	if len(kv) != 2 {
  1139  		panic("missing = in key/value spec")
  1140  	}
  1141  	if !contains(labels[kv[0]], kv[1]) {
  1142  		return false
  1143  	}
  1144  	return stackContains(spec[:semi], count, stk, labels)
  1145  }
  1146  
  1147  func TestCPUProfileLabel(t *testing.T) {
  1148  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, avoidFunctions(), func(dur time.Duration) {
  1149  		Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1150  			cpuHogger(cpuHog1, &salt1, dur)
  1151  		})
  1152  	})
  1153  }
  1154  
  1155  func TestLabelRace(t *testing.T) {
  1156  	// Test the race detector annotations for synchronization
  1157  	// between settings labels and consuming them from the
  1158  	// profile.
  1159  	testCPUProfile(t, stackContainsLabeled, []string{"runtime/pprof.cpuHogger;key=value"}, nil, func(dur time.Duration) {
  1160  		start := time.Now()
  1161  		var wg sync.WaitGroup
  1162  		for time.Since(start) < dur {
  1163  			var salts [10]int
  1164  			for i := 0; i < 10; i++ {
  1165  				wg.Add(1)
  1166  				go func(j int) {
  1167  					Do(context.Background(), Labels("key", "value"), func(context.Context) {
  1168  						cpuHogger(cpuHog1, &salts[j], time.Millisecond)
  1169  					})
  1170  					wg.Done()
  1171  				}(i)
  1172  			}
  1173  			wg.Wait()
  1174  		}
  1175  	})
  1176  }
  1177  
  1178  // Check that there is no deadlock when the program receives SIGPROF while in
  1179  // 64bit atomics' critical section. Used to happen on mips{,le}. See #20146.
  1180  func TestAtomicLoadStore64(t *testing.T) {
  1181  	f, err := os.CreateTemp("", "profatomic")
  1182  	if err != nil {
  1183  		t.Fatalf("TempFile: %v", err)
  1184  	}
  1185  	defer os.Remove(f.Name())
  1186  	defer f.Close()
  1187  
  1188  	if err := StartCPUProfile(f); err != nil {
  1189  		t.Fatal(err)
  1190  	}
  1191  	defer StopCPUProfile()
  1192  
  1193  	var flag uint64
  1194  	done := make(chan bool, 1)
  1195  
  1196  	go func() {
  1197  		for atomic.LoadUint64(&flag) == 0 {
  1198  			runtime.Gosched()
  1199  		}
  1200  		done <- true
  1201  	}()
  1202  	time.Sleep(50 * time.Millisecond)
  1203  	atomic.StoreUint64(&flag, 1)
  1204  	<-done
  1205  }
  1206  
  1207  func TestTracebackAll(t *testing.T) {
  1208  	// With gccgo, if a profiling signal arrives at the wrong time
  1209  	// during traceback, it may crash or hang. See issue #29448.
  1210  	f, err := os.CreateTemp("", "proftraceback")
  1211  	if err != nil {
  1212  		t.Fatalf("TempFile: %v", err)
  1213  	}
  1214  	defer os.Remove(f.Name())
  1215  	defer f.Close()
  1216  
  1217  	if err := StartCPUProfile(f); err != nil {
  1218  		t.Fatal(err)
  1219  	}
  1220  	defer StopCPUProfile()
  1221  
  1222  	ch := make(chan int)
  1223  	defer close(ch)
  1224  
  1225  	count := 10
  1226  	for i := 0; i < count; i++ {
  1227  		go func() {
  1228  			<-ch // block
  1229  		}()
  1230  	}
  1231  
  1232  	N := 10000
  1233  	if testing.Short() {
  1234  		N = 500
  1235  	}
  1236  	buf := make([]byte, 10*1024)
  1237  	for i := 0; i < N; i++ {
  1238  		runtime.Stack(buf, true)
  1239  	}
  1240  }
  1241  
// TestTryAdd tests the cases that are hard to test with real program execution.
//
// For example, the current go compilers may not always inline functions
// involved in recursion but that may not be true in the future compilers. This
// tests such cases by using fake call sequences and forcing the profile build
// utilizing translateCPUProfile defined in proto_test.go
func TestTryAdd(t *testing.T) {
	if _, found := findInlinedCall(inlinedCallerDump, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into inlinedCallerDump.")
	}

	// inlinedCallerDump
	//   inlinedCalleeDump
	pcs := make([]uintptr, 2)
	inlinedCallerDump(pcs)
	// Convert the runtime PCs to the uint64 form profileBuilder consumes.
	inlinedCallerStack := make([]uint64, 2)
	for i := range pcs {
		inlinedCallerStack[i] = uint64(pcs[i])
	}

	if _, found := findInlinedCall(recursionChainBottom, 4<<10); !found {
		t.Skip("Can't determine whether anything was inlined into recursionChainBottom.")
	}

	// recursionChainTop
	//   recursionChainMiddle
	//     recursionChainBottom
	//       recursionChainTop
	//         recursionChainMiddle
	//           recursionChainBottom
	pcs = make([]uintptr, 6)
	recursionChainTop(1, pcs)
	recursionStack := make([]uint64, len(pcs))
	for i := range pcs {
		recursionStack[i] = uint64(pcs[i])
	}

	period := int64(2000 * 1000) // 1/500*1e9 nanosec.

	// Each input record is: record length, a zero tag word, the sample
	// count (or hz for the first record), then the stack PCs.
	testCases := []struct {
		name        string
		input       []uint64          // following the input format assumed by profileBuilder.addCPUData.
		wantLocs    [][]string        // ordered location entries with function names.
		wantSamples []*profile.Sample // ordered samples, we care only about Value and the profile location IDs.
	}{{
		// Sanity test for a normal, complete stack trace.
		name: "full_stack_trace",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug35538",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// Fake frame: tryAdd will have inlinedCallerDump
			// (stack[1]) on the deck when it encounters the next
			// inline function. It should accept this.
			7, 0, 10, inlinedCallerStack[0], inlinedCallerStack[1], inlinedCallerStack[0], inlinedCallerStack[1],
			5, 0, 20, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}}},
			{Value: []int64{20, 20 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "bug38096",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			// count (data[2]) == 0 && len(stk) == 1 is an overflow
			// entry. The "stk" entry is actually the count.
			4, 0, 0, 4242,
		},
		wantLocs: [][]string{{"runtime/pprof.lostProfileEvent"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{4242, 4242 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// If a function is directly called recursively then it must
		// not be inlined in the caller.
		//
		// N.B. We're generating an impossible profile here, with a
		// recursive inlineCalleeDump call. This is simulating a non-Go
		// function that looks like an inlined Go function other than
		// its recursive property. See pcDeck.tryAdd.
		name: "directly_recursive_func_is_not_inlined",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 30, inlinedCallerStack[0], inlinedCallerStack[0],
			4, 0, 40, inlinedCallerStack[0],
		},
		// inlinedCallerDump shows up here because
		// runtime_expandFinalInlineFrame adds it to the stack frame.
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump"}, {"runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{30, 30 * period}, Location: []*profile.Location{{ID: 1}, {ID: 1}, {ID: 2}}},
			{Value: []int64{40, 40 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}}},
		},
	}, {
		name: "recursion_chain_inline",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			9, 0, 10, recursionStack[0], recursionStack[1], recursionStack[2], recursionStack[3], recursionStack[4], recursionStack[5],
		},
		wantLocs: [][]string{
			{"runtime/pprof.recursionChainBottom"},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.recursionChainBottom",
			},
			{
				"runtime/pprof.recursionChainMiddle",
				"runtime/pprof.recursionChainTop",
				"runtime/pprof.TestTryAdd", // inlined into the test.
			},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{10, 10 * period}, Location: []*profile.Location{{ID: 1}, {ID: 2}, {ID: 3}}},
		},
	}, {
		name: "truncated_stack_trace_later",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			5, 0, 50, inlinedCallerStack[0], inlinedCallerStack[1],
			4, 0, 60, inlinedCallerStack[0],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{50, 50 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{60, 60 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		name: "truncated_stack_trace_first",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
			5, 0, 80, inlinedCallerStack[0], inlinedCallerStack[1],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// We can recover the inlined caller from a truncated stack.
		name: "truncated_stack_trace_only",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
		},
		wantLocs: [][]string{{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"}},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
		},
	}, {
		// The same location is used for duplicated stacks.
		name: "truncated_stack_trace_twice",
		input: []uint64{
			3, 0, 500, // hz = 500. Must match the period.
			4, 0, 70, inlinedCallerStack[0],
			// Fake frame: add a fake call to
			// inlinedCallerDump to prevent this sample
			// from getting merged into above.
			5, 0, 80, inlinedCallerStack[1], inlinedCallerStack[0],
		},
		wantLocs: [][]string{
			{"runtime/pprof.inlinedCalleeDump", "runtime/pprof.inlinedCallerDump"},
			{"runtime/pprof.inlinedCallerDump"},
		},
		wantSamples: []*profile.Sample{
			{Value: []int64{70, 70 * period}, Location: []*profile.Location{{ID: 1}}},
			{Value: []int64{80, 80 * period}, Location: []*profile.Location{{ID: 2}, {ID: 1}}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			p, err := translateCPUProfile(tc.input)
			if err != nil {
				t.Fatalf("translating profile: %v", err)
			}
			t.Logf("Profile: %v\n", p)

			// One location entry with all inlined functions.
			var gotLoc [][]string
			for _, loc := range p.Location {
				// Flatten each location's inline chain to its
				// function names for comparison.
				var names []string
				for _, line := range loc.Line {
					names = append(names, line.Function.Name)
				}
				gotLoc = append(gotLoc, names)
			}
			if got, want := fmtJSON(gotLoc), fmtJSON(tc.wantLocs); got != want {
				t.Errorf("Got Location = %+v\n\twant %+v", got, want)
			}
			// All samples should point to one location.
			var gotSamples []*profile.Sample
			for _, sample := range p.Sample {
				// Keep only the location IDs; other Location
				// fields are not part of this comparison.
				var locs []*profile.Location
				for _, loc := range sample.Location {
					locs = append(locs, &profile.Location{ID: loc.ID})
				}
				gotSamples = append(gotSamples, &profile.Sample{Value: sample.Value, Location: locs})
			}
			if got, want := fmtJSON(gotSamples), fmtJSON(tc.wantSamples); got != want {
				t.Errorf("Got Samples = %+v\n\twant %+v", got, want)
			}
		})
	}
}