github.com/mattn/go@v0.0.0-20171011075504-07f7db3ea99f/src/runtime/pprof/pprof_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !nacl

package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/testenv"
	"io"
	"io/ioutil"
	"math/big"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"runtime/pprof/internal/profile"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

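// cpuHogger runs f repeatedly until both at least 500 iterations have
// completed and dur has elapsed, giving the 100 Hz CPU profiler time to
// collect samples attributed to f.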
func cpuHogger(f func() int, dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		f()
	}
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// It must not call other functions or access heap/globals in the loop;
// otherwise, under the race detector, the samples end up in the race runtime.
func cpuHog1() int {
	foo := salt1
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	return foo
}

func cpuHog2() int {
	foo := salt2
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	return foo
}

func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) {
		cpuHogger(cpuHog1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, dur)
		<-c
	})
}

func TestCPUProfileInlining(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) {
		cpuHogger(inlinedCaller, dur)
	})
}

func inlinedCaller() int {
	inlinedCallee()
	return 0
}

func inlinedCallee() {
	// We could just use cpuHog1, but for loops prevent inlining
	// right now. :(
	foo := salt1
	i := 0
loop:
	if foo > 0 {
		foo *= foo
	} else {
		foo *= foo + 1
	}
	if i++; i < 1e5 {
		goto loop
	}
	salt1 = foo
}

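// parseProfile parses the proto-format CPU profile in valBytes and calls f
// once per sample with the sample's count, stack, and labels.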
func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
}

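// testCPUProfile collects a CPU profile while running f and checks, via
// profileOk, that the profile contains samples in each function named in
// need. On a possibly-too-short run it retries with progressively longer
// durations before failing, and skips on known-bad platforms.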
func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	const maxDuration = 5 * time.Second
	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 200 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if profileOk(t, need, prof, duration) {
			return
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if badOS[runtime.GOOS] {
		t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
		return
	}
	// Ignore the failure if the tests are running in a QEMU-based emulator;
	// QEMU is not perfect at emulating everything.
	// The IN_QEMU environment variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
		return
	}
	t.FailNow()
}

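// contains reports whether slice contains the string s.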
func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}

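// profileOk parses the CPU profile in prof and reports whether it is
// acceptable: it must contain a reasonable number of samples for the given
// duration and at least one sample in each function listed in need. A
// "name;key=value" entry in need only counts samples carrying that label.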
func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
	ok = true

	// Check that profile is well formed and contains need.
	have := make([]uintptr, len(need))
	var samples uintptr
	var buf bytes.Buffer
	parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		for i, name := range need {
			if semi := strings.Index(name, ";"); semi > -1 {
				kv := strings.SplitN(name[semi+1:], "=", 2)
				if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
					continue
				}
				name = name[:semi]
			}
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						have[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some Windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if len(need) == 0 {
		return ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095). So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return ok
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}

// Test that the profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
		// Special entries that record a PC without a traceback are allowed; see below.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}

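// fprintStack writes a one-line rendering of the stack stk to w, showing each
// location's address and its function:line pairs.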
func fprintStack(w io.Writer, stk []*profile.Location) {
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
	fmt.Fprintf(w, "\n")
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

// Operating systems on which the tests are expected to fail. See issue 13841.
var badOS = map[string]bool{
	"darwin":    true,
	"netbsd":    true,
	"plan9":     true,
	"dragonfly": true,
	"solaris":   true,
}

func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func()
		re   string
	}
	tests := [...]TestCase{
		{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan send", blockChanSend, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select recv async", blockSelectRecvAsync, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select send sync", blockSelectSendSync, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"mutex", blockMutex, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"cond", blockCond, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}
	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 1)
	prof := w.String()

	if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
		t.Fatalf("Bad profile header:\n%v", prof)
	}

	if strings.HasSuffix(prof, "#\t0x0\n\n") {
		t.Errorf("Useless 0 suffix:\n%v", prof)
	}

	for _, test := range tests {
		if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
			t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
		}
	}
}

const blockDelay = 10 * time.Millisecond

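// Each of the block* helpers below blocks at a distinct site for roughly
// blockDelay, so TestBlockProfile can match each blocking operation in the
// block profile output.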
func blockChanRecv() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	<-c
}

func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}

func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync() {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			time.Sleep(blockDelay)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}

func TestMutexProfile(t *testing.T) {
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileFraction 0, got %d", old)
	}

	blockMutex()

	var w bytes.Buffer
	Lookup("mutex").WriteTo(&w, 1)
	prof := w.String()

	if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
		t.Errorf("Bad profile header:\n%v", prof)
	}
	prof = strings.Trim(prof, "\n")
	lines := strings.Split(prof, "\n")
	if len(lines) != 6 {
		t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
	}
	if len(lines) < 6 {
		return
	}
	// Check that the sample line looks like "35258904 1 @ 0x48288d 0x47cd28 0x458931".
	r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
	if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
		t.Errorf("%q didn't match %q", lines[3], r2)
	}
	r3 := "^#.*runtime/pprof.blockMutex.*$"
	if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
		t.Errorf("%q didn't match %q", lines[5], r3)
	}
}

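// func1 through func4 are distinct functions blocking on a channel receive,
// giving TestGoroutineCounts distinct stacks by which to group goroutine
// counts.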
func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
		t.Errorf("expected sorted goroutine counts:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	if !containsCounts(p, []int64{50, 40, 10, 1}) {
		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
			[]int64{50, 40, 10, 1}, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}

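// containsInOrder reports whether all strings in all appear in s, in the
// given order, without overlapping.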
func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		i := strings.Index(s, t)
		if i < 0 {
			return false
		}
		s = s[i+len(t):]
	}
	return true
}

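// containsCounts reports whether prof has, for each value in counts (with
// multiplicity), a matching sample whose single value equals it.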
func containsCounts(prof *profile.Profile, counts []int64) bool {
	m := make(map[int64]int)
	for _, c := range counts {
		m[c]++
	}
	for _, s := range prof.Sample {
		// The count is the single value in the sample
		if len(s.Value) != 1 {
			return false
		}
		m[s.Value[0]]--
	}
	for _, n := range m {
		if n > 0 {
			return false
		}
	}
	return true
}

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
	t.Parallel()
	var buf bytes.Buffer
	p := NewProfile("test18836")
	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := "test18836 profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}

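// The "cpuHogger;key=value" entry asks profileOk to count only samples from
// cpuHogger that carry the profiler label key=value.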
func TestCPUProfileLabel(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, dur)
		})
	})
}

func TestLabelRace(t *testing.T) {
	// Test the race detector annotations for synchronization
	// between setting labels and consuming them from the
	// profile.
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		start := time.Now()
		var wg sync.WaitGroup
		for time.Since(start) < dur {
			for i := 0; i < 10; i++ {
				wg.Add(1)
				go func() {
					Do(context.Background(), Labels("key", "value"), func(context.Context) {
						cpuHogger(cpuHog1, time.Millisecond)
					})
					wg.Done()
				}()
			}
			wg.Wait()
		}
	})
}

// Check that there is no deadlock when the program receives SIGPROF while in
// the critical section of a 64-bit atomic operation. Used to happen on
// mips{,le}. See #20146.
func TestAtomicLoadStore64(t *testing.T) {
	f, err := ioutil.TempFile("", "profatomic")
	if err != nil {
		t.Fatalf("TempFile: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if err := StartCPUProfile(f); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	var flag uint64
	done := make(chan bool, 1)

	go func() {
		for atomic.LoadUint64(&flag) == 0 {
			runtime.Gosched()
		}
		done <- true
	}()
	time.Sleep(50 * time.Millisecond)
	atomic.StoreUint64(&flag, 1)
	<-done
}