github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/stack_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"github.com/x04/go/src/bytes"
	"github.com/x04/go/src/fmt"
	"github.com/x04/go/src/os"
	"github.com/x04/go/src/reflect"
	"github.com/x04/go/src/regexp"
	. "github.com/x04/go/src/runtime"
	"github.com/x04/go/src/strconv"
	"github.com/x04/go/src/strings"
	"github.com/x04/go/src/sync"
	"github.com/x04/go/src/sync/atomic"
	"github.com/x04/go/src/testing"
	"github.com/x04/go/src/time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize	= 32
		BatchCount	= 256
		ArraySize	= 1024
		RecursionDepth	= 128
	)
	if testing.Short() {
		return
	}
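	// Run with GOMAXPROCS=BatchSize; the inner call returns the previous value, which the defer restores when the test ends.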
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth)	// 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	if GOARCH == "wasm" {
		t.Skip("fails on wasm (too slow?)")
	}

	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration	// For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()

		timeout := 20 * time.Second
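		// GO_TEST_TIMEOUT_SCALE lets slow builders stretch test timeouts; apply any such scale to the finalizer deadline as well.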
		if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
			scale, err := strconv.Atoi(s)
			if err == nil {
				timeout *= time.Duration(scale)
			}
		}

		select {
		case <-done:
		case <-time.After(timeout):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (and so stack growth can update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

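// growStackWithCallback calls cb at every stack depth from 0 to 1<<10,
// so cb runs across many different stack sizes and growth boundaries.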
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

//go:noinline
func testDeferLeafSigpanic1() {
	// Cause a sigpanic to be injected in this frame.
	//
	// This function has to be declared before
	// TestDeferLeafSigpanic so the runtime will crash if we think
	// this function's continuation PC is in
	// TestDeferLeafSigpanic.
	*(*int)(nil) = 0
}

// TestDeferLeafSigpanic tests defer matching around leaf functions
// that sigpanic. This is tricky because on LR machines the outer
// function and the inner function have the same SP, but it's critical
// that we match up the defer correctly to get the right liveness map.
// See issue #25499.
func TestDeferLeafSigpanic(t *testing.T) {
	// Push a defer that will walk the stack.
	defer func() {
		if err := recover(); err == nil {
			t.Fatal("expected panic from nil pointer")
		}
		GC()
	}()
	// Call a leaf function. We must set up the exact call stack:
	//
	//	deferring function -> leaf function -> sigpanic
	//
	// On LR machines, the leaf function will have the same SP as
	// the SP pushed for the defer frame.
	testDeferLeafSigpanic1()
}

// TestPanicUseStack checks that a chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc)	// force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc)	// force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc)	// force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte	// makes frame about 1KB
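	// b[99] is always zero, so the depth is unchanged; referencing b keeps
	// the array in use (presumably so the 1KB frame isn't optimized away).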
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

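// growing grows the stack by each amount received on c until c is closed,
// signaling on done after every request and once more before returning.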
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R	= 4
		G	= 200
		S	= 5
	)
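	// R rounds of G goroutines; each round grows every goroutine's stack in S steps of 1<<s KB.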
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

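// countp recurses *n frames deep; every frame holds a pointer into the
// goroutine's stack that the copier must adjust on each stack growth.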
func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

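// count1 through count23 form a cycle of 23 distinct functions, so the
// copied stack contains many different frames rather than one repeated
// function (presumably the "NoCache" in the benchmark name above).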
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int	{ return 1 + count3(n-1) }
func count3(n int) int	{ return 1 + count4(n-1) }
func count4(n int) int	{ return 1 + count5(n-1) }
func count5(n int) int	{ return 1 + count6(n-1) }
func count6(n int) int	{ return 1 + count7(n-1) }
func count7(n int) int	{ return 1 + count8(n-1) }
func count8(n int) int	{ return 1 + count9(n-1) }
func count9(n int) int	{ return 1 + count10(n-1) }
func count10(n int) int	{ return 1 + count11(n-1) }
func count11(n int) int	{ return 1 + count12(n-1) }
func count12(n int) int	{ return 1 + count13(n-1) }
func count13(n int) int	{ return 1 + count14(n-1) }
func count14(n int) int	{ return 1 + count15(n-1) }
func count15(n int) int	{ return 1 + count16(n-1) }
func count16(n int) int	{ return 1 + count17(n-1) }
func count17(n int) int	{ return 1 + count18(n-1) }
func count18(n int) int	{ return 1 + count19(n-1) }
func count19(n int) int	{ return 1 + count20(n-1) }
func count20(n int) int	{ return 1 + count21(n-1) }
func count21(n int) int	{ return 1 + count22(n-1) }
func count22(n int) int	{ return 1 + count23(n-1) }
func count23(n int) int	{ return 1 + count1(n-1) }

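// structWithMethod has value-receiver methods; taking them as method
// expressions on *structWithMethod below forces the compiler to generate
// wrapper methods, which these tests exercise.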
type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop()	{}

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we normally elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that a deferred closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4		// processors
	N := 10000	// iterations
	D := 200	// stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
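// Defers created inside a loop must be heap-allocated, while the
// straight-line defers on the odd levels can be allocated on the stack.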
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte))	// force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
var escapeMe = func(x interface{}) {}