github.com/yanyiwu/go@v0.0.0-20150106053140-03d6637dbb7f/src/runtime/stack_test.go (about)

     1  // Copyright 2012 The Go Authors.  All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	. "runtime"
     9  	"strings"
    10  	"sync"
    11  	"testing"
    12  	"time"
    13  )
    14  
// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	// Run with BatchSize procs so a whole batch of goroutines can run in
	// parallel; restore the previous GOMAXPROCS value on exit.
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				// Recursive closure with a 1KB array argument: each frame
				// holds ~ArraySize bytes, so the stack grows to roughly
				// ArraySize*RecursionDepth bytes before signaling on c.
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						// Sleep at maximum depth so many goroutines hold
						// grown stacks at the same time.
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	// StackSys growth over all batches must stay within a generous bound;
	// good per-thread segment caching keeps it far below the old 500MB.
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (http://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}
    71  
    72  // Test stack growing in different contexts.
    73  func TestStackGrowth(t *testing.T) {
    74  	t.Parallel()
    75  	var wg sync.WaitGroup
    76  
    77  	// in a normal goroutine
    78  	wg.Add(1)
    79  	go func() {
    80  		defer wg.Done()
    81  		growStack()
    82  	}()
    83  	wg.Wait()
    84  
    85  	// in locked goroutine
    86  	wg.Add(1)
    87  	go func() {
    88  		defer wg.Done()
    89  		LockOSThread()
    90  		growStack()
    91  		UnlockOSThread()
    92  	}()
    93  	wg.Wait()
    94  
    95  	// in finalizer
    96  	wg.Add(1)
    97  	go func() {
    98  		defer wg.Done()
    99  		done := make(chan bool)
   100  		go func() {
   101  			s := new(string)
   102  			SetFinalizer(s, func(ss *string) {
   103  				growStack()
   104  				done <- true
   105  			})
   106  			s = nil
   107  			done <- true
   108  		}()
   109  		<-done
   110  		GC()
   111  		select {
   112  		case <-done:
   113  		case <-time.After(20 * time.Second):
   114  			t.Fatal("finalizer did not run")
   115  		}
   116  	}()
   117  	wg.Wait()
   118  }
   119  
   120  // ... and in init
   121  //func init() {
   122  //	growStack()
   123  //}
   124  
   125  func growStack() {
   126  	n := 1 << 10
   127  	if testing.Short() {
   128  		n = 1 << 8
   129  	}
   130  	for i := 0; i < n; i++ {
   131  		x := 0
   132  		growStackIter(&x, i)
   133  		if x != i+1 {
   134  			panic("stack is corrupted")
   135  		}
   136  	}
   137  	GC()
   138  }
   139  
   140  // This function is not an anonymous func, so that the compiler can do escape
   141  // analysis and place x on stack (and subsequently stack growth update the pointer).
   142  func growStackIter(p *int, n int) {
   143  	if n == 0 {
   144  		*p = n + 1
   145  		GC()
   146  		return
   147  	}
   148  	*p = n + 1
   149  	x := 0
   150  	growStackIter(&x, n-1)
   151  	if x != n {
   152  		panic("stack is corrupted")
   153  	}
   154  }
   155  
   156  func TestStackGrowthCallback(t *testing.T) {
   157  	t.Parallel()
   158  	var wg sync.WaitGroup
   159  
   160  	// test stack growth at chan op
   161  	wg.Add(1)
   162  	go func() {
   163  		defer wg.Done()
   164  		c := make(chan int, 1)
   165  		growStackWithCallback(func() {
   166  			c <- 1
   167  			<-c
   168  		})
   169  	}()
   170  
   171  	// test stack growth at map op
   172  	wg.Add(1)
   173  	go func() {
   174  		defer wg.Done()
   175  		m := make(map[int]int)
   176  		growStackWithCallback(func() {
   177  			_, _ = m[1]
   178  			m[1] = 1
   179  		})
   180  	}()
   181  
   182  	// test stack growth at goroutine creation
   183  	wg.Add(1)
   184  	go func() {
   185  		defer wg.Done()
   186  		growStackWithCallback(func() {
   187  			done := make(chan bool)
   188  			go func() {
   189  				done <- true
   190  			}()
   191  			<-done
   192  		})
   193  	}()
   194  
   195  	wg.Wait()
   196  }
   197  
   198  func growStackWithCallback(cb func()) {
   199  	var f func(n int)
   200  	f = func(n int) {
   201  		if n == 0 {
   202  			cb()
   203  			return
   204  		}
   205  		f(n - 1)
   206  	}
   207  	for i := 0; i < 1<<10; i++ {
   208  		f(i)
   209  	}
   210  }
   211  
   212  // TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
   213  // during a stack copy.
   214  func set(p *int, x int) {
   215  	*p = x
   216  }
   217  func TestDeferPtrs(t *testing.T) {
   218  	var y int
   219  
   220  	defer func() {
   221  		if y != 42 {
   222  			t.Errorf("defer's stack references were not adjusted appropriately")
   223  		}
   224  	}()
   225  	defer set(&y, 42)
   226  	growStack()
   227  }
   228  
// bigBuf is a 4KB value passed to setBig so the deferred call carries a
// large argument frame (see the TestDeferPtrsGoexit comment below).
type bigBuf [4 * 1024]byte
   230  
   231  // TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
   232  // stack grows as part of starting the deferred function. It calls Goexit at various
   233  // stack depths, forcing the deferred function (with >4kB of args) to be run at
   234  // the bottom of the stack. The goal is to find a stack depth less than 4kB from
   235  // the end of the stack. Each trial runs in a different goroutine so that an earlier
   236  // stack growth does not invalidate a later attempt.
   237  func TestDeferPtrsGoexit(t *testing.T) {
   238  	for i := 0; i < 100; i++ {
   239  		c := make(chan int, 1)
   240  		go testDeferPtrsGoexit(c, i)
   241  		if n := <-c; n != 42 {
   242  			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
   243  		}
   244  	}
   245  }
   246  
   247  func testDeferPtrsGoexit(c chan int, i int) {
   248  	var y int
   249  	defer func() {
   250  		c <- y
   251  	}()
   252  	defer setBig(&y, 42, bigBuf{})
   253  	useStackAndCall(i, Goexit)
   254  }
   255  
   256  func setBig(p *int, x int, b bigBuf) {
   257  	*p = x
   258  }
   259  
   260  // TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
   261  // of Goexit to run the Defers. Those two are different execution paths
   262  // in the runtime.
   263  func TestDeferPtrsPanic(t *testing.T) {
   264  	for i := 0; i < 100; i++ {
   265  		c := make(chan int, 1)
   266  		go testDeferPtrsGoexit(c, i)
   267  		if n := <-c; n != 42 {
   268  			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
   269  		}
   270  	}
   271  }
   272  
   273  func testDeferPtrsPanic(c chan int, i int) {
   274  	var y int
   275  	defer func() {
   276  		if recover() == nil {
   277  			c <- -1
   278  			return
   279  		}
   280  		c <- y
   281  	}()
   282  	defer setBig(&y, 42, bigBuf{})
   283  	useStackAndCall(i, func() { panic(1) })
   284  }
   285  
// TestPanicUseStack checks that a chain of Panic structs on the stack are
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	// Three nested panic levels: each deferred function recovers, forces a
	// stack walk over the live panic chain, then recurses deeper (100, then
	// 200 frames via useStackAndCall) before panicking again, so the chain
	// must survive the resulting stack growth.
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}
   311  
// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	// b is never written, so b[99] is always 0 and the recursion argument is
	// effectively n-1; reading b[99] keeps the array referenced, presumably
	// so the compiler cannot drop the 1KB frame — confirm before changing.
	useStackAndCall(n-1+int(b[99]), f)
}
   321  
   322  func useStack(n int) {
   323  	useStackAndCall(n, func() {})
   324  }
   325  
   326  func growing(c chan int, done chan struct{}) {
   327  	for n := range c {
   328  		useStack(n)
   329  		done <- struct{}{}
   330  	}
   331  	done <- struct{}{}
   332  }
   333  
   334  func TestStackCache(t *testing.T) {
   335  	// Allocate a bunch of goroutines and grow their stacks.
   336  	// Repeat a few times to test the stack cache.
   337  	const (
   338  		R = 4
   339  		G = 200
   340  		S = 5
   341  	)
   342  	for i := 0; i < R; i++ {
   343  		var reqchans [G]chan int
   344  		done := make(chan struct{})
   345  		for j := 0; j < G; j++ {
   346  			reqchans[j] = make(chan int)
   347  			go growing(reqchans[j], done)
   348  		}
   349  		for s := 0; s < S; s++ {
   350  			for j := 0; j < G; j++ {
   351  				reqchans[j] <- 1 << uint(s)
   352  			}
   353  			for j := 0; j < G; j++ {
   354  				<-done
   355  			}
   356  		}
   357  		for j := 0; j < G; j++ {
   358  			close(reqchans[j])
   359  		}
   360  		for j := 0; j < G; j++ {
   361  			<-done
   362  		}
   363  	}
   364  }
   365  
   366  func TestStackOutput(t *testing.T) {
   367  	b := make([]byte, 1024)
   368  	stk := string(b[:Stack(b, false)])
   369  	if !strings.HasPrefix(stk, "goroutine ") {
   370  		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
   371  		t.Errorf("Stack output should begin with \"goroutine \"")
   372  	}
   373  }
   374  
   375  func TestStackAllOutput(t *testing.T) {
   376  	b := make([]byte, 1024)
   377  	stk := string(b[:Stack(b, true)])
   378  	if !strings.HasPrefix(stk, "goroutine ") {
   379  		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
   380  		t.Errorf("Stack output should begin with \"goroutine \"")
   381  	}
   382  }
   383  
   384  func TestStackPanic(t *testing.T) {
   385  	// Test that stack copying copies panics correctly.  This is difficult
   386  	// to test because it is very unlikely that the stack will be copied
   387  	// in the middle of gopanic.  But it can happen.
   388  	// To make this test effective, edit panic.go:gopanic and uncomment
   389  	// the GC() call just before freedefer(d).
   390  	defer func() {
   391  		if x := recover(); x == nil {
   392  			t.Errorf("recover failed")
   393  		}
   394  	}()
   395  	useStack(32)
   396  	panic("test panic")
   397  }
   398  
   399  func BenchmarkStackCopy(b *testing.B) {
   400  	c := make(chan bool)
   401  	for i := 0; i < b.N; i++ {
   402  		go func() {
   403  			count(1000000)
   404  			c <- true
   405  		}()
   406  		<-c
   407  	}
   408  }
   409  
   410  func count(n int) int {
   411  	if n == 0 {
   412  		return 0
   413  	}
   414  	return 1 + count(n-1)
   415  }