github.com/aloncn/graphics-go@v0.0.1/src/runtime/stack_test.go

// Copyright 2012 The Go Authors.  All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	. "runtime"
	"strings"
	"sync"
	"testing"
	"time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// in a normal goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStack()
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack()
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				growStack()
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()
		select {
		case <-done:
		case <-time.After(20 * time.Second):
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

func growStack() {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
	}
	GC()
}
// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (so that a subsequent stack growth updates
// the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

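// TestStackGrowthCallback tests stack growth at the runtime's callback points:
// channel operations, map operations, and goroutine creation.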
func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

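// growStackWithCallback recursively grows the stack to many different depths,
// invoking cb at the bottom of each recursion.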
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack()
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

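// testDeferPtrsGoexit consumes about i KB of stack, then calls Goexit so the
// deferred setBig runs near the end of the stack; it reports the value of y on c.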
func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

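// setBig is like set but carries >4kB of arguments, so that starting the
// deferred call can itself require a stack growth.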
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but uses panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

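// testDeferPtrsPanic is like testDeferPtrsGoexit but triggers the deferred
// setBig via panic/recover instead of Goexit.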
func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}

// TestPanicUseStack checks that a chain of Panic structs on the stack are
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

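// TestPanicFar builds a large stack, unwinds it via panic/recover while the
// GC may have stack barriers installed, and then forces a stack walk to
// check their consistency.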
func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the consistency of stack barriers.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial
		// to keep stack barriers installed for a while.
		xtree = makeTree(18)
		// Give the GC time to install stack barriers.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

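// makeTree returns a complete binary tree of depth d, giving the GC a
// nontrivial amount of live heap to scan.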
func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// useStackAndCall uses about n KB of stack and then calls f.
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	useStackAndCall(n-1+int(b[99]), f)
}

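// useStack uses about n KB of stack.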
func useStack(n int) {
	useStackAndCall(n, func() {})
}

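// growing grows the stack by each amount received on c, signaling done after
// each growth and once more when c is closed.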
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4   // rounds
		G = 200 // goroutines per round
		S = 5   // stack growth steps per round
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

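// TestStackOutput checks the output format of Stack for the current goroutine.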
func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

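// TestStackAllOutput is like TestStackOutput but asks Stack for the traces
// of all goroutines.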
func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly.  This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic.  But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

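// BenchmarkStackCopy measures the cost of repeated stack growth (copying)
// caused by deep recursion in fresh goroutines.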
func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

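// count recurses n levels deep, forcing repeated stack growth along the way.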
func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}