github.com/panjjo/go@v0.0.0-20161104043856-d62b31386338/src/runtime/gc_test.go

     1  // Copyright 2011 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime_test
     6  
     7  import (
     8  	"os"
     9  	"reflect"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"testing"
    13  	"time"
    14  	"unsafe"
    15  )
    16  
    17  func TestGcSys(t *testing.T) {
    18  	if os.Getenv("GOGC") == "off" {
    19  		t.Skip("skipping test; GOGC=off in environment")
    20  	}
    21  	got := runTestProg(t, "testprog", "GCSys")
    22  	want := "OK\n"
    23  	if got != want {
    24  		t.Fatalf("expected %q, but got %q", want, got)
    25  	}
    26  }
    27  
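         // TestGcDeepNesting heap-allocates a deeply nested array type, stores a
         // value through the innermost pointer, and checks that the value survives
         // an explicit collection.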
    28  func TestGcDeepNesting(t *testing.T) {
    29  	type T [2][2][2][2][2][2][2][2][2][2]*int
    30  	a := new(T)
    31  
     32  	// Logging the pointer forces it to escape, so escape analysis
     33  	// cannot place new(T) on the stack; it must be allocated on the heap.
    34  	t.Logf("%p", a)
    35  
    36  	a[0][0][0][0][0][0][0][0][0][0] = new(int)
    37  	*a[0][0][0][0][0][0][0][0][0][0] = 13
    38  	runtime.GC()
    39  	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
    40  		t.Fail()
    41  	}
    42  }
    43  
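         // TestGcHashmapIndirection grows a map whose key type is too large to be
         // stored inline in the map's buckets, so the runtime keeps the keys
         // indirectly behind pointers, while GOGC=1 forces frequent collections.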
    44  func TestGcHashmapIndirection(t *testing.T) {
    45  	defer debug.SetGCPercent(debug.SetGCPercent(1))
    46  	runtime.GC()
    47  	type T struct {
    48  		a [256]int
    49  	}
    50  	m := make(map[T]T)
    51  	for i := 0; i < 2000; i++ {
    52  		var a T
    53  		a.a[0] = i
    54  		m[a] = T{}
    55  	}
    56  }
    57  
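         // TestGcArraySlice links nodes so that each node's nextbuf slice points
         // into the previous node's buf array, runs a collection after every link,
         // and then walks the list to check that no buffer was freed or corrupted.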
    58  func TestGcArraySlice(t *testing.T) {
    59  	type X struct {
    60  		buf     [1]byte
    61  		nextbuf []byte
    62  		next    *X
    63  	}
    64  	var head *X
    65  	for i := 0; i < 10; i++ {
    66  		p := &X{}
    67  		p.buf[0] = 42
    68  		p.next = head
    69  		if head != nil {
    70  			p.nextbuf = head.buf[:]
    71  		}
    72  		head = p
    73  		runtime.GC()
    74  	}
    75  	for p := head; p != nil; p = p.next {
    76  		if p.buf[0] != 42 {
    77  			t.Fatal("corrupted heap")
    78  		}
    79  	}
    80  }
    81  
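         // TestGcRescan makes every node reachable along two paths, through the
         // embedded X (nextx) and through the outer Y (nexty), and checks after
         // repeated collections that the separately allocated *int payload is intact.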
    82  func TestGcRescan(t *testing.T) {
    83  	type X struct {
    84  		c     chan error
    85  		nextx *X
    86  	}
    87  	type Y struct {
    88  		X
    89  		nexty *Y
    90  		p     *int
    91  	}
    92  	var head *Y
    93  	for i := 0; i < 10; i++ {
    94  		p := &Y{}
    95  		p.c = make(chan error)
    96  		if head != nil {
    97  			p.nextx = &head.X
    98  		}
    99  		p.nexty = head
   100  		p.p = new(int)
   101  		*p.p = 42
   102  		head = p
   103  		runtime.GC()
   104  	}
   105  	for p := head; p != nil; p = p.nexty {
   106  		if *p.p != 42 {
   107  			t.Fatal("corrupted heap")
   108  		}
   109  	}
   110  }
   111  
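         // TestGcLastTime checks that MemStats.LastGC falls between wall-clock
         // timestamps taken around an explicit GC. The most recent pause lives in
         // the PauseNs ring buffer at index (NumGC+255)%256, i.e. (NumGC-1) mod 256.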
   112  func TestGcLastTime(t *testing.T) {
   113  	ms := new(runtime.MemStats)
   114  	t0 := time.Now().UnixNano()
   115  	runtime.GC()
   116  	t1 := time.Now().UnixNano()
   117  	runtime.ReadMemStats(ms)
   118  	last := int64(ms.LastGC)
   119  	if t0 > last || last > t1 {
   120  		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
   121  	}
   122  	pause := ms.PauseNs[(ms.NumGC+255)%256]
    123  	// Due to timer granularity, the pause can actually be 0 on Windows
    124  	// or in virtualized environments.
   125  	if pause == 0 {
   126  		t.Logf("last GC pause was 0")
   127  	} else if pause > 10e9 {
   128  		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
   129  	}
   130  }
   131  
   132  var hugeSink interface{}
   133  
   134  func TestHugeGCInfo(t *testing.T) {
    135  	// The test ensures that the compiler can chew these huge types even on the weakest machines.
   136  	// The types are not allocated at runtime.
   137  	if hugeSink != nil {
    138  		// 400MB on 32-bit systems, 4TB on 64-bit systems:
         		// unsafe.Sizeof(uintptr(0)) is 4 on 32-bit, so the second term is 0;
         		// on 64-bit it is 8, adding 4<<40 bytes.
    139  		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
   140  		hugeSink = new([n]*byte)
   141  		hugeSink = new([n]uintptr)
   142  		hugeSink = new(struct {
   143  			x float64
   144  			y [n]*byte
   145  			z []string
   146  		})
   147  		hugeSink = new(struct {
   148  			x float64
   149  			y [n]uintptr
   150  			z []string
   151  		})
   152  	}
   153  }
   154  
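         // TestPeriodicGC checks that the runtime's forced periodic GC fires.
         // ForceGCPeriod is a test-only handle on the forced-GC interval; setting
         // it to zero makes the sysmon-driven forced GC run as often as possible.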
   155  func TestPeriodicGC(t *testing.T) {
   156  	// Make sure we're not in the middle of a GC.
   157  	runtime.GC()
   158  
   159  	var ms1, ms2 runtime.MemStats
   160  	runtime.ReadMemStats(&ms1)
   161  
   162  	// Make periodic GC run continuously.
   163  	orig := *runtime.ForceGCPeriod
   164  	*runtime.ForceGCPeriod = 0
   165  
   166  	// Let some periodic GCs happen. In a heavily loaded system,
   167  	// it's possible these will be delayed, so this is designed to
   168  	// succeed quickly if things are working, but to give it some
   169  	// slack if things are slow.
   170  	var numGCs uint32
   171  	const want = 2
   172  	for i := 0; i < 20 && numGCs < want; i++ {
   173  		time.Sleep(5 * time.Millisecond)
   174  
   175  		// Test that periodic GC actually happened.
   176  		runtime.ReadMemStats(&ms2)
   177  		numGCs = ms2.NumGC - ms1.NumGC
   178  	}
   179  	*runtime.ForceGCPeriod = orig
   180  
   181  	if numGCs < want {
   182  		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
   183  	}
   184  }
   185  
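         // The BenchmarkSetType* benchmarks measure how fast the runtime can write
         // heap pointer/type metadata for values with different pointer layouts.
         // They all go through benchSetType below, which reports throughput as the
         // size in bytes of the value (or slice) whose type information is set.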
   186  func BenchmarkSetTypePtr(b *testing.B) {
   187  	benchSetType(b, new(*byte))
   188  }
   189  
   190  func BenchmarkSetTypePtr8(b *testing.B) {
   191  	benchSetType(b, new([8]*byte))
   192  }
   193  
   194  func BenchmarkSetTypePtr16(b *testing.B) {
   195  	benchSetType(b, new([16]*byte))
   196  }
   197  
   198  func BenchmarkSetTypePtr32(b *testing.B) {
   199  	benchSetType(b, new([32]*byte))
   200  }
   201  
   202  func BenchmarkSetTypePtr64(b *testing.B) {
   203  	benchSetType(b, new([64]*byte))
   204  }
   205  
   206  func BenchmarkSetTypePtr126(b *testing.B) {
   207  	benchSetType(b, new([126]*byte))
   208  }
   209  
   210  func BenchmarkSetTypePtr128(b *testing.B) {
   211  	benchSetType(b, new([128]*byte))
   212  }
   213  
   214  func BenchmarkSetTypePtrSlice(b *testing.B) {
   215  	benchSetType(b, make([]*byte, 1<<10))
   216  }
   217  
   218  type Node1 struct {
   219  	Value       [1]uintptr
   220  	Left, Right *byte
   221  }
   222  
   223  func BenchmarkSetTypeNode1(b *testing.B) {
   224  	benchSetType(b, new(Node1))
   225  }
   226  
   227  func BenchmarkSetTypeNode1Slice(b *testing.B) {
   228  	benchSetType(b, make([]Node1, 32))
   229  }
   230  
   231  type Node8 struct {
   232  	Value       [8]uintptr
   233  	Left, Right *byte
   234  }
   235  
   236  func BenchmarkSetTypeNode8(b *testing.B) {
   237  	benchSetType(b, new(Node8))
   238  }
   239  
   240  func BenchmarkSetTypeNode8Slice(b *testing.B) {
   241  	benchSetType(b, make([]Node8, 32))
   242  }
   243  
   244  type Node64 struct {
   245  	Value       [64]uintptr
   246  	Left, Right *byte
   247  }
   248  
   249  func BenchmarkSetTypeNode64(b *testing.B) {
   250  	benchSetType(b, new(Node64))
   251  }
   252  
   253  func BenchmarkSetTypeNode64Slice(b *testing.B) {
   254  	benchSetType(b, make([]Node64, 32))
   255  }
   256  
   257  type Node64Dead struct {
   258  	Left, Right *byte
   259  	Value       [64]uintptr
   260  }
   261  
   262  func BenchmarkSetTypeNode64Dead(b *testing.B) {
   263  	benchSetType(b, new(Node64Dead))
   264  }
   265  
   266  func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
   267  	benchSetType(b, make([]Node64Dead, 32))
   268  }
   269  
   270  type Node124 struct {
   271  	Value       [124]uintptr
   272  	Left, Right *byte
   273  }
   274  
   275  func BenchmarkSetTypeNode124(b *testing.B) {
   276  	benchSetType(b, new(Node124))
   277  }
   278  
   279  func BenchmarkSetTypeNode124Slice(b *testing.B) {
   280  	benchSetType(b, make([]Node124, 32))
   281  }
   282  
   283  type Node126 struct {
   284  	Value       [126]uintptr
   285  	Left, Right *byte
   286  }
   287  
   288  func BenchmarkSetTypeNode126(b *testing.B) {
   289  	benchSetType(b, new(Node126))
   290  }
   291  
   292  func BenchmarkSetTypeNode126Slice(b *testing.B) {
   293  	benchSetType(b, make([]Node126, 32))
   294  }
   295  
   296  type Node128 struct {
   297  	Value       [128]uintptr
   298  	Left, Right *byte
   299  }
   300  
   301  func BenchmarkSetTypeNode128(b *testing.B) {
   302  	benchSetType(b, new(Node128))
   303  }
   304  
   305  func BenchmarkSetTypeNode128Slice(b *testing.B) {
   306  	benchSetType(b, make([]Node128, 32))
   307  }
   308  
   309  type Node130 struct {
   310  	Value       [130]uintptr
   311  	Left, Right *byte
   312  }
   313  
   314  func BenchmarkSetTypeNode130(b *testing.B) {
   315  	benchSetType(b, new(Node130))
   316  }
   317  
   318  func BenchmarkSetTypeNode130Slice(b *testing.B) {
   319  	benchSetType(b, make([]Node130, 32))
   320  }
   321  
   322  type Node1024 struct {
   323  	Value       [1024]uintptr
   324  	Left, Right *byte
   325  }
   326  
   327  func BenchmarkSetTypeNode1024(b *testing.B) {
   328  	benchSetType(b, new(Node1024))
   329  }
   330  
   331  func BenchmarkSetTypeNode1024Slice(b *testing.B) {
   332  	benchSetType(b, make([]Node1024, 32))
   333  }
   334  
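         // An additional size class could be benchmarked in the same pattern; the
         // type and function below are an illustrative sketch, not part of the
         // original benchmark set.

         type Node256 struct {
         	Value       [256]uintptr
         	Left, Right *byte
         }

         func BenchmarkSetTypeNode256(b *testing.B) {
         	benchSetType(b, new(Node256))
         }

         // benchSetType derives the benchmark's byte throughput from the value's
         // type (or, for slices, element size times length), resets the timer, and
         // then asks the test-only runtime.BenchSetType helper to set type
         // information for the value b.N times.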
   335  func benchSetType(b *testing.B, x interface{}) {
   336  	v := reflect.ValueOf(x)
   337  	t := v.Type()
   338  	switch t.Kind() {
   339  	case reflect.Ptr:
   340  		b.SetBytes(int64(t.Elem().Size()))
   341  	case reflect.Slice:
   342  		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
   343  	}
   344  	b.ResetTimer()
   345  	runtime.BenchSetType(b.N, x)
   346  }
   347  
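         // BenchmarkAllocation measures small-object allocation with GOMAXPROCS
         // goroutines pulling work items from a channel; each item triggers 1000
         // allocations, and sending the last pointer on result keeps the work from
         // being optimized away.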
   348  func BenchmarkAllocation(b *testing.B) {
   349  	type T struct {
   350  		x, y *byte
   351  	}
   352  	ngo := runtime.GOMAXPROCS(0)
   353  	work := make(chan bool, b.N+ngo)
   354  	result := make(chan *T)
   355  	for i := 0; i < b.N; i++ {
   356  		work <- true
   357  	}
   358  	for i := 0; i < ngo; i++ {
   359  		work <- false
   360  	}
   361  	for i := 0; i < ngo; i++ {
   362  		go func() {
   363  			var x *T
   364  			for <-work {
   365  				for i := 0; i < 1000; i++ {
   366  					x = &T{}
   367  				}
   368  			}
   369  			result <- x
   370  		}()
   371  	}
   372  	for i := 0; i < ngo; i++ {
   373  		<-result
   374  	}
   375  }
   376  
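         // TestPrintGC runs runtime.GC in a tight loop on one goroutine while the
         // main goroutine repeatedly defers a print call, checking that concurrent
         // collection and the defer/print machinery do not trip over each other.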
   377  func TestPrintGC(t *testing.T) {
   378  	if testing.Short() {
   379  		t.Skip("Skipping in short mode")
   380  	}
   381  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
   382  	done := make(chan bool)
   383  	go func() {
   384  		for {
   385  			select {
   386  			case <-done:
   387  				return
   388  			default:
   389  				runtime.GC()
   390  			}
   391  		}
   392  	}()
   393  	for i := 0; i < 1e4; i++ {
   394  		func() {
   395  			defer print("")
   396  		}()
   397  	}
   398  	close(done)
   399  }
   400  
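         // The small helpers below appear intended to exercise compiler code
         // generation for interface type switches, type assertions, and interface
         // comparison. testIfaceEqual is marked //go:noinline and stores its result
         // in the package-level a so the comparison is not optimized away.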
   401  func testTypeSwitch(x interface{}) error {
   402  	switch y := x.(type) {
   403  	case nil:
   404  		// ok
   405  	case error:
   406  		return y
   407  	}
   408  	return nil
   409  }
   410  
   411  func testAssert(x interface{}) error {
   412  	if y, ok := x.(error); ok {
   413  		return y
   414  	}
   415  	return nil
   416  }
   417  
   418  func testAssertVar(x interface{}) error {
   419  	var y, ok = x.(error)
   420  	if ok {
   421  		return y
   422  	}
   423  	return nil
   424  }
   425  
   426  var a bool
   427  
   428  //go:noinline
   429  func testIfaceEqual(x interface{}) {
   430  	if x == "abc" {
   431  		a = true
   432  	}
   433  }
   434  
   435  func TestPageAccounting(t *testing.T) {
   436  	// Grow the heap in small increments. This used to drop the
   437  	// pages-in-use count below zero because of a rounding
   438  	// mismatch (golang.org/issue/15022).
   439  	const blockSize = 64 << 10
   440  	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
   441  	for i := range blocks {
   442  		blocks[i] = new([blockSize]byte)
   443  	}
   444  
   445  	// Check that the running page count matches reality.
   446  	pagesInUse, counted := runtime.CountPagesInUse()
   447  	if pagesInUse != counted {
   448  		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
   449  	}
   450  }