github.com/s1s1ty/go@v0.0.0-20180207192209-104445e3140f/src/runtime/gc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from proving that a does not escape:
	// logging its address forces new(T) to be allocated on the
	// heap rather than on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

func TestGcHashmapIndirection(t *testing.T) {
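	// SetGCPercent returns the previous setting, so the next line
	// drops GOGC to 1 for the duration of the test and restores
	// the original value when the test returns.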
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

func TestGcArraySlice(t *testing.T) {
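	// Build a linked list in which each node's nextbuf slice points
	// into the previous node's one-byte array, so the GC must keep a
	// node alive while any slice still references its buffer.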
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcRescan(t *testing.T) {
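	// Link the list through both the embedded X (via nextx) and the
	// outer Y (via nexty), so the collector must scan each node along
	// both paths without dropping the *int payloads.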
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
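	// PauseNs is a circular buffer of the 256 most recent GC pause
	// durations; the most recent entry is at (NumGC+255)%256.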
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

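// hugeSink keeps its referent reachable from a global, which both
// defeats dead-store elimination and keeps the memory live for the GC.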
var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can handle these huge types
	// even on the weakest machines. The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit systems.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
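	// ForceGCPeriod is the runtime's forced-GC interval (normally two
	// minutes), exposed to tests via export_test.go; setting it to
	// zero makes periodic GCs fire back to back.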
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Check whether periodic GCs have happened yet.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= %v", numGCs, want)
	}
}

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

func benchSetType(b *testing.B, x interface{}) {
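	// Charge bytes per op so results are comparable across element
	// sizes: the pointed-to size for a pointer, element size times
	// length for a slice. runtime.BenchSetType (export_test.go) then
	// installs x's heap type information b.N times.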
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
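	// Queue b.N true tokens (each worth 1000 allocations) followed by
	// one false per goroutine as a stop signal. Each goroutine sends
	// its last allocation on result so the allocations cannot be
	// optimized away.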
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
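	// Hammer runtime.GC on one goroutine while another repeatedly
	// defers print; a bad interaction between the GC and the runtime
	// print path would show up here as a hang or crash.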
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func TestReadMemStats(t *testing.T) {
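	// ReadMemStatsSlow (export_test.go) recomputes the same statistics
	// directly from the heap data structures; the incrementally
	// maintained fast path must agree with it exactly.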
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't accidentally switch to mark 2 when
			// it only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
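		// copy on a slice of pointers goes through the runtime's bulk
		// write barrier (a single call covering the whole block rather
		// than a per-store barrier), which is what this benchmark
		// measures.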
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}