github.com/megatontech/mynoteforgo@v0.0.0-20200507084910-5d0c6ea6e890/源码/runtime/gc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

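// TestGcSys runs the GCSys program from testprog and expects "OK". The
// subprocess is assumed to check that runtime-reported Sys memory stays
// within expected bounds across many collections; see the skips below for
// platforms where that check is known to be flaky.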
func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	if runtime.GOOS == "windows" {
		t.Skip("skipping test; GOOS=windows http://golang.org/issue/27156")
	}
	if runtime.GOOS == "linux" && runtime.GOARCH == "arm64" {
		t.Skip("skipping test; GOOS=linux GOARCH=arm64 https://github.com/golang/go/issues/27636")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from concluding via escape analysis that a
	// does not escape: printing its address makes sure new(T) is
	// allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

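// TestGcMapIndirection fills a map whose large keys and values are stored
// indirectly (they exceed the inline size limit for map buckets), with the
// collector running at its most aggressive setting (GOGC=1).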
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

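// TestGcArraySlice builds a linked list in which each node's nextbuf slice
// aliases the previous node's one-byte array, then checks after repeated GCs
// that those interior pointers kept every buffer alive and intact.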
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

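// TestGcRescan links each new node both to the previous node (nexty) and to
// the X embedded inside it (nextx), so the same object is reachable through
// pointers of different static types; the values must survive every GC.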
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

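// TestGcLastTime checks that MemStats.LastGC falls inside the window around
// an explicit runtime.GC call and that the recorded pause looks sane.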
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types
	// even on the weakest machines. The types are not allocated at
	// runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

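// TestPeriodicGC checks that sysmon forces a collection on its own once the
// forced-GC period elapses, by shrinking that period to zero through the
// runtime.ForceGCPeriod test hook.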
func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

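// The BenchmarkSetType* benchmarks below measure the cost of installing heap
// type metadata (runtime.BenchSetType) for values of assorted shapes and
// sizes. The element counts bracket 128 pointer words, presumably where the
// runtime's pointer-bitmap encoding changes representation, and the
// Node*Dead variant puts its pointer fields first, leaving a pointer-free
// tail that the scanner can skip.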
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

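// benchSetType reports throughput as the number of bytes whose heap bitmap
// is written per operation: the pointee size for a pointer argument, or the
// total element bytes for a slice argument.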
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

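// BenchmarkAllocation spreads b.N work items over GOMAXPROCS goroutines,
// each item allocating 1000 small two-pointer objects; sending the final
// allocation over the result channel keeps it from being optimized away.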
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

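// TestPrintGC hammers print from one goroutine while another runs
// back-to-back collections, to flush out interactions between the print
// path and the garbage collector.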
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

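// testIfaceEqual must stay out of line and store its result in a package
// variable so the interface comparison cannot be optimized away.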
//go:noinline
func testIfaceEqual(x interface{}) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

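// TestReadMemStats cross-checks the regular ReadMemStats accounting against
// a slower recomputation from the runtime's internal structures.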
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

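// logDiff recursively walks two reflect.Values of the same type and logs
// every leaf field at which they differ.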
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

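// BenchmarkReadMemStats measures the cost of ReadMemStats against a roughly
// 100 MB live heap kept reachable through hugeSink.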
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}
	hugeSink = x

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	hugeSink = nil
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

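// BenchmarkWriteBarrier measures the per-write cost of the write barrier by
// swapping child pointers in a large binary tree while writeBarrierBenchmark
// keeps a collection, and therefore the barrier, permanently active.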
func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

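// BenchmarkBulkWriteBarrier measures the bulk write barrier path by copying
// within a large slice of pointers: each copy over a []*obj window issues
// barriers for a whole block of pointer slots at once.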
func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

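// BenchmarkScanStackNoLocals measures GC stack scanning: ten goroutines park
// on stacks roughly 100,000 frames deep, none holding live pointer locals,
// and each benchmark iteration runs two full collections over them.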
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

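// countpwg recurses until *n reaches zero to grow the goroutine's stack,
// signals readiness, then blocks until teardown so the deep stack stays in
// place for the duration of the benchmark.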
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}