github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/gc_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

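// TestGcDeepNesting checks that the collector correctly traces a pointer
// stored at the bottom of a deeply nested array type.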
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from proving that a does not escape:
	// printing a's address makes sure new(T) is allocated on the heap,
	// not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

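// TestGcMapIndirection inserts large keys ([256]int), which maps store
// indirectly rather than inline, while the GC runs aggressively
// (SetGCPercent(1)).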
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

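// TestGcArraySlice builds a linked list in which each node's nextbuf
// slice aliases the previous node's one-byte buf array, GCing at every
// step, and then checks that the buffers survived intact.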
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

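// TestGcRescan builds a list linked through both an embedded struct
// field (nextx) and an outer pointer (nexty), GCing at every step, and
// then checks that the heap-allocated values survived intact.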
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

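// TestGcLastTime checks that MemStats.LastGC falls within the window
// around an explicit runtime.GC call, and sanity-checks the recorded
// pause time.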
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
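	// PauseNs is a circular buffer of recent GC pause times; the most
	// recent pause is at PauseNs[(NumGC+255)%256].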
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or in virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that the compiler can chew these huge types
	// even on the weakest machines. The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so we set
	// invalidptr=0 to avoid the badPointer check.
	// See issue https://golang.org/issues/49613/
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move;
	// old should not have.

	// Capture new's value before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// This was a real failure.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

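// TestGCTestIsReachable allocates 16 pointers and keeps every other one
// reachable via the half slice, so GCTestIsReachable should report
// exactly the even-indexed bits as reachable.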
func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if want != got {
		t.Fatalf("did not get expected reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr to avoid
			// escaping it.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

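// The BenchmarkSetType* benchmarks below drive runtime.BenchSetType on
// values of varying size and pointer density, measuring the cost of
// writing heap pointer/type metadata; see benchSetType for the common
// driver.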
func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

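// benchSetType reports throughput in terms of the size of x's pointed-to
// or element type, then hands x to the runtime's BenchSetType test hook,
// which performs the b.N timed operations internally.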
func benchSetType(b *testing.B, x any) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Pointer:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	runtime.BenchSetType(b.N, x)
}

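// BenchmarkAllocation measures parallel allocation throughput: one
// goroutine per P drains a shared work channel, allocating 1000 small
// pointer-bearing objects per token. The trailing false tokens tell the
// workers to exit.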
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

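// TestPrintGC runs the GC continuously in a background goroutine while
// the main goroutine makes many short calls with deferred prints. No
// output is checked; the test passes if nothing crashes.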
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in small increments. This used to drop the
	// pages-in-use count below zero because of a rounding
	// mismatch (golang.org/issue/15022).
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches reality.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

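// logDiff recursively walks two values of the same type and logs each
// leaf field where got and want differ.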
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

func applyGCLoad(b *testing.B) func() {
	// We'll apply load to the runtime with maxProcs-1 goroutines
	// and use one more to actually benchmark. It doesn't make sense
	// to try to run this test with only 1 P (that's what
	// BenchmarkReadMemStats is for).
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Code to build a big tree with lots of pointers.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Keep the GC busy by continuously generating large trees.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Accumulate the latency measurements here.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure the latency
	// of each call.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}
	// Make sure to stop the timer before we wait! The load created above
	// is very heavy-weight and not easy to stop, so we could end up
	// confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics.
	// ns/op is not meaningful here: it's an average, and the sleep
	// in the b.N loop above skews it significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort latencies then report percentiles.
	sort.Slice(latencies, func(i, j int) bool {
		return latencies[i] < latencies[j]
	})
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

func TestUserForcedGC(t *testing.T) {
	// Test that runtime.GC() triggers a GC even if GOGC=off.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

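// writeBarrierBenchmark runs f while a background goroutine keeps the
// GC running continuously, so the write barrier stays enabled for the
// whole measurement.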
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	//b.Logf("heap size: %d MB", ms.HeapAlloc>>20)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed GC with enough early pointers so it
			// doesn't start termination barriers when it
			// only has the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 64 MB
	root := mkTree(depth)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i+=2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops (see issue #10958).
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate block.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

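// BenchmarkScanStackNoLocals measures GC cost with ten goroutines parked
// at the bottom of very deep call stacks (see countpwg), so each GC must
// scan a large amount of stack.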
func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

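// countpwg recurses *n times to grow the goroutine's stack, then signals
// ready and blocks on teardown, parking the goroutine at the bottom of a
// deep stack.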
func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}