github.com/x04/go/src@v0.0.0-20200202162449-3d481ceb3525/runtime/malloc_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"github.com/x04/go/src/flag"
	"github.com/x04/go/src/fmt"
	"github.com/x04/go/src/internal/race"
	"github.com/x04/go/src/internal/testenv"
	"github.com/x04/go/src/os"
	"github.com/x04/go/src/os/exec"
	"github.com/x04/go/src/reflect"
	. "github.com/x04/go/src/runtime"
	"github.com/x04/go/src/strings"
	"github.com/x04/go/src/testing"
	"github.com/x04/go/src/time"
	"github.com/x04/go/src/unsafe"
)

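// testMemStatsCount counts how many times TestMemStats has run in this
// process, so the looser sanity checks can be skipped under a high -test.count.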
var testMemStatsCount int

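// TestMemStats forces a GC and then checks that the fields reported by
// ReadMemStats are internally consistent and within plausible bounds.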
func TestMemStats(t *testing.T) {
	testMemStatsCount++

	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			// These sanity tests aren't necessarily valid
			// with high -test.count values, so only run
			// them once.
			if testMemStatsCount > 1 {
				return nil
			}

			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc":	{nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups":	{eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc":	{nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse":	{nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse":	{nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse":	{nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse":	{nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys":	{nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC":	{nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs":	{le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC":	{nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction":	{le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize":	nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

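// TestStringConcatenationAllocs checks that building "foo"+string(b) performs
// exactly one allocation: only the string concatenation allocates.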
func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

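// TestTinyAlloc checks that the tiny allocator combines small allocations:
// if every one of the N byte allocations landed in its own 8-byte-aligned
// chunk, no combining happened.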
func TestTinyAlloc(t *testing.T) {
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

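// TestPageCacheLeak lowers GOMAXPROCS to 1 and checks that no pages have been
// leaked in per-P page caches.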
func TestPageCacheLeak(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(1))
	leaked := PageCachePagesLeaked()
	if leaked != 0 {
		t.Fatalf("found %d leaked pages in page caches", leaked)
	}
}

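// TestPhysicalMemoryUtilization runs the GCPhys program from testprog and
// expects it to print OK.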
func TestPhysicalMemoryUtilization(t *testing.T) {
	got := runTestProg(t, "testprog", "GCPhys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

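// TestScavengedBitsCleared checks that scavenged bits are cleared where they
// must be, dumping any mismatching bitmap words reported by
// CheckScavengedBitsCleared.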
func TestScavengedBitsCleared(t *testing.T) {
	var mismatches [128]BitsMismatch
	if n, ok := CheckScavengedBitsCleared(mismatches[:]); !ok {
		t.Errorf("uncleared scavenged bits")
		for _, m := range mismatches[:n] {
			t.Logf("\t@ address 0x%x", m.Base)
			t.Logf("\t|  got: %064b", m.Got)
			t.Logf("\t| want: %064b", m.Want)
		}
		t.FailNow()
	}
}

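// acLink is a 1 MiB block used by TestArenaCollision to grow the heap quickly.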
type acLink struct {
	x [1 << 20]byte
}

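// arenaCollisionSink keeps the blocks allocated by TestArenaCollision live.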
func TestArenaCollision(t *testing.T) {
	testenv.MustHaveExec(t)

	// Test that mheap.sysAlloc handles collisions with other
	// memory mappings.
	if os.Getenv("TEST_ARENA_COLLISION") != "1" {
		cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestArenaCollision", "-test.v"))
		cmd.Env = append(cmd.Env, "TEST_ARENA_COLLISION=1")
		out, err := cmd.CombinedOutput()
		if race.Enabled {
			// This test runs the runtime out of hint
			// addresses, so it will start mapping the
			// heap wherever it can. The race detector
			// doesn't support this, so look for the
			// expected failure.
			if want := "too many address space collisions"; !strings.Contains(string(out), want) {
				t.Fatalf("want %q, got:\n%s", want, string(out))
			}
		} else if !strings.Contains(string(out), "PASS\n") || err != nil {
			t.Fatalf("%s\n(exit status %v)", string(out), err)
		}
		return
	}
	disallowed := [][2]uintptr{}
	// Drop all but the next 3 hints. 64-bit has a lot of hints,
	// so it would take a lot of memory to go through all of them.
	KeepNArenaHints(3)
	// Consume these 3 hints and force the runtime to find some
	// fallback hints.
	for i := 0; i < 5; i++ {
		// Reserve memory at the next hint so it can't be used
		// for the heap.
		start, end := MapNextArenaHint()
		disallowed = append(disallowed, [2]uintptr{start, end})
		// Allocate until the runtime tries to use the hint we
		// just mapped over.
		hint := GetNextArenaHint()
		for GetNextArenaHint() == hint {
			ac := new(acLink)
			arenaCollisionSink = append(arenaCollisionSink, ac)
			// The allocation must not have fallen into
			// one of the reserved regions.
			p := uintptr(unsafe.Pointer(ac))
			for _, d := range disallowed {
				if d[0] <= p && p < d[1] {
					t.Fatalf("allocation %#x in reserved region [%#x, %#x)", p, d[0], d[1])
				}
			}
		}
	}
}

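// mallocSink makes the benchmark results below observable so the compiler
// cannot optimize the allocations away.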
var mallocSink uintptr

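// BenchmarkMalloc8 measures allocation of an 8-byte pointer-free object.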
func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

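// BenchmarkMalloc16 measures allocation of a 16-byte pointer-free object.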
func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

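// BenchmarkMallocTypeInfo8 measures allocation of an 8-byte object that
// contains pointers, so the allocator must also record pointer metadata.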
func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

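// BenchmarkMallocTypeInfo16 is like BenchmarkMallocTypeInfo8, but with a
// 16-byte pointer-bearing object.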
func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

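// LargeStruct is a larger, pointer-bearing type used by
// BenchmarkMallocLargeStruct.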
type LargeStruct struct {
	x [16][]byte
}

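// BenchmarkMallocLargeStruct measures allocation of a slice of two
// LargeStructs.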
func BenchmarkMallocLargeStruct(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		x ^= uintptr(unsafe.Pointer(&p[0]))
	}
	mallocSink = x
}

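// n controls how many goroutines the goroutine benchmarks below create.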
var n = flag.Int("n", 1000, "number of goroutines")

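// BenchmarkGoroutineSelect measures GC with many goroutines blocked in a
// two-case select.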
func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

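// BenchmarkGoroutineBlocking measures GC with many goroutines blocked in a
// plain channel receive.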
func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

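// BenchmarkGoroutineForRange measures GC with many goroutines blocked in a
// range over a channel.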
func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

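// benchHelper starts n goroutines blocked in read, wakes them once per
// iteration, and times only the GC() call.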
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

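// BenchmarkGoroutineIdle measures GC with many goroutines permanently parked
// on a channel receive.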
func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}