github.com/epfl-dcsl/gotee@v0.0.0-20200909122901-014b35f5e5e9/src/runtime/malloc_test.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"flag"
	"fmt"
	"reflect"
	. "runtime"
	"testing"
	"time"
	"unsafe"
)

func TestMemStats(t *testing.T) {
	// Make sure there's at least one forced GC.
	GC()

	// Test that MemStats has sane values.
	st := new(MemStats)
	ReadMemStats(st)

	nz := func(x interface{}) error {
		if x != reflect.Zero(reflect.TypeOf(x)).Interface() {
			return nil
		}
		return fmt.Errorf("zero value")
	}
	le := func(thresh float64) func(interface{}) error {
		return func(x interface{}) error {
			if reflect.ValueOf(x).Convert(reflect.TypeOf(thresh)).Float() < thresh {
				return nil
			}
			return fmt.Errorf("insanely high value (overflow?); want <= %v", thresh)
		}
	}
	eq := func(x interface{}) func(interface{}) error {
		return func(y interface{}) error {
			if x == y {
				return nil
			}
			return fmt.Errorf("want %v", x)
		}
	}
	// Of the uint fields, HeapReleased, HeapIdle can be 0.
	// PauseTotalNs can be 0 if timer resolution is poor.
	fields := map[string][]func(interface{}) error{
		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
		"Lookups": {nz, le(1e10)}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
		"MSpanInuse": {nz, le(1e10)}, "MSpanSys": {nz, le(1e10)},
		"MCacheInuse": {nz, le(1e10)}, "MCacheSys": {nz, le(1e10)},
		"BuckHashSys": {nz, le(1e10)}, "GCSys": {nz, le(1e10)}, "OtherSys": {nz, le(1e10)},
		"NextGC": {nz, le(1e10)}, "LastGC": {nz},
		"PauseTotalNs": {le(1e11)}, "PauseNs": nil, "PauseEnd": nil,
		"NumGC": {nz, le(1e9)}, "NumForcedGC": {nz, le(1e9)},
		"GCCPUFraction": {le(0.99)}, "EnableGC": {eq(true)}, "DebugGC": {eq(false)},
		"BySize": nil,
	}

	rst := reflect.ValueOf(st).Elem()
	for i := 0; i < rst.Type().NumField(); i++ {
		name, val := rst.Type().Field(i).Name, rst.Field(i).Interface()
		checks, ok := fields[name]
		if !ok {
			t.Errorf("unknown MemStats field %s", name)
			continue
		}
		for _, check := range checks {
			if err := check(val); err != nil {
				t.Errorf("%s = %v: %s", name, val, err)
			}
		}
	}

	if st.Sys != st.HeapSys+st.StackSys+st.MSpanSys+st.MCacheSys+
		st.BuckHashSys+st.GCSys+st.OtherSys {
		t.Fatalf("Bad sys value: %+v", *st)
	}

	if st.HeapIdle+st.HeapInuse != st.HeapSys {
		t.Fatalf("HeapIdle(%d) + HeapInuse(%d) should be equal to HeapSys(%d), but isn't.", st.HeapIdle, st.HeapInuse, st.HeapSys)
	}

	if lpe := st.PauseEnd[int(st.NumGC+255)%len(st.PauseEnd)]; st.LastGC != lpe {
		t.Fatalf("LastGC(%d) != last PauseEnd(%d)", st.LastGC, lpe)
	}

	var pauseTotal uint64
	for _, pause := range st.PauseNs {
		pauseTotal += pause
	}
	if int(st.NumGC) < len(st.PauseNs) {
		// We have all pauses, so this should be exact.
		if st.PauseTotalNs != pauseTotal {
			t.Fatalf("PauseTotalNs(%d) != sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
		for i := int(st.NumGC); i < len(st.PauseNs); i++ {
			if st.PauseNs[i] != 0 {
				t.Fatalf("Non-zero PauseNs[%d]: %+v", i, st)
			}
			if st.PauseEnd[i] != 0 {
				t.Fatalf("Non-zero PauseEnd[%d]: %+v", i, st)
			}
		}
	} else {
		if st.PauseTotalNs < pauseTotal {
			t.Fatalf("PauseTotalNs(%d) < sum PauseNs(%d)", st.PauseTotalNs, pauseTotal)
		}
	}

	if st.NumForcedGC > st.NumGC {
		t.Fatalf("NumForcedGC(%d) > NumGC(%d)", st.NumForcedGC, st.NumGC)
	}
}

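// TestStringConcatenationAllocs checks that building "foo" + string(b)
// costs a single heap allocation: only the string concatenation itself
// should allocate.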
func TestStringConcatenationAllocs(t *testing.T) {
	n := testing.AllocsPerRun(1e3, func() {
		b := make([]byte, 10)
		for i := 0; i < 10; i++ {
			b[i] = byte(i) + '0'
		}
		s := "foo" + string(b)
		if want := "foo0123456789"; s != want {
			t.Fatalf("want %v, got %v", want, s)
		}
	})
	// Only string concatenation allocates.
	if n != 1 {
		t.Fatalf("want 1 allocation, got %v", n)
	}
}

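// TestTinyAlloc checks that the tiny allocator packs small, pointer-free
// allocations together: after allocating N single bytes, at least two of
// them should land in the same 8-byte-aligned chunk.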
func TestTinyAlloc(t *testing.T) {
	const N = 16
	var v [N]unsafe.Pointer
	for i := range v {
		v[i] = unsafe.Pointer(new(byte))
	}

	chunks := make(map[uintptr]bool, N)
	for _, p := range v {
		chunks[uintptr(p)&^7] = true
	}

	if len(chunks) == N {
		t.Fatal("no bytes allocated within the same 8-byte chunk")
	}
}

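// mallocSink keeps the pointer values produced by the malloc benchmarks
// reachable from a global, so the compiler cannot optimize the allocations away.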
var mallocSink uintptr

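// BenchmarkMalloc8 measures allocation of 8-byte pointer-free objects.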
func BenchmarkMalloc8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

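// BenchmarkMalloc16 measures allocation of 16-byte pointer-free objects.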
func BenchmarkMalloc16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new([2]int64)
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

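// BenchmarkMallocTypeInfo8 measures allocation of 8-byte objects that contain
// pointers, so the allocator must also record pointer/type information for the GC.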
func BenchmarkMallocTypeInfo8(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [8 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

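// BenchmarkMallocTypeInfo16 is the 16-byte variant of BenchmarkMallocTypeInfo8.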
func BenchmarkMallocTypeInfo16(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := new(struct {
			p [16 / unsafe.Sizeof(uintptr(0))]*int
		})
		x ^= uintptr(unsafe.Pointer(p))
	}
	mallocSink = x
}

type LargeStruct struct {
	x [16][]byte
}

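// BenchmarkMallocLargeStruct measures allocation of a slice of two LargeStruct
// values, i.e. a larger, pointer-containing object.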
func BenchmarkMallocLargeStruct(b *testing.B) {
	var x uintptr
	for i := 0; i < b.N; i++ {
		p := make([]LargeStruct, 2)
		x ^= uintptr(unsafe.Pointer(&p[0]))
	}
	mallocSink = x
}

var n = flag.Int("n", 1000, "number of goroutines")

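// BenchmarkGoroutineSelect measures GC time (via benchHelper) with *n
// goroutines parked in a two-case select.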
func BenchmarkGoroutineSelect(b *testing.B) {
	quit := make(chan struct{})
	read := func(ch chan struct{}) {
		for {
			select {
			case _, ok := <-ch:
				if !ok {
					return
				}
			case <-quit:
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

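// BenchmarkGoroutineBlocking measures GC time with *n goroutines blocked in a
// plain channel receive.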
func BenchmarkGoroutineBlocking(b *testing.B) {
	read := func(ch chan struct{}) {
		for {
			if _, ok := <-ch; !ok {
				return
			}
		}
	}
	benchHelper(b, *n, read)
}

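// BenchmarkGoroutineForRange measures GC time with *n goroutines blocked in a
// for-range loop over a channel.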
func BenchmarkGoroutineForRange(b *testing.B) {
	read := func(ch chan struct{}) {
		for range ch {
		}
	}
	benchHelper(b, *n, read)
}

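// benchHelper starts n goroutines running read on buffered channels, then
// repeatedly wakes them, waits for them to block again, and times a forced GC.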
func benchHelper(b *testing.B, n int, read func(chan struct{})) {
	m := make([]chan struct{}, n)
	for i := range m {
		m[i] = make(chan struct{}, 1)
		go read(m[i])
	}
	b.StopTimer()
	b.ResetTimer()
	GC()

	for i := 0; i < b.N; i++ {
		for _, ch := range m {
			if ch != nil {
				ch <- struct{}{}
			}
		}
		time.Sleep(10 * time.Millisecond)
		b.StartTimer()
		GC()
		b.StopTimer()
	}

	for _, ch := range m {
		close(ch)
	}
	time.Sleep(10 * time.Millisecond)
}

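// BenchmarkGoroutineIdle measures GC time with *n goroutines parked on a
// channel that is only closed once the benchmark loop has finished.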
func BenchmarkGoroutineIdle(b *testing.B) {
	quit := make(chan struct{})
	fn := func() {
		<-quit
	}
	for i := 0; i < *n; i++ {
		go fn()
	}

	GC()
	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		GC()
	}

	b.StopTimer()
	close(quit)
	time.Sleep(10 * time.Millisecond)
}