github.com/c12o16h1/go/src@v0.0.0-20200114212001-5a151c0f00ed/runtime/testdata/testprog/gc.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"time"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
	register("DeferLiveness", DeferLiveness)
}

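// GCSys allocates roughly 100 MB of short-lived garbage and checks that the
// runtime's total memory footprint (MemStats.Sys) grows by no more than
// 16 MB, i.e. that collected memory is reused rather than requested anew
// from the OS.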
func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated roughly 100 MB in total (100000 iterations of ~1 KB each),
	// but the live set at any moment is just the most recent allocation.
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

var sink []byte

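// workthegc allocates a small slice and publishes it through the global
// sink so the compiler cannot optimize the allocation away. Each call
// turns the previous slice into garbage for the GC to reclaim.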
func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}

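// GCFairness checks scheduling fairness on a single P: goroutines spinning
// in write syscalls must not prevent the main goroutine from waking from
// its sleep and printing OK.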
func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test is only meaningful if writes are fast.
		// If there is no /dev/null, skip the test.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}

func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

func GCPhys() {
	// This test ensures that heap-growth scavenging is working as intended.
	//
	// It sets up a specific scenario: it allocates two pairs of objects whose
	// sizes sum to size. One object in each pair is "small" (though it must be
	// large enough to be considered a large object by the runtime) and one is
	// large. The small objects are kept while the large objects are freed,
	// creating two large unscavenged holes in the heap. The heap goal should
	// also be small as a result (so size must be at least as large as the
	// minimum heap size). We then allocate one large object, bigger than both
	// pairs of objects combined. This allocation, because it will tip
	// HeapSys-HeapReleased well above the heap goal, should trigger heap-growth
	// scavenging and scavenge most, if not all, of the large holes we created
	// earlier.
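	//
	// Concretely, with the constants below, each pair is a 64 KiB object we
	// keep plus a (4 MiB - 64 KiB) object we free, and the final allocation
	// is objects*size = 8 MiB, larger than both pairs combined.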
	const (
		// size must also be large enough to be considered a large
		// object (not in any size-segregated span).
		size    = 4 << 20
		split   = 64 << 10
		objects = 2

		// The page cache could hide 64 8-KiB pages from the scavenger today.
		maxPageCache = (8 << 10) * 64
	)
	// Set GOGC so that this test operates under consistent assumptions.
	debug.SetGCPercent(100)
	// Reduce GOMAXPROCS down to 4 if it's greater. We need to bound the amount
	// of memory held in the page cache because the scavenger can't reach it.
	// The page cache will hold at most maxPageCache of memory per-P, so this
	// bounds the amount of memory hidden from the scavenger to 4*maxPageCache.
	procs := runtime.GOMAXPROCS(-1)
	if procs > 4 {
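		// The inner GOMAXPROCS(4) call takes effect now and returns the
		// previous value; the deferred outer call restores it on return.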
		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	}
	// Save objects which we want to survive, and condemn objects which we don't.
	// Note that we condemn objects in this way and release them all at once in
	// order to avoid having the GC start freeing up these objects while the loop
	// is still running and filling in the holes we intend to make.
	saved := make([][]byte, 0, objects+1)
	condemned := make([][]byte, 0, objects)
	for i := 0; i < 2*objects; i++ {
		if i%2 == 0 {
			saved = append(saved, make([]byte, split))
		} else {
			condemned = append(condemned, make([]byte, size-split))
		}
	}
	condemned = nil
	// Clean up the heap. This will free every other object created above
	// (i.e. everything in condemned), creating holes in the heap.
	// Also, if the condemned objects are still being swept, it's possible that
	// the scavenging that happens as a result of the next allocation won't see
	// the holes at all. We call runtime.GC() twice here so that when we allocate
	// our large object there's no race with sweeping.
	runtime.GC()
	runtime.GC()
	// Perform one big allocation which should also scavenge any holes.
	//
	// The heap goal will rise after this object is allocated, so it's very
	// important that we try to do all the scavenging in a single allocation
	// that exceeds the heap goal. Otherwise the rising heap goal could foil our
	// test.
	saved = append(saved, make([]byte, objects*size))
	// Clean up the heap again just to put it in a known state.
	runtime.GC()
	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages)
	// whereas HeapReleased is an estimate of the number of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased
	// If heapBacked exceeds the live heap (HeapAlloc) by no more than the
	// threshold computed below, the scavenger is working as expected: the
	// newly-created holes have been scavenged immediately as part of the big
	// allocation, which could not fit in them.
	//
	// Since the runtime should scavenge the entirety of the remaining holes,
	// theoretically there should be no more free and unscavenged memory. However, due
	// to other allocations that happen during this test we may still see some physical
	// memory over-use.
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	// Compute the threshold.
	//
	// In theory, this threshold should just be zero, but that's not possible in practice.
	// Firstly, the runtime's page cache can hide up to maxPageCache of free memory from the
	// scavenger per P. To account for this, we increase the threshold by the ratio of the
	// total amount the runtime could hide from the scavenger to the amount of memory we expect
	// to be able to scavenge here, which is (size-split)*objects. This computation is the reason
	// we capped GOMAXPROCS above; if GOMAXPROCS is too high the threshold just becomes 100%+ since the
	// amount of memory being allocated is fixed. Then we add 5% to account for noise, such as
	// other allocations this test may have performed that we don't explicitly account for. The
	// baseline threshold here is around 11% for GOMAXPROCS=1, capping out at around 30% for
	// GOMAXPROCS=4.
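	//
	// Worked out: maxPageCache is 512 KiB and (size-split)*objects is about
	// 7.9 MiB, so each P contributes roughly 512/8064 ≈ 6.3% to the threshold:
	// 0.05 + 0.063 ≈ 11% for one P and 0.05 + 4*0.063 ≈ 30% for four.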
	threshold := 0.05 + float64(procs)*maxPageCache/float64((size-split)*objects)
	if overuse <= threshold {
		fmt.Println("OK")
		return
	}
	// Physical memory utilization exceeds the threshold, so heap-growth scavenging
	// did not operate as expected.
	//
	// In the context of this test, this indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
	fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+
		"(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100,
		stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved))
	runtime.KeepAlive(saved)
}

// Test that the defer closure is correctly scanned when the stack is scanned.
func DeferLiveness() {
	var x [10]int
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	x[0] = 42
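	// Run the GC (and hence stack scans) several times while the defer is
	// pending. If the deferred closure were not scanned, x could be collected
	// and its memory reused, making the x[0] check in fn fail.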
	runtime.GC()
	runtime.GC()
	runtime.GC()
}

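// escape forces its argument to escape to the heap by publishing it through
// a global, then clears the global so nothing extra is kept live.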
//go:noinline
func escape(x interface{}) { sink2 = x; sink2 = nil }

var sink2 interface{}