github.com/go-darwin/sys@v0.0.0-20220510002607-68fd01f054ca/testdata/testprog/gc.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"time"
	"unsafe"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
	register("DeferLiveness", DeferLiveness)
	register("GCZombie", GCZombie)
}

func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated roughly 100 MB of garbage above.
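	//
	// Rough arithmetic: 100000 iterations * 1029 bytes is about 103 MB of
	// allocations in total, while only the most recent slice remains reachable
	// through sink, so the growth in Sys measured below should stay well under
	// the 16 MB limit.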
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

var sink []byte

func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}

func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test is meaningful only if writes complete quickly.
		// If there is no /dev/null, skip the test and report OK.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
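	// With GOMAXPROCS=1, the two goroutines above loop forever writing to
	// /dev/null. The print below is only reached if the scheduler still lets
	// the main goroutine run, i.e. the busy writers do not starve it.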
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}

func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

func GCPhys() {
	// This test ensures that heap-growth scavenging is working as intended.
	//
	// It sets up a specific scenario: it allocates two pairs of objects whose
	// sizes sum to size. One object in each pair is "small" (though must be
	// large enough to be considered a large object by the runtime) and one is
	// large. The small objects are kept while the large objects are freed,
	// creating two large unscavenged holes in the heap. The heap goal should
	// also be small as a result (so size must be at least as large as the
	// minimum heap size). We then allocate one large object, bigger than both
	// pairs of objects combined. This allocation, because it will tip
	// HeapSys-HeapReleased well above the heap goal, should trigger heap-growth
	// scavenging and scavenge most, if not all, of the large holes we created
	// earlier.
	const (
		// Size must also be large enough to be considered a large
		// object (not in any size-segregated span).
		size    = 4 << 20
		split   = 64 << 10
		objects = 2

		// The page cache could hide 64 8-KiB pages from the scavenger today.
		maxPageCache = (8 << 10) * 64

		// Reduce GOMAXPROCS down to 4 if it's greater. We need to bound the amount
		// of memory held in the page cache because the scavenger can't reach it.
		// The page cache will hold at most maxPageCache of memory per-P, so this
		// bounds the amount of memory hidden from the scavenger to 4*maxPageCache
		// at most.
		maxProcs = 4
	)
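	// With these constants, each pair consists of one 64 KiB object we keep and
	// one object of just under 4 MiB we free, so dropping the condemned objects
	// leaves two holes of roughly 4 MiB each. The later allocation of
	// objects*size = 8 MiB cannot be satisfied from those holes alone, which
	// should push HeapSys-HeapReleased past the goal as described above.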
	// Set GOGC so that this test operates under consistent assumptions.
	debug.SetGCPercent(100)
	procs := runtime.GOMAXPROCS(-1)
	if procs > maxProcs {
		defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
		procs = runtime.GOMAXPROCS(-1)
	}
	// Save objects which we want to survive, and condemn objects which we don't.
	// Note that we condemn objects in this way and release them all at once in
	// order to avoid having the GC start freeing up these objects while the loop
	// is still running and filling in the holes we intend to make.
	saved := make([][]byte, 0, objects+1)
	condemned := make([][]byte, 0, objects)
	for i := 0; i < 2*objects; i++ {
		if i%2 == 0 {
			saved = append(saved, make([]byte, split))
		} else {
			condemned = append(condemned, make([]byte, size-split))
		}
	}
	condemned = nil
	// Clean up the heap. This will free up every other object created above
	// (i.e. everything in condemned), creating holes in the heap.
	// Also, if the condemned objects are still being swept, it's possible that
	// the scavenging that happens as a result of the next allocation won't see
	// the holes at all. We call runtime.GC() twice here so that when we allocate
	// our large object there's no race with sweeping.
	runtime.GC()
	runtime.GC()
	// Perform one big allocation which should also scavenge any holes.
	//
	// The heap goal will rise after this object is allocated, so it's very
	// important that we try to do all the scavenging in a single allocation
	// that exceeds the heap goal. Otherwise the rising heap goal could foil our
	// test.
	saved = append(saved, make([]byte, objects*size))
	// Clean up the heap again just to put it in a known state.
	runtime.GC()
	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages)
	// whereas HeapReleased is an estimate of the number of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased
	// If heapBacked does not exceed the heap goal by more than the threshold
	// computed below, then the scavenger is working as expected; the newly-created
	// holes have been scavenged immediately as part of the allocations which cannot
	// fit in the holes.
	//
	// Since the runtime should scavenge the entirety of the remaining holes,
	// theoretically there should be no more free and unscavenged memory. However, due
	// to other allocations that happen during this test we may still see some physical
	// memory over-use.
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	// Compute the threshold.
	//
	// In theory, this threshold should just be zero, but that's not possible in practice.
	// Firstly, the runtime's page cache can hide up to maxPageCache of free memory from the
	// scavenger per P. To account for this, we increase the threshold by the ratio of the
	// total amount the runtime could hide from the scavenger to the amount of memory we expect
	// to be able to scavenge here, which is (size-split)*objects. This computation is the crux
	// of the GOMAXPROCS cap above; if GOMAXPROCS is too high, the threshold just becomes 100%+
	// since the amount of memory being allocated is fixed. Then we add 5% to account for noise,
	// such as other allocations this test may have performed that we don't explicitly account
	// for. The baseline threshold here is around 11% for GOMAXPROCS=1, capping out at around
	// 30% for GOMAXPROCS=4.
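	//
	// For reference, plugging in the constants: maxPageCache/((size-split)*objects)
	// = 524288/8257536 ≈ 0.063, so threshold ≈ 0.05 + 0.063*procs, i.e. about 11%
	// for GOMAXPROCS=1 and about 30% for GOMAXPROCS=4, matching the figures above.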
	threshold := 0.05 + float64(procs)*maxPageCache/float64((size-split)*objects)
	if overuse <= threshold {
		fmt.Println("OK")
		return
	}
	// Physical memory utilization exceeds the threshold, so heap-growth scavenging
	// did not operate as expected.
	//
	// In the context of this test, this indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
	fmt.Printf("exceeded physical memory overuse threshold of %3.2f%%: %3.2f%%\n"+
		"(alloc: %d, goal: %d, sys: %d, rel: %d, objs: %d)\n", threshold*100, overuse*100,
		stats.HeapAlloc, stats.NextGC, stats.HeapSys, stats.HeapReleased, len(saved))
	runtime.KeepAlive(saved)
}

// Test that a defer closure is correctly scanned when the stack is scanned.
func DeferLiveness() {
	var x [10]int
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	x[0] = 42
	runtime.GC()
	runtime.GC()
	runtime.GC()
}

//go:noinline
func escape(x interface{}) { sink2 = x; sink2 = nil }

var sink2 interface{}

// Test zombie object detection and reporting.
func GCZombie() {
	// Allocate several objects of unusual size (so free slots are
	// unlikely to all be re-allocated by the runtime).
	const size = 190
	const count = 8192 / size
	keep := make([]*byte, 0, (count+1)/2)
	free := make([]uintptr, 0, (count+1)/2)
	zombies := make([]*byte, 0, len(free))
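	// Note: free is still empty at this point, so the capacity hint for zombies
	// is effectively zero; the slice simply grows as needed further down.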
	for i := 0; i < count; i++ {
		obj := make([]byte, size)
		p := &obj[0]
		if i%2 == 0 {
			keep = append(keep, p)
		} else {
			free = append(free, uintptr(unsafe.Pointer(p)))
		}
	}

	// Free the unreferenced objects.
	runtime.GC()

	// Bring the free objects back to life.
	for _, p := range free {
		zombies = append(zombies, (*byte)(unsafe.Pointer(p)))
	}

	// GC should detect the zombie objects and abort the program; if execution
	// reaches the print below, zombie detection did not fire.
	runtime.GC()
	println("failed")
	runtime.KeepAlive(keep)
	runtime.KeepAlive(zombies)
}