github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/testdata/testprog/gc.go

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"fmt"
	"os"
	"runtime"
	"runtime/debug"
	"sync/atomic"
	"time"
)

func init() {
	register("GCFairness", GCFairness)
	register("GCFairness2", GCFairness2)
	register("GCSys", GCSys)
	register("GCPhys", GCPhys)
	register("DeferLiveness", DeferLiveness)
}

// GCSys checks that allocating and discarding many small objects does not
// significantly grow the amount of memory obtained from the OS (Sys).
func GCSys() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 100000
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated about 100 MB in total (itercount allocations of 1029 bytes).
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

var sink []byte

// workthegc allocates a small object and publishes it through a global so the
// allocation cannot be optimized away.
func workthegc() []byte {
	sink = make([]byte, 1029)
	return sink
}

// GCFairness checks that goroutines spinning in write syscalls do not starve
// the main goroutine, which must still get scheduled to print OK.
func GCFairness() {
	runtime.GOMAXPROCS(1)
	f, err := os.Open("/dev/null")
	if os.IsNotExist(err) {
		// This test tests what it is intended to test only if writes are fast.
		// If there is no /dev/null, we just don't execute the test.
		fmt.Println("OK")
		return
	}
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	for i := 0; i < 2; i++ {
		go func() {
			for {
				f.Write([]byte("."))
			}
		}()
	}
	time.Sleep(10 * time.Millisecond)
	fmt.Println("OK")
}

func GCFairness2() {
	// Make sure user code can't exploit the GC's high priority
	// scheduling to make scheduling of user code unfair. See
	// issue #15706.
	runtime.GOMAXPROCS(1)
	debug.SetGCPercent(1)
	var count [3]int64
	var sink [3]interface{}
	for i := range count {
		go func(i int) {
			for {
				sink[i] = make([]byte, 1024)
				atomic.AddInt64(&count[i], 1)
			}
		}(i)
	}
	// Note: If the unfairness is really bad, it may not even get
	// past the sleep.
	//
	// If the scheduling rules change, this may not be enough time
	// to let all goroutines run, but for now we cycle through
	// them rapidly.
	//
	// OpenBSD's scheduler makes every usleep() take at least
	// 20ms, so we need a long time to ensure all goroutines have
	// run. If they haven't run after 30ms, give it another 1000ms
	// and check again.
	time.Sleep(30 * time.Millisecond)
	var fail bool
	for i := range count {
		if atomic.LoadInt64(&count[i]) == 0 {
			fail = true
		}
	}
	if fail {
		time.Sleep(1 * time.Second)
		for i := range count {
			if atomic.LoadInt64(&count[i]) == 0 {
				fmt.Printf("goroutine %d did not run\n", i)
				return
			}
		}
	}
	fmt.Println("OK")
}

var maybeSaved []byte

func GCPhys() {
	// In this test, we construct a very specific scenario. We first
	// allocate N objects and drop half of their pointers on the floor,
	// effectively creating N/2 'holes' in our allocated arenas. We then
	// try to allocate objects twice as big. At the end, we measure the
	// physical memory overhead of large objects.
	//
	// The purpose of this test is to ensure that the GC scavenges free
	// spans eagerly, so that physical memory utilization stays high even
	// in the presence of fragmentation.
	const (
		// Unfortunately, measuring actual used physical pages is
		// difficult because HeapReleased doesn't include the parts
		// of an arena that haven't yet been touched. So, we just
		// make objects and size sufficiently large such that even
		// 64 MB of overhead is relatively small in the final
		// calculation.
		//
		// Currently, we target 480 MiB worth of memory for our test,
		// computed as size * objects + (size*2) * (objects/2)
		// = 2 * size * objects.
		//
		// Size must also be large enough to be considered a large
		// object (not in any size-segregated span).
		size    = 1 << 20
		objects = 240
	)
	// Save objects which we want to survive, and condemn objects which we don't.
	// Note that we condemn objects in this way and release them all at once in
	// order to avoid having the GC start freeing up these objects while the loop
	// is still running and filling in the holes we intend to make.
	saved := make([][]byte, 0, objects)
	condemned := make([][]byte, 0, objects/2+1)
	for i := 0; i < objects; i++ {
		// Write into a global, to prevent this from being optimized away by
		// the compiler in the future.
		maybeSaved = make([]byte, size)
		if i%2 == 0 {
			saved = append(saved, maybeSaved)
		} else {
			condemned = append(condemned, maybeSaved)
		}
	}
	condemned = nil
	// Clean up the heap. This will free every other object created above
	// (i.e. everything in condemned), creating holes in the heap.
	runtime.GC()
	// Allocate many new objects of 2x size.
	for i := 0; i < objects/2; i++ {
		saved = append(saved, make([]byte, size*2))
	}
	// Clean up the heap again just to put it in a known state.
	runtime.GC()
	// heapBacked is an estimate of the amount of physical memory used by
	// this test. HeapSys is an estimate of the size of the mapped virtual
	// address space (which may or may not be backed by physical pages),
	// whereas HeapReleased is an estimate of the number of bytes returned
	// to the OS. Their difference then roughly corresponds to the amount
	// of virtual address space that is backed by physical pages.
	var stats runtime.MemStats
	runtime.ReadMemStats(&stats)
	heapBacked := stats.HeapSys - stats.HeapReleased
	// If heapBacked exceeds the amount of memory actually used for
	// heap-allocated objects by more than 10% (post-GC HeapAlloc should be
	// quite close to the size of the working set), then fail.
	//
	// In the context of this test, that indicates a large amount of
	// fragmentation with physical pages that are otherwise unused but not
	// returned to the OS.
	overuse := (float64(heapBacked) - float64(stats.HeapAlloc)) / float64(stats.HeapAlloc)
	if overuse > 0.1 {
		fmt.Printf("exceeded physical memory overuse threshold of 10%%: %3.2f%%\n"+
			"(alloc: %d, sys: %d, rel: %d, objs: %d)\n", overuse*100, stats.HeapAlloc,
			stats.HeapSys, stats.HeapReleased, len(saved))
		return
	}
	fmt.Println("OK")
	runtime.KeepAlive(saved)
}

// Test that a deferred closure is correctly scanned when the stack is scanned.
func DeferLiveness() {
	var x [10]int
	escape(&x)
	fn := func() {
		if x[0] != 42 {
			panic("FAIL")
		}
	}
	defer fn()

	x[0] = 42
	// Force several GCs so that the stack, and with it the pending defer's
	// closure, gets scanned while the defer is still outstanding.
	runtime.GC()
	runtime.GC()
	runtime.GC()
}

// escape publishes its argument through a global (and immediately clears it)
// so that the argument is considered to escape; the noinline directive keeps
// the compiler from optimizing the store away.
//go:noinline
func escape(x interface{}) { sink2 = x; sink2 = nil }

var sink2 interface{}
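
// For context: the entry points above become runnable by name through
// register, which is defined in this package's main.go. The sketch below is
// illustrative only; the identifier cmds and the exact messages are
// assumptions about that file rather than a verified copy of it. Roughly, the
// dispatcher looks like:
//
//	package main
//
//	import "os"
//
//	var cmds = map[string]func(){}
//
//	func register(name string, f func()) {
//		if cmds[name] != nil {
//			panic("duplicate registration: " + name)
//		}
//		cmds[name] = f
//	}
//
//	func main() {
//		if len(os.Args) < 2 {
//			println("usage: " + os.Args[0] + " name-of-test")
//			return
//		}
//		f := cmds[os.Args[1]]
//		if f == nil {
//			println("unknown function: " + os.Args[1])
//			return
//		}
//		f()
//	}
//
// With such a dispatcher, the runtime tests build this program and invoke it
// with a test name (for example, GCSys), then check that it prints "OK".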