// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"testing"
	"time"
	"unsafe"
)

// TestGcSys checks that a program allocating a large amount of short-lived
// garbage does not grow its OS-visible memory footprint (MemStats.Sys) by
// more than ~16MB. The workload runs as a separate program built from
// testGCSysSource via executeTest (a helper defined elsewhere in this
// package), so its heap is isolated from the test binary's own allocations.
func TestGcSys(t *testing.T) {
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	// Pass testing.Short() to the template so the child program can
	// reduce its iteration count in -short mode.
	data := struct{ Short bool }{testing.Short()}
	got := executeTest(t, testGCSysSource, &data)
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

// testGCSysSource is the source of the child program run by TestGcSys.
// It is a template: the {{if .Short}} block lowers the iteration count
// when the test runs in -short mode.
const testGCSysSource = `
package main

import (
	"fmt"
	"runtime"
)

func main() {
	runtime.GOMAXPROCS(1)
	memstats := new(runtime.MemStats)
	runtime.GC()
	runtime.ReadMemStats(memstats)
	sys := memstats.Sys

	runtime.MemProfileRate = 0 // disable profiler

	itercount := 1000000
{{if .Short}}
	itercount = 100000
{{end}}
	for i := 0; i < itercount; i++ {
		workthegc()
	}

	// Should only be using a few MB.
	// We allocated 100 MB or (if not short) 1 GB.
	runtime.ReadMemStats(memstats)
	if sys > memstats.Sys {
		sys = 0
	} else {
		sys = memstats.Sys - sys
	}
	if sys > 16<<20 {
		fmt.Printf("using too much memory: %d bytes\n", sys)
		return
	}
	fmt.Printf("OK\n")
}

func workthegc() []byte {
	return make([]byte, 1029)
}
`

// TestGcDeepNesting checks that a pointer stored at the bottom of a deeply
// nested array type (ten levels of [2] arrays) survives a collection.
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

// TestGcHashmapIndirection inserts many large (2KB) keys into a map while
// GC runs aggressively (GOGC=1), exercising scanning of map keys that are
// too big to store inline. SetGCPercent returns the previous value, so the
// defer restores the caller's setting on exit.
func TestGcHashmapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

// TestGcArraySlice builds a linked list in which each node's nextbuf slice
// aliases the previous node's in-struct buf array, forcing a GC after each
// link; it then verifies the aliased bytes survived, i.e. that the GC kept
// objects alive through interior slice pointers.
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

// TestGcRescan links each new node both to the previous node as a whole
// (nexty) and to the X embedded inside it via an interior pointer (nextx),
// running a GC after every insertion; it then checks that heap data
// reachable through those pointers is intact.
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

// TestGcLastTime checks that MemStats.LastGC falls between timestamps
// taken just before and just after an explicit GC, and sanity-checks the
// most recent pause. PauseNs is a 256-entry circular buffer, so index
// (NumGC+255)%256 is the latest recorded pause.
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		// A pause over 10s is suspicious but only logged, not fatal.
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

// hugeSink is never set to a non-nil value, so the allocations guarded by
// it in TestHugeGCInfo are dead at runtime; the compiler must still build
// GC type information for the huge types.
var hugeSink interface{}

func TestHugeGCInfo(t *testing.T) {
	// The test ensures that compiler can chew these huge types even on weakest machines.
	// The types are not allocated at runtime.
	if hugeSink != nil {
		// 400MB on 32-bit systems, 4TB on 64-bit
		// (the second term is zero when unsafe.Sizeof(uintptr(0)) == 4).
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

// The BenchmarkSetType* functions below benchmark setting heap type
// information for a range of pointer/array/struct/slice shapes; they all
// funnel into benchSetType (whose runtime call is currently disabled in
// this port).

func BenchmarkSetTypePtr(b *testing.B) {
	benchSetType(b, new(*byte))
}

func BenchmarkSetTypePtr8(b *testing.B) {
	benchSetType(b, new([8]*byte))
}

func BenchmarkSetTypePtr16(b *testing.B) {
	benchSetType(b, new([16]*byte))
}

func BenchmarkSetTypePtr32(b *testing.B) {
	benchSetType(b, new([32]*byte))
}

func BenchmarkSetTypePtr64(b *testing.B) {
	benchSetType(b, new([64]*byte))
}

func BenchmarkSetTypePtr126(b *testing.B) {
	benchSetType(b, new([126]*byte))
}

func BenchmarkSetTypePtr128(b *testing.B) {
	benchSetType(b, new([128]*byte))
}

func BenchmarkSetTypePtrSlice(b *testing.B) {
	benchSetType(b, make([]*byte, 1<<10))
}

// The NodeN types pair an N-word scalar payload with two trailing
// pointers; Node64Dead instead puts the pointers first, ahead of the
// pointer-free payload.

type Node1 struct {
	Value       [1]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1(b *testing.B) {
	benchSetType(b, new(Node1))
}

func BenchmarkSetTypeNode1Slice(b *testing.B) {
	benchSetType(b, make([]Node1, 32))
}

type Node8 struct {
	Value       [8]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode8(b *testing.B) {
	benchSetType(b, new(Node8))
}

func BenchmarkSetTypeNode8Slice(b *testing.B) {
	benchSetType(b, make([]Node8, 32))
}

type Node64 struct {
	Value       [64]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode64(b *testing.B) {
	benchSetType(b, new(Node64))
}

func BenchmarkSetTypeNode64Slice(b *testing.B) {
	benchSetType(b, make([]Node64, 32))
}

type Node64Dead struct {
	Left, Right *byte
	Value       [64]uintptr
}

func BenchmarkSetTypeNode64Dead(b *testing.B) {
	benchSetType(b, new(Node64Dead))
}

func BenchmarkSetTypeNode64DeadSlice(b *testing.B) {
	benchSetType(b, make([]Node64Dead, 32))
}

type Node124 struct {
	Value       [124]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode124(b *testing.B) {
	benchSetType(b, new(Node124))
}

func BenchmarkSetTypeNode124Slice(b *testing.B) {
	benchSetType(b, make([]Node124, 32))
}

type Node126 struct {
	Value       [126]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode126(b *testing.B) {
	benchSetType(b, new(Node126))
}

func BenchmarkSetTypeNode126Slice(b *testing.B) {
	benchSetType(b, make([]Node126, 32))
}

type Node128 struct {
	Value       [128]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode128(b *testing.B) {
	benchSetType(b, new(Node128))
}

func BenchmarkSetTypeNode128Slice(b *testing.B) {
	benchSetType(b, make([]Node128, 32))
}

type Node130 struct {
	Value       [130]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode130(b *testing.B) {
	benchSetType(b, new(Node130))
}

func BenchmarkSetTypeNode130Slice(b *testing.B) {
	benchSetType(b, make([]Node130, 32))
}

type Node1024 struct {
	Value       [1024]uintptr
	Left, Right *byte
}

func BenchmarkSetTypeNode1024(b *testing.B) {
	benchSetType(b, new(Node1024))
}

func BenchmarkSetTypeNode1024Slice(b *testing.B) {
	benchSetType(b, make([]Node1024, 32))
}

// benchSetType sets b's bytes-processed metric from the size of the value
// x refers to: the pointee size for a pointer, or element size times
// length for a slice. The runtime.BenchSetType call that would perform the
// measured work is commented out in this (gofrontend/llgo) port, so the
// benchmarks currently do no per-iteration work.
func benchSetType(b *testing.B, x interface{}) {
	v := reflect.ValueOf(x)
	t := v.Type()
	switch t.Kind() {
	case reflect.Ptr:
		b.SetBytes(int64(t.Elem().Size()))
	case reflect.Slice:
		b.SetBytes(int64(t.Elem().Size()) * int64(v.Len()))
	}
	b.ResetTimer()
	//runtime.BenchSetType(b.N, x)
}

// BenchmarkAllocation measures parallel allocation of small two-pointer
// structs. The work channel is pre-loaded with b.N "do work" tokens (true)
// plus one "stop" token (false) per goroutine; each true token triggers
// 1000 allocations. Every goroutine publishes its last pointer on result,
// which also serves as the completion signal the main goroutine waits on.
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

// TestPrintGC stress-tests garbage collection running concurrently with
// deferred print calls: a background goroutine triggers back-to-back GCs
// until done is closed, while the main goroutine repeatedly defers and
// executes print("") with two procs available.
func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

// The tests below are disabled in this port: they depend on the
// TestingAssertE2I2GC/TestingAssertE2T2GC hooks exported from the gc
// runtime's export_test.go, which this runtime does not provide.
/*

// The implicit y, ok := x.(error) for the case error
// in testTypeSwitch used to not initialize the result y
// before passing &y to assertE2I2GC.
// Catch this by making assertE2I2 call runtime.GC,
// which will force a stack scan and failure if there are
// bad pointers, and then fill the stack with bad pointers
// and run the type switch.
func TestAssertE2I2Liveness(t *testing.T) {
	// Note that this flag is defined in export_test.go
	// and is not available to ordinary imports of runtime.
	*runtime.TestingAssertE2I2GC = true
	defer func() {
		*runtime.TestingAssertE2I2GC = false
	}()

	poisonStack()
	testTypeSwitch(io.EOF)
	poisonStack()
	testAssert(io.EOF)
	poisonStack()
	testAssertVar(io.EOF)
}

func poisonStack() uintptr {
	var x [1000]uintptr
	for i := range x {
		x[i] = 0xff
	}
	return x[123]
}

func testTypeSwitch(x interface{}) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x interface{}) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x interface{}) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

func TestAssertE2T2Liveness(t *testing.T) {
	*runtime.TestingAssertE2T2GC = true
	defer func() {
		*runtime.TestingAssertE2T2GC = false
	}()

	poisonStack()
	testIfaceEqual(io.EOF)
}

func testIfaceEqual(x interface{}) {
	if x == "abc" {
		// Prevent inlining
		panic("")
	}
}

*/