github.com/llvm-mirror/llgo@v0.0.0-20190322182713-bf6f0a60fce1/third_party/gofrontend/libgo/go/runtime/pprof/pprof_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !nacl

package pprof_test

import (
	"bytes"
	"fmt"
	"internal/testenv"
	"math/big"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	. "runtime/pprof"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"
)

func cpuHogger(f func()) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a 25x safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the 250ms period.
	t0 := time.Now()
	for i := 0; i < 500 || time.Since(t0) < 250*time.Millisecond; i++ {
		f()
	}
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1() {
	foo := salt1
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	salt1 = foo
}

func cpuHog2() {
	foo := salt2
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	salt2 = foo
}

func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, []string{"pprof_test.cpuHog1"}, func() {
		cpuHogger(cpuHog1)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, []string{"pprof_test.cpuHog1", "pprof_test.cpuHog2"}, func() {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1)
			c <- 1
		}()
		cpuHogger(cpuHog2)
		<-c
	})
}
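
// parseProfile decodes the legacy word-based CPU profile produced by
// StopCPUProfile and calls f once per sample. As the checks below show,
// the profile is a sequence of uintptr words in host byte order (which is
// why the raw bytes can be reinterpreted in place):
//
//	header:  0, 3, 0, sampling period in microseconds, 0
//	sample:  count, number of PCs, PC1, ..., PCn   (repeated)
//	trailer: 0, 1, 0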
func parseProfile(t *testing.T, bytes []byte, f func(uintptr, []uintptr)) {
	// Convert []byte to []uintptr.
	l := len(bytes) / int(unsafe.Sizeof(uintptr(0)))
	val := *(*[]uintptr)(unsafe.Pointer(&bytes))
	val = val[:l]

	// 5 for the header, 2 for the per-sample header on at least one sample, 3 for the trailer.
	if l < 5+2+3 {
		t.Logf("profile too short: %#x", val)
		if badOS[runtime.GOOS] {
			t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
			return
		}
		t.FailNow()
	}

	hd, val, tl := val[:5], val[5:l-3], val[l-3:]
	if hd[0] != 0 || hd[1] != 3 || hd[2] != 0 || hd[3] != 1e6/100 || hd[4] != 0 {
		t.Fatalf("unexpected header %#x", hd)
	}

	if tl[0] != 0 || tl[1] != 1 || tl[2] != 0 {
		t.Fatalf("malformed end-of-data marker %#x", tl)
	}

	for len(val) > 0 {
		if len(val) < 2 || val[0] < 1 || val[1] < 1 || uintptr(len(val)) < 2+val[1] {
			t.Fatalf("malformed profile. leftover: %#x", val)
		}
		f(val[0], val[2:2+val[1]])
		val = val[2+val[1]:]
	}
}

func testCPUProfile(t *testing.T, need []string, f func()) {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	f()
	StopCPUProfile()

	// Check that profile is well formed and contains need.
	have := make([]uintptr, len(need))
	var samples uintptr
	parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
		samples += count
		for _, pc := range stk {
			f := runtime.FuncForPC(pc)
			if f == nil {
				continue
			}
			t.Log(f.Name(), count)
			for i, name := range need {
				if strings.Contains(f.Name(), name) {
					have[i] += count
				}
			}
		}
	})
	t.Logf("total %d CPU profile samples collected", samples)

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Skip("too few samples on Windows (golang.org/issue/10842)")
	}

	if len(need) == 0 {
		return
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	ok := true
	if total == 0 {
		t.Logf("no CPU profile samples collected")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095). So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}

	if !ok {
		if badOS[runtime.GOOS] {
			t.Skipf("ignoring failure on %s; see golang.org/issue/6047", runtime.GOOS)
			return
		}
		// Ignore the failure if the tests are running in a QEMU-based emulator;
		// QEMU is not perfect at emulating everything.
		// The IN_QEMU environment variable is set by some of the Go builders.
		// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
		if os.Getenv("IN_QEMU") == "1" {
			t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
			return
		}
		t.FailNow()
	}
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
		// (The special no-traceback entries checked for below are allowed.)
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []uintptr) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				f := runtime.FuncForPC(stk[1])
				if f != nil && (f.Name() == "runtime._System" || f.Name() == "runtime._ExternalCode" || f.Name() == "runtime._GC") {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the innermost frame.
			f := runtime.FuncForPC(stk[0])
			if f != nil && f.Name() == "runtime.gogo" {
				var buf bytes.Buffer
				for _, pc := range stk {
					f := runtime.FuncForPC(pc)
					if f == nil {
						fmt.Fprintf(&buf, "%#x ?:0\n", pc)
					} else {
						file, line := f.FileLine(pc)
						fmt.Fprintf(&buf, "%#x %s:%d\n", pc, file, line)
					}
				}
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func() {
		t := time.After(5 * time.Second)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

// Operating systems that are expected to fail the tests. See issue 6047.
var badOS = map[string]bool{
	"darwin": true,
	"netbsd": true,
	"plan9":  true,
}
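
// TestBlockProfile checks the debug=1 text form of the "block" profile: a
// "--- contention:" header with a cycles/second line, followed by one record
// per unique blocking site. Each record gives two counters and the raw stack
// PCs after "@", plus one "#" line per resolved frame (address,
// function+offset, file:line), which is what the regular expressions below
// match.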
func TestBlockProfile(t *testing.T) {
	t.Skip("lots of details are different for gccgo; FIXME")
	type TestCase struct {
		name string
		f    func()
		re   string
	}
	tests := [...]TestCase{
		{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	runtime\.chanrecv1\+0x[0-9,a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockChanRecv\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan send", blockChanSend, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	runtime\.chansend1\+0x[0-9,a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockChanSend\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	runtime\.chanrecv1\+0x[0-9,a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockChanClose\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select recv async", blockSelectRecvAsync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	runtime\.selectgo\+0x[0-9,a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockSelectRecvAsync\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select send sync", blockSelectSendSync, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	runtime\.selectgo\+0x[0-9,a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockSelectSendSync\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"mutex", blockMutex, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9,a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockMutex\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"cond", blockCond, `
[0-9]+ [0-9]+ @ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+ 0x[0-9,a-f]+
#	0x[0-9,a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9,a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.blockCond\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9,a-f]+	runtime/pprof_test\.TestBlockProfile\+0x[0-9,a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}
	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 1)
	prof := w.String()

	if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
		t.Fatalf("Bad profile header:\n%v", prof)
	}

	for _, test := range tests {
		if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
			t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
		}
	}
}

const blockDelay = 10 * time.Millisecond

func blockChanRecv() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	<-c
}

func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}

func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync() {
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	select {
	case <-c:
	case <-c2:
	}
}

func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	mu.Lock()
}

func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}