github.com/activestate/go@v0.0.0-20170614201249-0b81c023a722/src/runtime/pprof/pprof_test.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !nacl

package pprof

import (
	"bytes"
	"context"
	"fmt"
	"internal/testenv"
	"io"
	"math/big"
	"os"
	"os/exec"
	"regexp"
	"runtime"
	"runtime/pprof/internal/profile"
	"strings"
	"sync"
	"testing"
	"time"
)

func cpuHogger(f func(), dur time.Duration) {
	// We only need to get one 100 Hz clock tick, so we've got
	// a large safety buffer.
	// But do at least 500 iterations (which should take about 100ms),
	// otherwise TestCPUProfileMultithreaded can fail if only one
	// thread is scheduled during the testing period.
	t0 := time.Now()
	for i := 0; i < 500 || time.Since(t0) < dur; i++ {
		f()
	}
}

var (
	salt1 = 0
	salt2 = 0
)

// The actual CPU hogging function.
// Must not call other functions nor access heap/globals in the loop,
// otherwise under race detector the samples will be in the race runtime.
func cpuHog1() {
	foo := salt1
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 1
		}
	}
	salt1 = foo
}

func cpuHog2() {
	foo := salt2
	for i := 0; i < 1e5; i++ {
		if foo > 0 {
			foo *= foo
		} else {
			foo *= foo + 2
		}
	}
	salt2 = foo
}

func TestCPUProfile(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1"}, func(dur time.Duration) {
		cpuHogger(cpuHog1, dur)
	})
}

func TestCPUProfileMultithreaded(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	testCPUProfile(t, []string{"runtime/pprof.cpuHog1", "runtime/pprof.cpuHog2"}, func(dur time.Duration) {
		c := make(chan int)
		go func() {
			cpuHogger(cpuHog1, dur)
			c <- 1
		}()
		cpuHogger(cpuHog2, dur)
		<-c
	})
}

func TestCPUProfileInlining(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.inlinedCallee", "runtime/pprof.inlinedCaller"}, func(dur time.Duration) {
		cpuHogger(inlinedCaller, dur)
	})
}

func inlinedCaller() {
	inlinedCallee()
}

func inlinedCallee() {
	// We could just use cpuHog1, but for loops prevent inlining
	// right now. :(
	foo := salt1
	i := 0
loop:
	if foo > 0 {
		foo *= foo
	} else {
		foo *= foo + 1
	}
	if i++; i < 1e5 {
		goto loop
	}
	salt1 = foo
}

func parseProfile(t *testing.T, valBytes []byte, f func(uintptr, []*profile.Location, map[string][]string)) {
	p, err := profile.Parse(bytes.NewReader(valBytes))
	if err != nil {
		t.Fatal(err)
	}
	for _, sample := range p.Sample {
		count := uintptr(sample.Value[0])
		f(count, sample.Location, sample.Label)
	}
}

func testCPUProfile(t *testing.T, need []string, f func(dur time.Duration)) {
	switch runtime.GOOS {
	case "darwin":
		switch runtime.GOARCH {
		case "arm", "arm64":
			// nothing
		default:
			out, err := exec.Command("uname", "-a").CombinedOutput()
			if err != nil {
				t.Fatal(err)
			}
			vers := string(out)
			t.Logf("uname -a: %v", vers)
		}
	case "plan9":
		t.Skip("skipping on plan9")
	}

	const maxDuration = 5 * time.Second
	// If we're running a long test, start with a long duration
	// for tests that try to make sure something *doesn't* happen.
	duration := 5 * time.Second
	if testing.Short() {
		duration = 200 * time.Millisecond
	}

	// Profiling tests are inherently flaky, especially on a
	// loaded system, such as when this test is running with
	// several others under go test std. If a test fails in a way
	// that could mean it just didn't run long enough, try with a
	// longer duration.
	for duration <= maxDuration {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		f(duration)
		StopCPUProfile()

		if profileOk(t, need, prof, duration) {
			return
		}

		duration *= 2
		if duration <= maxDuration {
			t.Logf("retrying with %s duration", duration)
		}
	}

	if badOS[runtime.GOOS] {
		t.Skipf("ignoring failure on %s; see golang.org/issue/13841", runtime.GOOS)
		return
	}
	// Ignore the failure if the tests are running in a QEMU-based emulator;
	// QEMU is not perfect at emulating everything.
	// The IN_QEMU environment variable is set by some of the Go builders.
	// IN_QEMU=1 indicates that the tests are running in QEMU. See issue 9605.
	if os.Getenv("IN_QEMU") == "1" {
		t.Skip("ignore the failure in QEMU; see golang.org/issue/9605")
		return
	}
	t.FailNow()
}

func contains(slice []string, s string) bool {
	for i := range slice {
		if slice[i] == s {
			return true
		}
	}
	return false
}

func profileOk(t *testing.T, need []string, prof bytes.Buffer, duration time.Duration) (ok bool) {
	ok = true

	// Check that profile is well formed and contains need.
	have := make([]uintptr, len(need))
	var samples uintptr
	var buf bytes.Buffer
	parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, labels map[string][]string) {
		fmt.Fprintf(&buf, "%d:", count)
		fprintStack(&buf, stk)
		samples += count
		for i, name := range need {
			if semi := strings.Index(name, ";"); semi > -1 {
				kv := strings.SplitN(name[semi+1:], "=", 2)
				if len(kv) != 2 || !contains(labels[kv[0]], kv[1]) {
					continue
				}
				name = name[:semi]
			}
			for _, loc := range stk {
				for _, line := range loc.Line {
					if strings.Contains(line.Function.Name, name) {
						have[i] += count
					}
				}
			}
		}
		fmt.Fprintf(&buf, "\n")
	})
	t.Logf("total %d CPU profile samples collected:\n%s", samples, buf.String())

	if samples < 10 && runtime.GOOS == "windows" {
		// On some windows machines we end up with
		// not enough samples due to coarse timer
		// resolution. Let it go.
		t.Log("too few samples on Windows (golang.org/issue/10842)")
		return false
	}

	// Check that we got a reasonable number of samples.
	// We used to always require at least ideal/4 samples,
	// but that is too hard to guarantee on a loaded system.
	// Now we accept 10 or more samples, which we take to be
	// enough to show that at least some profiling is occurring.
	if ideal := uintptr(duration * 100 / time.Second); samples == 0 || (samples < ideal/4 && samples < 10) {
		t.Logf("too few samples; got %d, want at least %d, ideally %d", samples, ideal/4, ideal)
		ok = false
	}

	if len(need) == 0 {
		return ok
	}

	var total uintptr
	for i, name := range need {
		total += have[i]
		t.Logf("%s: %d\n", name, have[i])
	}
	if total == 0 {
		t.Logf("no samples in expected functions")
		ok = false
	}
	// We'd like to check a reasonable minimum, like
	// total / len(have) / smallconstant, but this test is
	// pretty flaky (see bug 7095). So we'll just test to
	// make sure we got at least one sample.
	min := uintptr(1)
	for i, name := range need {
		if have[i] < min {
			t.Logf("%s has %d samples out of %d, want at least %d, ideally %d", name, have[i], total, min, total/uintptr(len(have)))
			ok = false
		}
	}
	return ok
}

// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we do not do this.
func TestCPUProfileWithFork(t *testing.T) {
	testenv.MustHaveExec(t)

	heap := 1 << 30
	if runtime.GOOS == "android" {
		// Use smaller size for Android to avoid crash.
		heap = 100 << 20
	}
	if testing.Short() {
		heap = 100 << 20
	}
	// This makes fork slower.
	garbage := make([]byte, heap)
	// Need to touch the slice, otherwise it won't be paged in.
	done := make(chan bool)
	go func() {
		for i := range garbage {
			garbage[i] = 42
		}
		done <- true
	}()
	<-done

	var prof bytes.Buffer
	if err := StartCPUProfile(&prof); err != nil {
		t.Fatal(err)
	}
	defer StopCPUProfile()

	for i := 0; i < 10; i++ {
		exec.Command(os.Args[0], "-h").CombinedOutput()
	}
}

// Test that profiler does not observe runtime.gogo as "user" goroutine execution.
// If it did, it would see inconsistent state and would either record an incorrect stack
// or crash because the stack was malformed.
func TestGoroutineSwitch(t *testing.T) {
	// How much to try. These defaults take about 1 second
	// on a 2012 MacBook Pro. The ones in short mode take
	// about 0.1 seconds.
	tries := 10
	count := 1000000
	if testing.Short() {
		tries = 1
	}
	for try := 0; try < tries; try++ {
		var prof bytes.Buffer
		if err := StartCPUProfile(&prof); err != nil {
			t.Fatal(err)
		}
		for i := 0; i < count; i++ {
			runtime.Gosched()
		}
		StopCPUProfile()

		// Read profile to look for entries for runtime.gogo with an attempt at a traceback.
		// The special entries for runtime._System, runtime._ExternalCode, and
		// runtime._GC are allowed; see the check below.
		parseProfile(t, prof.Bytes(), func(count uintptr, stk []*profile.Location, _ map[string][]string) {
			// An entry with two frames with 'System' in its top frame
			// exists to record a PC without a traceback. Those are okay.
			if len(stk) == 2 {
				name := stk[1].Line[0].Function.Name
				if name == "runtime._System" || name == "runtime._ExternalCode" || name == "runtime._GC" {
					return
				}
			}

			// Otherwise, should not see runtime.gogo.
			// The place we'd see it would be the innermost frame.
			name := stk[0].Line[0].Function.Name
			if name == "runtime.gogo" {
				var buf bytes.Buffer
				fprintStack(&buf, stk)
				t.Fatalf("found profile entry for runtime.gogo:\n%s", buf.String())
			}
		})
	}
}

func fprintStack(w io.Writer, stk []*profile.Location) {
	for _, loc := range stk {
		fmt.Fprintf(w, " %#x", loc.Address)
		fmt.Fprintf(w, " (")
		for i, line := range loc.Line {
			if i > 0 {
				fmt.Fprintf(w, " ")
			}
			fmt.Fprintf(w, "%s:%d", line.Function.Name, line.Line)
		}
		fmt.Fprintf(w, ")")
	}
	fmt.Fprintf(w, "\n")
}

// Test that profiling of division operations is okay, especially on ARM. See issue 6681.
func TestMathBigDivide(t *testing.T) {
	testCPUProfile(t, nil, func(duration time.Duration) {
		t := time.After(duration)
		pi := new(big.Int)
		for {
			for i := 0; i < 100; i++ {
				n := big.NewInt(2646693125139304345)
				d := big.NewInt(842468587426513207)
				pi.Div(n, d)
			}
			select {
			case <-t:
				return
			default:
			}
		}
	})
}

// Operating systems that are expected to fail the tests. See issue 13841.
var badOS = map[string]bool{
	"darwin":    true,
	"netbsd":    true,
	"plan9":     true,
	"dragonfly": true,
	"solaris":   true,
}

func TestBlockProfile(t *testing.T) {
	type TestCase struct {
		name string
		f    func()
		re   string
	}
	tests := [...]TestCase{
		{"chan recv", blockChanRecv, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanRecv\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan send", blockChanSend, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chansend1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanSend\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"chan close", blockChanClose, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.chanrecv1\+0x[0-9a-f]+	.*/src/runtime/chan.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockChanClose\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select recv async", blockSelectRecvAsync, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectRecvAsync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"select send sync", blockSelectSendSync, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	runtime\.selectgo\+0x[0-9a-f]+	.*/src/runtime/select.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockSelectSendSync\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"mutex", blockMutex, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Mutex\)\.Lock\+0x[0-9a-f]+	.*/src/sync/mutex\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockMutex\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
		{"cond", blockCond, `
[0-9]+ [0-9]+ @( 0x[[:xdigit:]]+)+
#	0x[0-9a-f]+	sync\.\(\*Cond\)\.Wait\+0x[0-9a-f]+	.*/src/sync/cond\.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.blockCond\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
#	0x[0-9a-f]+	runtime/pprof\.TestBlockProfile\+0x[0-9a-f]+	.*/src/runtime/pprof/pprof_test.go:[0-9]+
`},
	}

	runtime.SetBlockProfileRate(1)
	defer runtime.SetBlockProfileRate(0)
	for _, test := range tests {
		test.f()
	}
	var w bytes.Buffer
	Lookup("block").WriteTo(&w, 1)
	prof := w.String()

	if !strings.HasPrefix(prof, "--- contention:\ncycles/second=") {
		t.Fatalf("Bad profile header:\n%v", prof)
	}

	if strings.HasSuffix(prof, "#\t0x0\n\n") {
		t.Errorf("Useless 0 suffix:\n%v", prof)
	}

	for _, test := range tests {
		if !regexp.MustCompile(strings.Replace(test.re, "\t", "\t+", -1)).MatchString(prof) {
			t.Fatalf("Bad %v entry, expect:\n%v\ngot:\n%v", test.name, test.re, prof)
		}
	}
}

const blockDelay = 10 * time.Millisecond

func blockChanRecv() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		c <- true
	}()
	<-c
}

func blockChanSend() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	c <- true
}

func blockChanClose() {
	c := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		close(c)
	}()
	<-c
}

func blockSelectRecvAsync() {
	const numTries = 3
	c := make(chan bool, 1)
	c2 := make(chan bool, 1)
	go func() {
		for i := 0; i < numTries; i++ {
			time.Sleep(blockDelay)
			c <- true
		}
	}()
	for i := 0; i < numTries; i++ {
		select {
		case <-c:
		case <-c2:
		}
	}
}

func blockSelectSendSync() {
	c := make(chan bool)
	c2 := make(chan bool)
	go func() {
		time.Sleep(blockDelay)
		<-c
	}()
	select {
	case c <- true:
	case c2 <- true:
	}
}

func blockMutex() {
	var mu sync.Mutex
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Unlock()
	}()
	// Note: Unlock releases mu before recording the mutex event,
	// so it's theoretically possible for this to proceed and
	// capture the profile before the event is recorded. As long
	// as this is blocked before the unlock happens, it's okay.
	mu.Lock()
}

func blockCond() {
	var mu sync.Mutex
	c := sync.NewCond(&mu)
	mu.Lock()
	go func() {
		time.Sleep(blockDelay)
		mu.Lock()
		c.Signal()
		mu.Unlock()
	}()
	c.Wait()
	mu.Unlock()
}

func TestMutexProfile(t *testing.T) {
	old := runtime.SetMutexProfileFraction(1)
	defer runtime.SetMutexProfileFraction(old)
	if old != 0 {
		t.Fatalf("need MutexProfileFraction 0, got %d", old)
	}

	blockMutex()

	var w bytes.Buffer
	Lookup("mutex").WriteTo(&w, 1)
	prof := w.String()

	if !strings.HasPrefix(prof, "--- mutex:\ncycles/second=") {
		t.Errorf("Bad profile header:\n%v", prof)
	}
	prof = strings.Trim(prof, "\n")
	lines := strings.Split(prof, "\n")
	if len(lines) != 6 {
		t.Errorf("expected 6 lines, got %d %q\n%s", len(lines), prof, prof)
	}
	if len(lines) < 6 {
		return
	}
	// Check that the line looks like "35258904 1 @ 0x48288d 0x47cd28 0x458931".
	r2 := `^\d+ 1 @(?: 0x[[:xdigit:]]+)+`
	if ok, err := regexp.MatchString(r2, lines[3]); err != nil || !ok {
		t.Errorf("%q didn't match %q", lines[3], r2)
	}
	r3 := "^#.*runtime/pprof.blockMutex.*$"
	if ok, err := regexp.MatchString(r3, lines[5]); err != nil || !ok {
		t.Errorf("%q didn't match %q", lines[5], r3)
	}
}

func func1(c chan int) { <-c }
func func2(c chan int) { <-c }
func func3(c chan int) { <-c }
func func4(c chan int) { <-c }

func TestGoroutineCounts(t *testing.T) {
	// Setting GOMAXPROCS to 1 ensures we can force all goroutines to the
	// desired blocking point.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))

	c := make(chan int)
	for i := 0; i < 100; i++ {
		switch {
		case i%10 == 0:
			go func1(c)
		case i%2 == 0:
			go func2(c)
		default:
			go func3(c)
		}
		// Let goroutines block on channel
		for j := 0; j < 5; j++ {
			runtime.Gosched()
		}
	}

	var w bytes.Buffer
	goroutineProf := Lookup("goroutine")

	// Check debug profile
	goroutineProf.WriteTo(&w, 1)
	prof := w.String()

	if !containsInOrder(prof, "\n50 @ ", "\n40 @", "\n10 @", "\n1 @") {
		t.Errorf("expected sorted goroutine counts:\n%s", prof)
	}

	// Check proto profile
	w.Reset()
	goroutineProf.WriteTo(&w, 0)
	p, err := profile.Parse(&w)
	if err != nil {
		t.Errorf("error parsing protobuf profile: %v", err)
	}
	if err := p.CheckValid(); err != nil {
		t.Errorf("protobuf profile is invalid: %v", err)
	}
	if !containsCounts(p, []int64{50, 40, 10, 1}) {
		t.Errorf("expected count profile to contain goroutines with counts %v, got %v",
			[]int64{50, 40, 10, 1}, p)
	}

	close(c)

	time.Sleep(10 * time.Millisecond) // let goroutines exit
}

func containsInOrder(s string, all ...string) bool {
	for _, t := range all {
		i := strings.Index(s, t)
		if i < 0 {
			return false
		}
		s = s[i+len(t):]
	}
	return true
}

func containsCounts(prof *profile.Profile, counts []int64) bool {
	m := make(map[int64]int)
	for _, c := range counts {
		m[c]++
	}
	for _, s := range prof.Sample {
		// The count is the single value in the sample
		if len(s.Value) != 1 {
			return false
		}
		m[s.Value[0]]--
	}
	for _, n := range m {
		if n > 0 {
			return false
		}
	}
	return true
}

// Issue 18836.
func TestEmptyCallStack(t *testing.T) {
	t.Parallel()
	var buf bytes.Buffer
	p := NewProfile("test18836")
	p.Add("foo", 47674)
	p.WriteTo(&buf, 1)
	p.Remove("foo")
	got := buf.String()
	prefix := "test18836 profile: total 1\n"
	if !strings.HasPrefix(got, prefix) {
		t.Fatalf("got:\n\t%q\nwant prefix:\n\t%q\n", got, prefix)
	}
	lostevent := "lostProfileEvent"
	if !strings.Contains(got, lostevent) {
		t.Fatalf("got:\n\t%q\ndoes not contain:\n\t%q\n", got, lostevent)
	}
}

func TestCPUProfileLabel(t *testing.T) {
	testCPUProfile(t, []string{"runtime/pprof.cpuHogger;key=value"}, func(dur time.Duration) {
		Do(context.Background(), Labels("key", "value"), func(context.Context) {
			cpuHogger(cpuHog1, dur)
		})
	})
}
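
For context on the public API these tests drive in memory, here is a minimal sketch of typical usage outside the test harness. It is not part of pprof_test.go; it assumes only the documented StartCPUProfile/StopCPUProfile and Do/Labels calls exercised by TestCPUProfile and TestCPUProfileLabel above, and the output file name cpu.out is arbitrary.

// Illustrative sketch only; not part of the test file above.
// Writes a CPU profile to a file instead of an in-memory buffer.
package main

import (
	"context"
	"log"
	"os"
	"runtime/pprof"
)

func main() {
	f, err := os.Create("cpu.out") // arbitrary output path for this sketch
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// Tag the work with a key=value label, as TestCPUProfileLabel does,
	// so samples can be grouped by label in the resulting profile.
	pprof.Do(context.Background(), pprof.Labels("key", "value"), func(context.Context) {
		salt := 0
		for i := 0; i < 1e8; i++ { // burn enough CPU to be sampled at 100 Hz
			salt = salt*salt + 1
		}
		_ = salt
	})
}

The resulting cpu.out can then be inspected with go tool pprof.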