// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"os"
	"reflect"
	"regexp"
	. "runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	_ "unsafe" // for go:linkname
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	// Raise GOMAXPROCS for the duration of the test; the inner call
	// returns the previous value, which the deferred call restores.
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				// Recurse with a ~1KB frame to force stack growth,
				// then sleep briefly at the bottom so many goroutines
				// hold grown stacks at once.
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	if *flagQuick {
		t.Skip("-quick")
	}

	if GOARCH == "wasm" {
		t.Skip("fails on wasm (too slow?)")
	}

	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
			s = nil // drop the only reference so the finalizer can run
			done <- true
		}()
		<-done
		GC()

		timeout := 20 * time.Second
		if s := os.Getenv("GO_TEST_TIMEOUT_SCALE"); s != "" {
			scale, err := strconv.Atoi(s)
			if err == nil {
				timeout *= time.Duration(scale)
			}
		}

		select {
		case <-done:
		case <-time.After(timeout):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ...
// and in init
//func init() {
//	growStack()
//}

// growStack repeatedly grows and shrinks the stack via growStackIter.
// If progress is non-nil, it is updated with the current iteration so a
// watchdog (see TestStackGrowth's finalizer case) can report progress.
func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on stack (and subsequently stack growth update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

// growStackWithCallback invokes cb at a range of recursion depths so that
// the callback runs near many different stack boundaries.
func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	// &y is captured by the deferred set; growStack forces a stack copy,
	// which must adjust that pointer.
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it's using panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
306 func TestDeferPtrsPanic(t *testing.T) { 307 for i := 0; i < 100; i++ { 308 c := make(chan int, 1) 309 go testDeferPtrsGoexit(c, i) 310 if n := <-c; n != 42 { 311 t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n) 312 } 313 } 314 } 315 316 func testDeferPtrsPanic(c chan int, i int) { 317 var y int 318 defer func() { 319 if recover() == nil { 320 c <- -1 321 return 322 } 323 c <- y 324 }() 325 defer setBig(&y, 42, bigBuf{}) 326 useStackAndCall(i, func() { panic(1) }) 327 } 328 329 //go:noinline 330 func testDeferLeafSigpanic1() { 331 // Cause a sigpanic to be injected in this frame. 332 // 333 // This function has to be declared before 334 // TestDeferLeafSigpanic so the runtime will crash if we think 335 // this function's continuation PC is in 336 // TestDeferLeafSigpanic. 337 *(*int)(nil) = 0 338 } 339 340 // TestDeferLeafSigpanic tests defer matching around leaf functions 341 // that sigpanic. This is tricky because on LR machines the outer 342 // function and the inner function have the same SP, but it's critical 343 // that we match up the defer correctly to get the right liveness map. 344 // See issue #25499. 345 func TestDeferLeafSigpanic(t *testing.T) { 346 // Push a defer that will walk the stack. 347 defer func() { 348 if err := recover(); err == nil { 349 t.Fatal("expected panic from nil pointer") 350 } 351 GC() 352 }() 353 // Call a leaf function. We must set up the exact call stack: 354 // 355 // defering function -> leaf function -> sigpanic 356 // 357 // On LR machines, the leaf function will have the same SP as 358 // the SP pushed for the defer frame. 359 testDeferLeafSigpanic1() 360 } 361 362 // TestPanicUseStack checks that a chain of Panic structs on the stack are 363 // updated correctly if the stack grows during the deferred execution that 364 // happens as a result of the panic. 
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

// makeTree builds a complete binary tree of depth d, used solely to give
// the GC nontrivial work during TestPanicFar.
func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// use about n KB of stack and call f
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

// growing serves stack-growth requests: for each value received on c it
// uses that much stack and signals done; it signals done once more after
// c is closed.
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4
		G = 200
		S = 5
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopyPtr(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			i := 1000000
			countp(&i)
			c <- true
		}()
		<-c
	}
}

// countp recursively decrements *n to zero; the pointer argument must be
// adjusted on each stack copy.
func countp(n *int) {
	if *n == 0 {
		return
	}
	*n--
	countp(n)
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

// count1..count23 form a mutually recursive cycle so the recursion spans
// many distinct functions (defeating per-function caching effects).
func count1(n int) int {
	if n <= 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int  { return 1 + count3(n-1) }
func count3(n int) int  { return 1 + count4(n-1) }
func count4(n int) int  { return 1 + count5(n-1) }
func count5(n int) int  { return 1 + count6(n-1) }
func count6(n int) int  { return 1 + count7(n-1) }
func count7(n int) int  { return 1 + count8(n-1) }
func count8(n int) int  { return 1 + count9(n-1) }
func count9(n int) int  { return 1 + count10(n-1) }
func count10(n int) int { return 1 + count11(n-1) }
func count11(n int) int { return 1 + count12(n-1) }
func count12(n int) int { return 1 + count13(n-1) }
func count13(n int) int { return 1 + count14(n-1) }
func count14(n int) int { return 1 + count15(n-1) }
func count15(n int) int { return 1 + count16(n-1) }
func count16(n int) int { return 1 + count17(n-1) }
func count17(n int) int { return 1 + count18(n-1) }
func count18(n int) int { return 1 + count19(n-1) }
func count19(n int) int { return 1 + count20(n-1) }
func count20(n int) int { return 1 + count21(n-1) }
func count21(n int) int { return 1 + count22(n-1) }
func count22(n int) int { return 1 + count23(n-1) }
func count23(n int) int { return 1 + count1(n-1) }

type stkobjT struct {
	p *stkobjT
	x int64
	y [20]int // consume some stack
}

// Sum creates a linked list of stkobjTs.
func Sum(n int64, p *stkobjT) {
	if n == 0 {
		return
	}
	s := stkobjT{p: p, x: n}
	Sum(n-1, &s)
	p.x += s.x
}

func BenchmarkStackCopyWithStkobj(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			var s stkobjT
			Sum(100000, &s)
			c <- true
		}()
		<-c
	}
}

type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though elide these when they don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}

func TestTracebackAncestors(t *testing.T) {
	goroutineRegex := regexp.MustCompile(`goroutine [0-9]+ \[`)
	for _, tracebackDepth := range []int{0, 1, 5, 50} {
		output := runTestProg(t, "testprog", "TracebackAncestors", fmt.Sprintf("GODEBUG=tracebackancestors=%d", tracebackDepth))

		numGoroutines := 3
		numFrames := 2
		ancestorsExpected := numGoroutines
		if numGoroutines > tracebackDepth {
			ancestorsExpected = tracebackDepth
		}

		matches := goroutineRegex.FindAllStringSubmatch(output, -1)
		if len(matches) != 2 {
			t.Fatalf("want 2 goroutines, got:\n%s", output)
		}

		// Check functions in the traceback.
		fns := []string{"main.recurseThenCallGo", "main.main", "main.printStack", "main.TracebackAncestors"}
		for _, fn := range fns {
			if !strings.Contains(output, "\n"+fn+"(") {
				t.Fatalf("expected %q function in traceback:\n%s", fn, output)
			}
		}

		if want, count := "originating from goroutine", ancestorsExpected; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(...)", ancestorsExpected*(numFrames+1); strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}

		if want, count := "main.recurseThenCallGo(0x", 1; strings.Count(output, want) != count {
			t.Errorf("output does not contain %d instances of %q:\n%s", count, want, output)
		}
	}
}

// Test that defer closure is correctly scanned when the stack is scanned.
func TestDeferLiveness(t *testing.T) {
	output := runTestProg(t, "testprog", "DeferLiveness", "GODEBUG=clobberfree=1")
	if output != "" {
		t.Errorf("output:\n%s\n\nwant no output", output)
	}
}

func TestDeferHeapAndStack(t *testing.T) {
	P := 4     // processors
	N := 10000 // iterations
	D := 200   // stack depth

	if testing.Short() {
		P /= 2
		N /= 10
		D /= 10
	}
	c := make(chan bool)
	for p := 0; p < P; p++ {
		go func() {
			for i := 0; i < N; i++ {
				if deferHeapAndStack(D) != 2*D {
					panic("bad result")
				}
			}
			c <- true
		}()
	}
	for p := 0; p < P; p++ {
		<-c
	}
}

// deferHeapAndStack(n) computes 2*n
func deferHeapAndStack(n int) (r int) {
	if n == 0 {
		return 0
	}
	if n%2 == 0 {
		// heap-allocated defers
		for i := 0; i < 2; i++ {
			defer func() {
				r++
			}()
		}
	} else {
		// stack-allocated defers
		defer func() {
			r++
		}()
		defer func() {
			r++
		}()
	}
	r = deferHeapAndStack(n - 1)
	escapeMe(new([1024]byte)) // force some GCs
	return
}

// Pass a value to escapeMe to force it to escape.
var escapeMe = func(x any) {}

// Test that when F -> G is inlined and F is excluded from stack
// traces, G still appears.
func TestTracebackInlineExcluded(t *testing.T) {
	defer func() {
		recover()
		buf := make([]byte, 4<<10)
		stk := string(buf[:Stack(buf, false)])

		t.Log(stk)

		if not := "tracebackExcluded"; strings.Contains(stk, not) {
			t.Errorf("found but did not expect %q", not)
		}
		if want := "tracebackNotExcluded"; !strings.Contains(stk, want) {
			t.Errorf("expected %q in stack", want)
		}
	}()
	tracebackExcluded()
}

// tracebackExcluded should be excluded from tracebacks. There are
// various ways this could come up. Linking it to a "runtime." name is
// rather synthetic, but it's easy and reliable. See issue #42754 for
// one way this happened in real code.
//
//go:linkname tracebackExcluded runtime.tracebackExcluded
//go:noinline
func tracebackExcluded() {
	// Call an inlined function that should not itself be excluded
	// from tracebacks.
	tracebackNotExcluded()
}

// tracebackNotExcluded should be inlined into tracebackExcluded, but
// should not itself be excluded from the traceback.
func tracebackNotExcluded() {
	var x *int
	*x = 0
}