github.com/c9s/go@v0.0.0-20180120015821-984e81f64e0c/src/runtime/stack_test.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"bytes"
	"fmt"
	"reflect"
	. "runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// TestStackMem measures per-thread stack segment cache behavior.
// The test consumed up to 500MB in the past.
func TestStackMem(t *testing.T) {
	const (
		BatchSize      = 32
		BatchCount     = 256
		ArraySize      = 1024
		RecursionDepth = 128
	)
	if testing.Short() {
		return
	}
	defer GOMAXPROCS(GOMAXPROCS(BatchSize))
	s0 := new(MemStats)
	ReadMemStats(s0)
	for b := 0; b < BatchCount; b++ {
		c := make(chan bool, BatchSize)
		for i := 0; i < BatchSize; i++ {
			go func() {
				var f func(k int, a [ArraySize]byte)
				f = func(k int, a [ArraySize]byte) {
					if k == 0 {
						time.Sleep(time.Millisecond)
						return
					}
					f(k-1, a)
				}
				f(RecursionDepth, [ArraySize]byte{})
				c <- true
			}()
		}
		for i := 0; i < BatchSize; i++ {
			<-c
		}

		// The goroutines have signaled via c that they are ready to exit.
		// Give them a chance to exit by sleeping. If we don't wait, we
		// might not reuse them on the next batch.
		time.Sleep(10 * time.Millisecond)
	}
	s1 := new(MemStats)
	ReadMemStats(s1)
	consumed := int64(s1.StackSys - s0.StackSys)
	t.Logf("Consumed %vMB for stack mem", consumed>>20)
	estimate := int64(8 * BatchSize * ArraySize * RecursionDepth) // 8 is to reduce flakiness.
	if consumed > estimate {
		t.Fatalf("Stack mem: want %v, got %v", estimate, consumed)
	}
	// Due to broken stack memory accounting (https://golang.org/issue/7468),
	// StackInuse can decrease during function execution, so we cast the values to int64.
	inuse := int64(s1.StackInuse) - int64(s0.StackInuse)
	t.Logf("Inuse %vMB for stack mem", inuse>>20)
	if inuse > 4<<20 {
		t.Fatalf("Stack inuse: want %v, got %v", 4<<20, inuse)
	}
}
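
// measureStackSys is a hypothetical helper (not part of the original tests)
// showing the measurement pattern TestStackMem relies on: snapshot MemStats,
// run a workload, and diff StackSys. For the test above, the expected bound
// works out to BatchSize goroutines * RecursionDepth frames * ArraySize bytes
// per frame (32*128*1024 = 4MB), times a slack factor of 8, i.e. 32MB.
func measureStackSys(workload func()) int64 {
	var before, after MemStats
	ReadMemStats(&before)
	workload()
	ReadMemStats(&after)
	// Diff StackSys, as TestStackMem does above.
	return int64(after.StackSys - before.StackSys)
}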

// Test stack growing in different contexts.
func TestStackGrowth(t *testing.T) {
	// Don't make this test parallel as this makes the 20 second
	// timeout unreliable on slow builders. (See issue #19381.)

	var wg sync.WaitGroup

	// in a normal goroutine
	var growDuration time.Duration // For debugging failures
	wg.Add(1)
	go func() {
		defer wg.Done()
		start := time.Now()
		growStack(nil)
		growDuration = time.Since(start)
	}()
	wg.Wait()

	// in a locked goroutine
	wg.Add(1)
	go func() {
		defer wg.Done()
		LockOSThread()
		growStack(nil)
		UnlockOSThread()
	}()
	wg.Wait()

	// in a finalizer
	wg.Add(1)
	go func() {
		defer wg.Done()
		done := make(chan bool)
		var startTime time.Time
		var started, progress uint32
		go func() {
			s := new(string)
			SetFinalizer(s, func(ss *string) {
				startTime = time.Now()
				atomic.StoreUint32(&started, 1)
				growStack(&progress)
				done <- true
			})
			s = nil
			done <- true
		}()
		<-done
		GC()
		select {
		case <-done:
		case <-time.After(20 * time.Second):
			if atomic.LoadUint32(&started) == 0 {
				t.Log("finalizer did not start")
			} else {
				t.Logf("finalizer started %s ago and finished %d iterations", time.Since(startTime), atomic.LoadUint32(&progress))
			}
			t.Log("first growStack took", growDuration)
			t.Error("finalizer did not run")
			return
		}
	}()
	wg.Wait()
}

// ... and in init
//func init() {
//	growStack()
//}

// growStack repeatedly grows the stack to increasing depths, checking each
// time that a stack-allocated value survives. If progress is non-nil, it is
// updated with the current iteration so a hung caller can report progress.
func growStack(progress *uint32) {
	n := 1 << 10
	if testing.Short() {
		n = 1 << 8
	}
	for i := 0; i < n; i++ {
		x := 0
		growStackIter(&x, i)
		if x != i+1 {
			panic("stack is corrupted")
		}
		if progress != nil {
			atomic.StoreUint32(progress, uint32(i))
		}
	}
	GC()
}

// This function is not an anonymous func, so that the compiler can do escape
// analysis and place x on the stack (so that stack growth must subsequently
// update the pointer).
func growStackIter(p *int, n int) {
	if n == 0 {
		*p = n + 1
		GC()
		return
	}
	*p = n + 1
	x := 0
	growStackIter(&x, n-1)
	if x != n {
		panic("stack is corrupted")
	}
}

func TestStackGrowthCallback(t *testing.T) {
	t.Parallel()
	var wg sync.WaitGroup

	// test stack growth at chan op
	wg.Add(1)
	go func() {
		defer wg.Done()
		c := make(chan int, 1)
		growStackWithCallback(func() {
			c <- 1
			<-c
		})
	}()

	// test stack growth at map op
	wg.Add(1)
	go func() {
		defer wg.Done()
		m := make(map[int]int)
		growStackWithCallback(func() {
			_, _ = m[1]
			m[1] = 1
		})
	}()

	// test stack growth at goroutine creation
	wg.Add(1)
	go func() {
		defer wg.Done()
		growStackWithCallback(func() {
			done := make(chan bool)
			go func() {
				done <- true
			}()
			<-done
		})
	}()
	wg.Wait()
}

func growStackWithCallback(cb func()) {
	var f func(n int)
	f = func(n int) {
		if n == 0 {
			cb()
			return
		}
		f(n - 1)
	}
	for i := 0; i < 1<<10; i++ {
		f(i)
	}
}
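
// checkPtrSurvivesGrowth is a minimal sketch (hypothetical, not used by the
// tests above) of the invariant growStackIter exercises: a pointer to a
// stack-allocated local must remain valid across a stack copy, because
// copystack rewrites such pointers when it moves the stack.
func checkPtrSurvivesGrowth() bool {
	x := 7
	p := &x
	useStack(64)   // burn ~64KB of stack, enough to force growth from the small initial stack
	return *p == 7 // if x moved, p must have been adjusted to follow it
}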

// TestDeferPtrs tests the adjustment of Defer's argument pointers (p aka &y)
// during a stack copy.
func set(p *int, x int) {
	*p = x
}
func TestDeferPtrs(t *testing.T) {
	var y int

	defer func() {
		if y != 42 {
			t.Errorf("defer's stack references were not adjusted appropriately")
		}
	}()
	defer set(&y, 42)
	growStack(nil)
}

type bigBuf [4 * 1024]byte

// TestDeferPtrsGoexit is like TestDeferPtrs but exercises the possibility that the
// stack grows as part of starting the deferred function. It calls Goexit at various
// stack depths, forcing the deferred function (with >4kB of args) to be run at
// the bottom of the stack. The goal is to find a stack depth less than 4kB from
// the end of the stack. Each trial runs in a different goroutine so that an earlier
// stack growth does not invalidate a later attempt.
func TestDeferPtrsGoexit(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsGoexit(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsGoexit(c chan int, i int) {
	var y int
	defer func() {
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, Goexit)
}

// setBig is like set but carries >4kB of arguments, so running it as a
// deferred call is likely to require stack growth.
func setBig(p *int, x int, b bigBuf) {
	*p = x
}

// TestDeferPtrsPanic is like TestDeferPtrsGoexit, but it uses panic instead
// of Goexit to run the Defers. Those two are different execution paths
// in the runtime.
func TestDeferPtrsPanic(t *testing.T) {
	for i := 0; i < 100; i++ {
		c := make(chan int, 1)
		go testDeferPtrsPanic(c, i)
		if n := <-c; n != 42 {
			t.Fatalf("defer's stack references were not adjusted appropriately (i=%d n=%d)", i, n)
		}
	}
}

func testDeferPtrsPanic(c chan int, i int) {
	var y int
	defer func() {
		if recover() == nil {
			c <- -1
			return
		}
		c <- y
	}()
	defer setBig(&y, 42, bigBuf{})
	useStackAndCall(i, func() { panic(1) })
}
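
// Illustrative note (not from the original file): the deferred setBig call's
// argument frame records &y when the defer is queued. If the stack is copied
// before the defer runs (the >4kB bigBuf argument makes growth at defer-run
// time likely), the runtime must rewrite that recorded pointer; otherwise
// setBig would write through a dangling address into the old stack.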

// TestPanicUseStack checks that the chain of Panic structs on the stack is
// updated correctly if the stack grows during the deferred execution that
// happens as a result of the panic.
func TestPanicUseStack(t *testing.T) {
	pc := make([]uintptr, 10000)
	defer func() {
		recover()
		Callers(0, pc) // force stack walk
		useStackAndCall(100, func() {
			defer func() {
				recover()
				Callers(0, pc) // force stack walk
				useStackAndCall(200, func() {
					defer func() {
						recover()
						Callers(0, pc) // force stack walk
					}()
					panic(3)
				})
			}()
			panic(2)
		})
	}()
	panic(1)
}

func TestPanicFar(t *testing.T) {
	var xtree *xtreeNode
	pc := make([]uintptr, 10000)
	defer func() {
		// At this point we created a large stack and unwound
		// it via recovery. Force a stack walk, which will
		// check the stack's consistency.
		Callers(0, pc)
	}()
	defer func() {
		recover()
	}()
	useStackAndCall(100, func() {
		// Kick off the GC and make it do something nontrivial.
		// (This used to force stack barriers to stick around.)
		xtree = makeTree(18)
		// Give the GC time to start scanning stacks.
		time.Sleep(time.Millisecond)
		panic(1)
	})
	_ = xtree
}

type xtreeNode struct {
	l, r *xtreeNode
}

func makeTree(d int) *xtreeNode {
	if d == 0 {
		return new(xtreeNode)
	}
	return &xtreeNode{makeTree(d - 1), makeTree(d - 1)}
}

// useStackAndCall consumes about n KB of stack, then calls f.
func useStackAndCall(n int, f func()) {
	if n == 0 {
		f()
		return
	}
	var b [1024]byte // makes frame about 1KB
	// b is zeroed, so b[99] is always 0; reading it keeps b live in the
	// frame without changing the recursion count.
	useStackAndCall(n-1+int(b[99]), f)
}

func useStack(n int) {
	useStackAndCall(n, func() {})
}

// growing serves stack-use requests: for each n received on c it uses about
// n KB of stack and then signals done. A final done send acknowledges that c
// was closed.
func growing(c chan int, done chan struct{}) {
	for n := range c {
		useStack(n)
		done <- struct{}{}
	}
	done <- struct{}{}
}
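
// Background for TestStackCache (illustrative note, not from the original
// file): the runtime caches freed stacks of small, common sizes so that
// goroutine churn does not go through the global allocator on every stack
// allocation and release. The waves of goroutines below repeatedly grow and
// release stacks to exercise that cache.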

func TestStackCache(t *testing.T) {
	// Allocate a bunch of goroutines and grow their stacks.
	// Repeat a few times to test the stack cache.
	const (
		R = 4
		G = 200
		S = 5
	)
	for i := 0; i < R; i++ {
		var reqchans [G]chan int
		done := make(chan struct{})
		for j := 0; j < G; j++ {
			reqchans[j] = make(chan int)
			go growing(reqchans[j], done)
		}
		for s := 0; s < S; s++ {
			for j := 0; j < G; j++ {
				reqchans[j] <- 1 << uint(s)
			}
			for j := 0; j < G; j++ {
				<-done
			}
		}
		for j := 0; j < G; j++ {
			close(reqchans[j])
		}
		for j := 0; j < G; j++ {
			<-done
		}
	}
}

func TestStackOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, false)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackAllOutput(t *testing.T) {
	b := make([]byte, 1024)
	stk := string(b[:Stack(b, true)])
	if !strings.HasPrefix(stk, "goroutine ") {
		t.Errorf("Stack (len %d):\n%s", len(stk), stk)
		t.Errorf("Stack output should begin with \"goroutine \"")
	}
}

func TestStackPanic(t *testing.T) {
	// Test that stack copying copies panics correctly. This is difficult
	// to test because it is very unlikely that the stack will be copied
	// in the middle of gopanic. But it can happen.
	// To make this test effective, edit panic.go:gopanic and uncomment
	// the GC() call just before freedefer(d).
	defer func() {
		if x := recover(); x == nil {
			t.Errorf("recover failed")
		}
	}()
	useStack(32)
	panic("test panic")
}

func BenchmarkStackCopy(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count(1000000)
			c <- true
		}()
		<-c
	}
}

func count(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count(n-1)
}

func BenchmarkStackCopyNoCache(b *testing.B) {
	c := make(chan bool)
	for i := 0; i < b.N; i++ {
		go func() {
			count1(1000000)
			c <- true
		}()
		<-c
	}
}

// count1 through count23 form a recursion cycle through many distinct
// functions, so that stack copying in the benchmark above cannot keep
// serving every unwind lookup from cached metadata for a single hot
// function (hence "NoCache").
func count1(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count2(n-1)
}

func count2(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count3(n-1)
}

func count3(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count4(n-1)
}

func count4(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count5(n-1)
}

func count5(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count6(n-1)
}

func count6(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count7(n-1)
}

func count7(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count8(n-1)
}

func count8(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count9(n-1)
}

func count9(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count10(n-1)
}

func count10(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count11(n-1)
}

func count11(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count12(n-1)
}

func count12(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count13(n-1)
}

func count13(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count14(n-1)
}

func count14(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count15(n-1)
}

func count15(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count16(n-1)
}

func count16(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count17(n-1)
}

func count17(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count18(n-1)
}

func count18(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count19(n-1)
}

func count19(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count20(n-1)
}

func count20(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count21(n-1)
}

func count21(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count22(n-1)
}

func count22(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count23(n-1)
}

func count23(n int) int {
	if n == 0 {
		return 0
	}
	return 1 + count1(n-1)
}

type structWithMethod struct{}

func (s structWithMethod) caller() string {
	_, file, line, ok := Caller(1)
	if !ok {
		panic("Caller failed")
	}
	return fmt.Sprintf("%s:%d", file, line)
}

func (s structWithMethod) callers() []uintptr {
	pc := make([]uintptr, 16)
	return pc[:Callers(0, pc)]
}

func (s structWithMethod) stack() string {
	buf := make([]byte, 4<<10)
	return string(buf[:Stack(buf, false)])
}

func (s structWithMethod) nop() {}
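
// Background for the wrapper tests below (illustrative note): a method
// expression on the pointer type, as in
//
//	wrapper := (*structWithMethod).caller
//	wrapper(&d)
//
// makes the compiler synthesize an <autogenerated> wrapper that adapts the
// receiver and calls the real method. Stack traces normally elide these
// wrappers; the tests check both the elision and the panic-time exception.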

func TestStackWrapperCaller(t *testing.T) {
	var d structWithMethod
	// Force the compiler to construct a wrapper method.
	wrapper := (*structWithMethod).caller
	// Check that the wrapper doesn't affect the stack trace.
	if dc, ic := d.caller(), wrapper(&d); dc != ic {
		t.Fatalf("direct caller %q != indirect caller %q", dc, ic)
	}
}

func TestStackWrapperCallers(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).callers
	// Check that <autogenerated> doesn't appear in the stack trace.
	pcs := wrapper(&d)
	frames := CallersFrames(pcs)
	for {
		fr, more := frames.Next()
		if fr.File == "<autogenerated>" {
			t.Fatalf("<autogenerated> appears in stack trace: %+v", fr)
		}
		if !more {
			break
		}
	}
}

func TestStackWrapperStack(t *testing.T) {
	var d structWithMethod
	wrapper := (*structWithMethod).stack
	// Check that <autogenerated> doesn't appear in the stack trace.
	stk := wrapper(&d)
	if strings.Contains(stk, "<autogenerated>") {
		t.Fatalf("<autogenerated> appears in stack trace:\n%s", stk)
	}
}

type I interface {
	M()
}

func TestStackWrapperStackPanic(t *testing.T) {
	t.Run("sigpanic", func(t *testing.T) {
		// Nil calls to interface methods cause a sigpanic.
		testStackWrapperPanic(t, func() { I.M(nil) }, "runtime_test.I.M")
	})
	t.Run("panicwrap", func(t *testing.T) {
		// Nil calls to value method wrappers call panicwrap.
		wrapper := (*structWithMethod).nop
		testStackWrapperPanic(t, func() { wrapper(nil) }, "runtime_test.(*structWithMethod).nop")
	})
}

func testStackWrapperPanic(t *testing.T, cb func(), expect string) {
	// Test that the stack trace from a panicking wrapper includes
	// the wrapper, even though we elide these frames when they
	// don't panic.
	t.Run("CallersFrames", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			pcs := make([]uintptr, 10)
			n := Callers(0, pcs)
			frames := CallersFrames(pcs[:n])
			for {
				frame, more := frames.Next()
				t.Log(frame.Function)
				if frame.Function == expect {
					return
				}
				if !more {
					break
				}
			}
			t.Fatalf("panicking wrapper %s missing from stack trace", expect)
		}()
		cb()
	})
	t.Run("Stack", func(t *testing.T) {
		defer func() {
			err := recover()
			if err == nil {
				t.Fatalf("expected panic")
			}
			buf := make([]byte, 4<<10)
			stk := string(buf[:Stack(buf, false)])
			if !strings.Contains(stk, "\n"+expect) {
				t.Fatalf("panicking wrapper %s missing from stack trace:\n%s", expect, stk)
			}
		}()
		cb()
	})
}

func TestCallersFromWrapper(t *testing.T) {
	// Test that invoking CallersFrames on a stack where the first
	// PC is an autogenerated wrapper keeps the wrapper in the
	// trace. Normally we elide these, assuming that the wrapper
	// calls the thing you actually wanted to see, but in this
	// case we need to keep it.
	pc := reflect.ValueOf(I.M).Pointer()
	frames := CallersFrames([]uintptr{pc})
	frame, more := frames.Next()
	if frame.Function != "runtime_test.I.M" {
		t.Fatalf("want function %s, got %s", "runtime_test.I.M", frame.Function)
	}
	if more {
		t.Fatalf("want 1 frame, got > 1")
	}
}
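
// callerFunctions shows the usual multi-PC CallersFrames idiom, for contrast
// with the single-PC case above (hypothetical helper, not used by the tests):
func callerFunctions(skip int) []string {
	pcs := make([]uintptr, 32)
	n := Callers(skip+2, pcs) // +2 skips Callers and callerFunctions itself
	frames := CallersFrames(pcs[:n])
	var fns []string
	for {
		fr, more := frames.Next()
		fns = append(fns, fr.Function)
		if !more {
			break
		}
	}
	return fns
}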

func TestTracebackSystemstack(t *testing.T) {
	if GOARCH == "ppc64" || GOARCH == "ppc64le" {
		t.Skip("systemstack tail call not implemented on ppc64x")
	}

	// Test that profiles correctly jump over systemstack,
	// including nested systemstack calls.
	pcs := make([]uintptr, 20)
	pcs = pcs[:TracebackSystemstack(pcs, 5)]
	// Check that runtime.TracebackSystemstack appears five times
	// and that we see TestTracebackSystemstack.
	countIn, countOut := 0, 0
	frames := CallersFrames(pcs)
	var tb bytes.Buffer
	for {
		frame, more := frames.Next()
		fmt.Fprintf(&tb, "\n%s+0x%x %s:%d", frame.Function, frame.PC-frame.Entry, frame.File, frame.Line)
		switch frame.Function {
		case "runtime.TracebackSystemstack":
			countIn++
		case "runtime_test.TestTracebackSystemstack":
			countOut++
		}
		if !more {
			break
		}
	}
	if countIn != 5 || countOut != 1 {
		t.Fatalf("expected 5 calls to TracebackSystemstack and 1 call to TestTracebackSystemstack, got:%s", tb.String())
	}
}
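
// For reference (not part of the original file): TracebackSystemstack is a
// test-only export from the runtime package (see runtime/export_test.go), and
// the tests in this file can be run in isolation with, e.g.:
//
//	go test -run 'TestStack|TestDeferPtrs|TestPanic|TestTraceback' runtime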