github.com/c9s/go@v0.0.0-20180120015821-984e81f64e0c/src/runtime/traceback.go (about) 1 // Copyright 2009 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "runtime/internal/atomic" 9 "runtime/internal/sys" 10 "unsafe" 11 ) 12 13 // The code in this file implements stack trace walking for all architectures. 14 // The most important fact about a given architecture is whether it uses a link register. 15 // On systems with link registers, the prologue for a non-leaf function stores the 16 // incoming value of LR at the bottom of the newly allocated stack frame. 17 // On systems without link registers, the architecture pushes a return PC during 18 // the call instruction, so the return PC ends up above the stack frame. 19 // In this file, the return PC is always called LR, no matter how it was found. 20 // 21 // To date, the opposite of a link register architecture is an x86 architecture. 22 // This code may need to change if some other kind of non-link-register 23 // architecture comes along. 24 // 25 // The other important fact is the size of a pointer: on 32-bit systems the LR 26 // takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes. 27 // Typically this is ptrSize. 28 // 29 // As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still 30 // stores an 8-byte return PC onto the stack. To accommodate this, we use regSize 31 // as the size of the architecture-pushed return PC. 32 // 33 // usesLR is defined below in terms of minFrameSize, which is defined in 34 // arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go. 
// usesLR reports whether this architecture stores the return address in a
// link register (LR) rather than pushing it on the stack (x86-style).
const usesLR = sys.MinFrameSize > 0

var (
	// initialized in tracebackinit
	goexitPC             uintptr
	jmpdeferPC           uintptr
	mcallPC              uintptr
	morestackPC          uintptr
	mstartPC             uintptr
	rt0_goPC             uintptr
	sigpanicPC           uintptr
	runfinqPC            uintptr
	bgsweepPC            uintptr
	forcegchelperPC      uintptr
	timerprocPC          uintptr
	gcBgMarkWorkerPC     uintptr
	systemstack_switchPC uintptr
	systemstackPC        uintptr
	cgocallback_gofuncPC uintptr
	skipPC               uintptr

	gogoPC uintptr

	externalthreadhandlerp uintptr // initialized elsewhere
)

// tracebackinit records the entry PCs of special runtime functions so the
// unwinder can recognize them (e.g. in topofstack and isSystemGoroutine).
func tracebackinit() {
	// Go variable initialization happens late during runtime startup.
	// Instead of initializing the variables above in the declarations,
	// schedinit calls this function so that the variables are
	// initialized and available earlier in the startup sequence.
	goexitPC = funcPC(goexit)
	jmpdeferPC = funcPC(jmpdefer)
	mcallPC = funcPC(mcall)
	morestackPC = funcPC(morestack)
	mstartPC = funcPC(mstart)
	rt0_goPC = funcPC(rt0_go)
	sigpanicPC = funcPC(sigpanic)
	runfinqPC = funcPC(runfinq)
	bgsweepPC = funcPC(bgsweep)
	forcegchelperPC = funcPC(forcegchelper)
	timerprocPC = funcPC(timerproc)
	gcBgMarkWorkerPC = funcPC(gcBgMarkWorker)
	systemstack_switchPC = funcPC(systemstack_switch)
	systemstackPC = funcPC(systemstack)
	cgocallback_gofuncPC = funcPC(cgocallback_gofunc)
	skipPC = funcPC(skipPleaseUseCallersFrames)

	// used by sigprof handler
	gogoPC = funcPC(gogo)
}

// Traceback over the deferred function calls.
// Report them like calls that have been invoked but not started executing yet.
// callback is invoked for each deferred call with a synthesized stkframe;
// iteration stops early if callback returns false.
func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
	var frame stkframe
	for d := gp._defer; d != nil; d = d.link {
		fn := d.fn
		if fn == nil {
			// Defer of nil function. Args don't matter.
			frame.pc = 0
			frame.fn = funcInfo{}
			frame.argp = 0
			frame.arglen = 0
			frame.argmap = nil
		} else {
			frame.pc = fn.fn
			f := findfunc(frame.pc)
			if !f.valid() {
				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
				throw("unknown pc")
			}
			frame.fn = f
			frame.argp = uintptr(deferArgs(d))
			frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
		}
		frame.continpc = frame.pc
		if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
			return
		}
	}
}

const sizeofSkipFunction = 256

// This function is defined in asm.s to be sizeofSkipFunction bytes long.
func skipPleaseUseCallersFrames()

// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
// duplicating the code and all its subtlety.
//
// The skip argument is only valid with pcbuf != nil and counts the number
// of logical frames to skip rather than physical frames (with inlining, a
// PC in pcbuf can represent multiple calls). If a PC is partially skipped
// and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where
// N indicates the number of logical frames to skip in pcbuf[0].
//
// Returns the number of frames recorded (or printed, when printing).
func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
	if skip > 0 && callback != nil {
		throw("gentraceback callback cannot be used with non-zero skip")
	}
	if goexitPC == 0 {
		throw("gentraceback before goexitPC initialization")
	}
	g := getg()
	if g == gp && g == g.m.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and gentraceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that gentraceback and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling getcallersp) must not run on that goroutine's stack but
		// instead on the g0 stack.
		throw("gentraceback cannot trace user goroutine on its own stack")
	}
	level, _, _ := gotraceback()

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.syscallsp != 0 {
			pc0 = gp.syscallpc
			sp0 = gp.syscallsp
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.sched.pc
			sp0 = gp.sched.sp
			if usesLR {
				lr0 = gp.sched.lr
			}
		}
	}

	nprint := 0
	var frame stkframe
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}
	waspanic := false
	cgoCtxt := gp.cgoCtxt
	printing := pcbuf == nil && callback == nil
	_defer := gp._defer
	elideWrapper := false

	// Skip defer records with no argument frame (sp == _NoArgs);
	// they cannot correspond to a stack frame below.
	for _defer != nil && _defer.sp == _NoArgs {
		_defer = _defer.link
	}

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
			frame.sp += sys.RegSize
		}
	}

	f := findfunc(frame.pc)
	if !f.valid() {
		if callback != nil {
			print("runtime: unknown pc ", hex(frame.pc), "\n")
			throw("unknown pc")
		}
		return 0
	}
	frame.fn = f

	var cache pcvalueCache

	n := 0
	for n < max {
		// Typically:
		//	pc is the PC of the running function.
		//	sp is the stack pointer at that program counter.
		//	fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
		//	stk is the stack containing sp.
		//	The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
		f = frame.fn
		if f.pcsp == 0 {
			// No frame information, must be external function, like race support.
			// See golang.org/issue/13568.
			break
		}

		// Found an actual function.
		// Derive frame pointer and link register.
		if frame.fp == 0 {
			// We want to jump over the systemstack switch. If we're running on the
			// g0, this systemstack is at the top of the stack.
			// if we're not on g0 or there's a no curg, then this is a regular call.
			sp := frame.sp
			if flags&_TraceJumpStack != 0 && f.entry == systemstackPC && gp == g.m.g0 && gp.m.curg != nil {
				sp = gp.m.curg.sched.sp
				frame.sp = sp
				cgoCtxt = gp.m.curg.cgoCtxt
			}
			frame.fp = sp + uintptr(funcspdelta(f, frame.pc, &cache))
			if !usesLR {
				// On x86, call instruction pushes return PC before entering new function.
				frame.fp += sys.RegSize
			}
		}
		var flr funcInfo
		if topofstack(f) {
			frame.lr = 0
			flr = funcInfo{}
		} else if usesLR && f.entry == jmpdeferPC {
			// jmpdefer modifies SP/LR/PC non-atomically.
			// If a profiling interrupt arrives during jmpdefer,
			// the stack unwind may see a mismatched register set
			// and get confused. Stop if we see PC within jmpdefer
			// to avoid that confusion.
			// See golang.org/issue/8153.
			if callback != nil {
				throw("traceback_arm: found jmpdefer when tracing with callback")
			}
			frame.lr = 0
		} else {
			var lrPtr uintptr
			if usesLR {
				if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
					lrPtr = frame.sp
					frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
				}
			} else {
				if frame.lr == 0 {
					lrPtr = frame.fp - sys.RegSize
					frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
				}
			}
			flr = findfunc(frame.lr)
			if !flr.valid() {
				// This happens if you get a profiling interrupt at just the wrong time.
				// In that context it is okay to stop early.
				// But if callback is set, we're doing a garbage collection and must
				// get everything, so crash loudly.
				if callback != nil {
					print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
					throw("unknown caller pc")
				}
			}
		}

		frame.varp = frame.fp
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.varp -= sys.RegSize
		}

		// If framepointer_enabled and there's a frame, then
		// there's a saved bp here.
		if framepointer_enabled && GOARCH == "amd64" && frame.varp > frame.sp {
			frame.varp -= sys.RegSize
		}

		// Derive size of arguments.
		// Most functions have a fixed-size argument block,
		// so we can use metadata about the function f.
		// Not all, though: there are some variadic functions
		// in package runtime and reflect, and for those we use call-specific
		// metadata recorded by f's caller.
		if callback != nil || printing {
			frame.argp = frame.fp + sys.MinFrameSize
			frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, nil)
		}

		// Determine frame's 'continuation PC', where it can continue.
		// Normally this is the return address on the stack, but if sigpanic
		// is immediately below this function on the stack, then the frame
		// stopped executing due to a trap, and frame.pc is probably not
		// a safe point for looking up liveness information. In this panicking case,
		// the function either doesn't return at all (if it has no defers or if the
		// defers do not recover) or it returns from one of the calls to
		// deferproc a second time (if the corresponding deferred func recovers).
		// It suffices to assume that the most recent deferproc is the one that
		// returns; everything live at earlier deferprocs is still live at that one.
		frame.continpc = frame.pc
		if waspanic {
			if _defer != nil && _defer.sp == frame.sp {
				frame.continpc = _defer.pc
			} else {
				frame.continpc = 0
			}
		}

		// Unwind our local defer stack past this frame.
		for _defer != nil && (_defer.sp == frame.sp || _defer.sp == _NoArgs) {
			_defer = _defer.link
		}

		if callback != nil {
			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
				return n
			}
		}

		if pcbuf != nil {
			if skip == 0 {
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc
			} else {
				// backup to CALL instruction to read inlining info (same logic as below)
				tracepc := frame.pc
				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
					tracepc--
				}
				inldata := funcdata(f, _FUNCDATA_InlTree)

				// no inlining info, skip the physical frame
				if inldata == nil {
					skip--
					goto skipped
				}

				ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
				inltree := (*[1 << 20]inlinedCall)(inldata)
				// skip the logical (inlined) frames
				logicalSkipped := 0
				for ix >= 0 && skip > 0 {
					skip--
					logicalSkipped++
					ix = inltree[ix].parent
				}

				// skip the physical frame if there's more to skip
				if skip > 0 {
					skip--
					goto skipped
				}

				// now we have a partially skipped frame
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc

				// if there's room, pcbuf[1] is a skip PC that encodes the number of skipped frames in pcbuf[0]
				if n+1 < max {
					n++
					skipPC := funcPC(skipPleaseUseCallersFrames) + uintptr(logicalSkipped)
					(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = skipPC
				}
			}
		}

		if printing {
			// assume skip=0 for printing.
			//
			// Never elide wrappers if we haven't printed
			// any frames. And don't elide wrappers that
			// called panic rather than the wrapped
			// function. Otherwise, leave them out.
			name := funcname(f)
			nextElideWrapper := elideWrapperCalling(name)
			if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, elideWrapper && nprint != 0) {
				// Print during crash.
				//	main(0x1, 0x2, 0x3)
				//		/home/rsc/go/src/runtime/x.go:23 +0xf
				//
				tracepc := frame.pc // back up to CALL instruction for funcline.
				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
					tracepc--
				}
				file, line := funcline(f, tracepc)
				inldata := funcdata(f, _FUNCDATA_InlTree)
				if inldata != nil {
					// Print inlined (logical) frames innermost-first
					// before the physical frame itself.
					inltree := (*[1 << 20]inlinedCall)(inldata)
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
					for ix != -1 {
						name := funcnameFromNameoff(f, inltree[ix].func_)
						print(name, "(...)\n")
						print("\t", file, ":", line, "\n")

						file = funcfile(f, inltree[ix].file)
						line = inltree[ix].line
						ix = inltree[ix].parent
					}
				}
				if name == "runtime.gopanic" {
					name = "panic"
				}
				print(name, "(")
				argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
				for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
					if i >= 10 {
						print(", ...")
						break
					}
					if i != 0 {
						print(", ")
					}
					print(hex(argp[i]))
				}
				print(")\n")
				print("\t", file, ":", line)
				if frame.pc > f.entry {
					print(" +", hex(frame.pc-f.entry))
				}
				if g.m.throwing > 0 && gp == g.m.curg || level >= 2 {
					print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
				}
				print("\n")
				nprint++
			}
			elideWrapper = nextElideWrapper
		}
		n++

	skipped:
		if f.entry == cgocallback_gofuncPC && len(cgoCtxt) > 0 {
			ctxt := cgoCtxt[len(cgoCtxt)-1]
			cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]

			// skip only applies to Go frames.
			// callback != nil only used when we only care
			// about Go frames.
			if skip == 0 && callback == nil {
				n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
			}
		}

		waspanic = f.entry == sigpanicPC

		// Do not unwind past the bottom of the stack.
		if !flr.valid() {
			break
		}

		// Unwind to next frame.
		frame.fn = flr
		frame.pc = frame.lr
		frame.lr = 0
		frame.sp = frame.fp
		frame.fp = 0
		frame.argmap = nil

		// On link register architectures, sighandler saves the LR on stack
		// before faking a call to sigpanic.
		if usesLR && waspanic {
			x := *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.sp += sys.MinFrameSize
			if GOARCH == "arm64" {
				// arm64 needs 16-byte aligned SP, always
				frame.sp += sys.PtrSize
			}
			f = findfunc(frame.pc)
			frame.fn = f
			if !f.valid() {
				frame.pc = x
			} else if funcspdelta(f, frame.pc, &cache) == 0 {
				frame.lr = x
			}
		}
	}

	if printing {
		n = nprint
	}

	// If callback != nil, we're being called to gather stack information during
	// garbage collection or stack growth. In that context, require that we used
	// up the entire defer stack. If not, then there is a bug somewhere and the
	// garbage collection or stack growth may not have seen the correct picture
	// of the stack. Crash now instead of silently executing the garbage collection
	// or stack copy incorrectly and setting up for a mysterious crash later.
	//
	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running, d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	// It's okay in those situations not to use up the entire defer stack:
	// incomplete information then is still better than nothing.
	if callback != nil && n < max && _defer != nil {
		// Note: the inner _defer != nil check below is redundant given the
		// condition above, but harmless.
		if _defer != nil {
			print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
		}
		for _defer = gp._defer; _defer != nil; _defer = _defer.link {
			print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
		}
		throw("traceback has leftover defers")
	}

	if callback != nil && n < max && frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
		throw("traceback did not unwind completely")
	}

	return n
}

// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
// and reflect.methodValue.
type reflectMethodValue struct {
	fn    uintptr
	stack *bitvector // args bitmap
}

// getArgInfo returns the argument frame information for a call to f
// with call frame frame.
//
// This is used for both actual calls with active stack frames and for
// deferred calls that are not yet executing. If this is an actual
// call, ctxt must be nil (getArgInfo will retrieve what it needs from
// the active stack frame). If this is a deferred call, ctxt must be
// the function object that was deferred.
func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
	arglen = uintptr(f.args)
	if needArgMap && f.args == _ArgsSizeUnknown {
		// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
		switch funcname(f) {
		case "reflect.makeFuncStub", "reflect.methodValueCall":
			// These take a *reflect.methodValue as their
			// context register.
			var mv *reflectMethodValue
			if ctxt != nil {
				// This is not an actual call, but a
				// deferred call. The function value
				// is itself the *reflect.methodValue.
				mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
			} else {
				// This is a real call that took the
				// *reflect.methodValue as its context
				// register and immediately saved it
				// to 0(SP). Get the methodValue from
				// 0(SP).
				arg0 := frame.sp + sys.MinFrameSize
				mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
			}
			if mv.fn != f.entry {
				print("runtime: confused by ", funcname(f), "\n")
				throw("reflect mismatch")
			}
			bv := mv.stack
			arglen = uintptr(bv.n * sys.PtrSize)
			argmap = bv
		}
	}
	return
}

// tracebackCgoContext handles tracing back a cgo context value, from
// the context argument to setCgoTraceback, for the gentraceback
// function. It returns the new value of n.
func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
	var cgoPCs [32]uintptr
	cgoContextPCs(ctxt, cgoPCs[:])
	var arg cgoSymbolizerArg
	anySymbolized := false
	for _, pc := range cgoPCs {
		if pc == 0 || n >= max {
			break
		}
		if pcbuf != nil {
			(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
		}
		if printing {
			if cgoSymbolizer == nil {
				print("non-Go function at pc=", hex(pc), "\n")
			} else {
				c := printOneCgoTraceback(pc, max-n, &arg)
				n += c - 1 // +1 a few lines down
				anySymbolized = true
			}
		}
		n++
	}
	if anySymbolized {
		// Signal the symbolizer that we are done with this traceback
		// so it can release any cached resources.
		arg.pc = 0
		callCgoSymbolizer(&arg)
	}
	return n
}

// printcreatedby prints the "created by" line for gp's traceback.
func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	f := findfunc(pc)
	if f.valid() && showframe(f, gp, false, false) && gp.goid != 1 {
		print("created by ", funcname(f), "\n")
		tracepc := pc // back up to CALL instruction for funcline.
		if pc > f.entry {
			tracepc -= sys.PCQuantum
		}
		file, line := funcline(f, tracepc)
		print("\t", file, ":", line)
		if pc > f.entry {
			print(" +", hex(pc-f.entry))
		}
		print("\n")
	}
}

// traceback prints a traceback of gp starting at pc/sp/lr,
// omitting runtime frames by default.
func traceback(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, 0)
}

// tracebacktrap is like traceback but expects that the PC and SP were obtained
// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
// rewind it into the CALL instruction.)
func tracebacktrap(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, _TraceTrap)
}

// traceback1 is the common implementation of traceback and tracebacktrap.
// It prints any pending cgo traceback, then the Go traceback, then the
// "created by" line.
func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
	// If the goroutine is in cgo, and we have a cgo traceback, print that.
	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
		// Lock cgoCallers so that a signal handler won't
		// change it, copy the array, reset it, unlock it.
		// We are locked to the thread and are not running
		// concurrently with a signal handler.
		// We just have to stop a signal handler from interrupting
		// in the middle of our copy.
		atomic.Store(&gp.m.cgoCallersUse, 1)
		cgoCallers := *gp.m.cgoCallers
		gp.m.cgoCallers[0] = 0
		atomic.Store(&gp.m.cgoCallersUse, 0)

		printCgoTraceback(&cgoCallers)
	}

	var n int
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Override registers if blocked in system call.
		pc = gp.syscallpc
		sp = gp.syscallsp
		flags &^= _TraceTrap
	}
	// Print traceback. By default, omits runtime frames.
	// If that means we print nothing at all, repeat forcing all frames printed.
	n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
	if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
		n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
	}
	if n == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	printcreatedby(gp)
}

// callers records up to len(pcbuf) return PCs of the current goroutine,
// skipping skip logical frames. It runs gentraceback on the system stack
// (see the comment in gentraceback about tracing one's own stack).
func callers(skip int, pcbuf []uintptr) int {
	sp := getcallersp(unsafe.Pointer(&skip))
	pc := getcallerpc()
	gp := getg()
	var n int
	systemstack(func() {
		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
	})
	return n
}

// gcallers is like callers but for an arbitrary goroutine gp, starting
// from gp's saved scheduling state (signaled by ^uintptr(0) pc/sp).
func gcallers(gp *g, skip int, pcbuf []uintptr) int {
	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
}

// showframe reports whether f should be printed in a traceback of gp.
// firstFrame indicates that this would be the first frame printed;
// elideWrapper requests that autogenerated wrapper frames be hidden.
func showframe(f funcInfo, gp *g, firstFrame, elideWrapper bool) bool {
	g := getg()
	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
		return true
	}
	level, _, _ := gotraceback()
	if level > 1 {
		// Show all frames.
		return true
	}

	if !f.valid() {
		return false
	}

	if elideWrapper {
		file, _ := funcline(f, f.entry)
		if file == "<autogenerated>" {
			return false
		}
	}

	name := funcname(f)

	// Special case: always show runtime.gopanic frame
	// in the middle of a stack trace, so that we can
	// see the boundary between ordinary code and
	// panic-induced deferred code.
	// See golang.org/issue/5832.
	if name == "runtime.gopanic" && !firstFrame {
		return true
	}

	return contains(name, ".") && (!hasprefix(name, "runtime.") || isExportedRuntime(name))
}

// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}

// elideWrapperCalling returns whether a wrapper function that called
// function "name" should be elided from stack traces.
func elideWrapperCalling(name string) bool {
	// If the wrapper called a panic function instead of the
	// wrapped function, we want to include it in stacks.
	return !(name == "runtime.gopanic" || name == "runtime.sigpanic" || name == "runtime.panicwrap")
}

// gStatusStrings maps goroutine status codes to the strings shown in
// goroutine headers.
var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
}

// goroutineheader prints the "goroutine N [status, ...]:" line for gp.
func goroutineheader(gp *g) {
	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status
	var status string
	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if gpstatus == _Gwaiting && gp.waitreason != "" {
		status = gp.waitreason
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid, " [", status)
	if isScan {
		print(" (scan)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != 0 {
		print(", locked to thread")
	}
	print("]:\n")
}

// tracebackothers prints tracebacks of all goroutines other than me.
// System goroutines are omitted unless GOTRACEBACK requests them.
func tracebackothers(me *g) {
	level, _, _ := gotraceback()

	// Show the current goroutine first, if we haven't already.
	g := getg()
	gp := g.m.curg
	if gp != nil && gp != me {
		print("\n")
		goroutineheader(gp)
		traceback(^uintptr(0), ^uintptr(0), 0, gp)
	}

	lock(&allglock)
	for _, gp := range allgs {
		if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp) && level < 2 {
			continue
		}
		print("\n")
		goroutineheader(gp)
		// Note: gp.m == g.m occurs when tracebackothers is
		// called from a signal handler initiated during a
		// systemstack call. The original G is still in the
		// running state, and we want to print its stack.
		if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning {
			print("\tgoroutine running on other thread; stack unavailable\n")
			printcreatedby(gp)
		} else {
			traceback(^uintptr(0), ^uintptr(0), 0, gp)
		}
	}
	unlock(&allglock)
}

// Does f mark the top of a goroutine stack?
func topofstack(f funcInfo) bool {
	pc := f.entry
	return pc == goexitPC ||
		pc == mstartPC ||
		pc == mcallPC ||
		pc == morestackPC ||
		pc == rt0_goPC ||
		externalthreadhandlerp != 0 && pc == externalthreadhandlerp
}

// isSystemGoroutine reports whether the goroutine g must be omitted in
// stack dumps and deadlock detector.
func isSystemGoroutine(gp *g) bool {
	pc := gp.startpc
	return pc == runfinqPC && !fingRunning ||
		pc == bgsweepPC ||
		pc == forcegchelperPC ||
		pc == timerprocPC ||
		pc == gcBgMarkWorkerPC
}

// SetCgoTraceback records three C functions to use to gather
// traceback information from C code and to convert that traceback
// information into symbolic information. These are used when printing
// stack traces for a program that uses cgo.
//
// The traceback and context functions may be called from a signal
// handler, and must therefore use only async-signal safe functions.
898 // The symbolizer function may be called while the program is 899 // crashing, and so must be cautious about using memory. None of the 900 // functions may call back into Go. 901 // 902 // The context function will be called with a single argument, a 903 // pointer to a struct: 904 // 905 // struct { 906 // Context uintptr 907 // } 908 // 909 // In C syntax, this struct will be 910 // 911 // struct { 912 // uintptr_t Context; 913 // }; 914 // 915 // If the Context field is 0, the context function is being called to 916 // record the current traceback context. It should record in the 917 // Context field whatever information is needed about the current 918 // point of execution to later produce a stack trace, probably the 919 // stack pointer and PC. In this case the context function will be 920 // called from C code. 921 // 922 // If the Context field is not 0, then it is a value returned by a 923 // previous call to the context function. This case is called when the 924 // context is no longer needed; that is, when the Go code is returning 925 // to its C code caller. This permits the context function to release 926 // any associated resources. 927 // 928 // While it would be correct for the context function to record a 929 // complete a stack trace whenever it is called, and simply copy that 930 // out in the traceback function, in a typical program the context 931 // function will be called many times without ever recording a 932 // traceback for that context. Recording a complete stack trace in a 933 // call to the context function is likely to be inefficient. 
//
// The traceback function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context    uintptr
//		SigContext uintptr
//		Buf        *uintptr
//		Max        uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t  Context;
//		uintptr_t  SigContext;
//		uintptr_t* Buf;
//		uintptr_t  Max;
//	};
//
// The Context field will be zero to gather a traceback from the
// current program execution point. In this case, the traceback
// function will be called from C code.
//
// Otherwise Context will be a value previously returned by a call to
// the context function. The traceback function should gather a stack
// trace from that saved point in the program execution. The traceback
// function may be called from an execution thread other than the one
// that recorded the context, but only when the context is known to be
// valid and unchanging. The traceback function may also be called
// deeper in the call stack on the same thread that recorded the
// context. The traceback function may be called multiple times with
// the same Context value; it will usually be appropriate to cache the
// result, if possible, the first time this is called for a specific
// context value.
//
// If the traceback function is called from a signal handler on a Unix
// system, SigContext will be the signal context argument passed to
// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
// used to start tracing at the point where the signal occurred. If
// the traceback function is not called from a signal handler,
// SigContext will be zero.
//
// Buf is where the traceback information should be stored. It should
// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
// the PC of that function's caller, and so on. Max is the maximum
// number of entries to store. The function should store a zero to
// indicate the top of the stack, or that the caller is on a different
// stack, presumably a Go stack.
//
// Unlike runtime.Callers, the PC values returned should, when passed
// to the symbolizer function, return the file/line of the call
// instruction. No additional subtraction is required or appropriate.
//
// The symbolizer function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		PC      uintptr // program counter to fetch information for
//		File    *byte   // file name (NUL terminated)
//		Lineno  uintptr // line number
//		Func    *byte   // function name (NUL terminated)
//		Entry   uintptr // function entry point
//		More    uintptr // set non-zero if more info for this PC
//		Data    uintptr // unused by runtime, available for function
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t PC;
//		char*     File;
//		uintptr_t Lineno;
//		char*     Func;
//		uintptr_t Entry;
//		uintptr_t More;
//		uintptr_t Data;
//	};
//
// The PC field will be a value returned by a call to the traceback
// function.
//
// The first time the function is called for a particular traceback,
// all the fields except PC will be 0. The function should fill in the
// other fields if possible, setting them to 0/nil if the information
// is not available. The Data field may be used to store any useful
// information across calls. The More field should be set to non-zero
// if there is more information for this PC, zero otherwise. If More
// is set non-zero, the function will be called again with the same
// PC, and may return different information (this is intended for use
// with inlined functions). If More is zero, the function will be
// called with the next PC value in the traceback. When the traceback
// is complete, the function will be called once more with PC set to
// zero; this may be used to free any information. Each call will
// leave the fields of the struct set to the same values they had upon
// return, except for the PC field when the More field is zero. The
// function must not keep a copy of the struct pointer between calls.
//
// When calling SetCgoTraceback, the version argument is the version
// number of the structs that the functions expect to receive.
// Currently this must be zero.
//
// The symbolizer function may be nil, in which case the results of
// the traceback function will be displayed as numbers. If the
// traceback function is nil, the symbolizer function will never be
// called. The context function may be nil, in which case the
// traceback function will only be called with the context field set
// to zero. If the context function is nil, then calls from Go to C
// to Go will not show a traceback for the C portion of the call stack.
//
// SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
	// Only struct version 0 is defined; see the doc comment above.
	if version != 0 {
		panic("unsupported version")
	}

	// Reject a second call that changes an already-registered function;
	// re-registering the identical pointers is tolerated.
	if cgoTraceback != nil && cgoTraceback != traceback ||
		cgoContext != nil && cgoContext != context ||
		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
		panic("call SetCgoTraceback only once")
	}

	cgoTraceback = traceback
	cgoContext = context
	cgoSymbolizer = symbolizer

	// The context function is called when a C function calls a Go
	// function. As such it is only called by C code in runtime/cgo.
	// Hand the context function to the cgo support code, when it is
	// linked in, so C-to-Go calls can capture a traceback context.
	if _cgo_set_context_function != nil {
		cgocall(_cgo_set_context_function, context)
	}
}

// C function pointers registered by SetCgoTraceback.
var cgoTraceback unsafe.Pointer
var cgoContext unsafe.Pointer
var cgoSymbolizer unsafe.Pointer

// cgoTracebackArg is the type passed to cgoTraceback. It mirrors the
// traceback struct documented in SetCgoTraceback.
type cgoTracebackArg struct {
	context    uintptr  // 0, or a value from the context function
	sigContext uintptr  // signal context (ucontext_t* as uintptr) or 0
	buf        *uintptr // out: PC values, zero-terminated
	max        uintptr  // capacity of buf
}

// cgoContextArg is the type passed to the context function.
type cgoContextArg struct {
	context uintptr
}

// cgoSymbolizerArg is the type passed to cgoSymbolizer. It mirrors the
// symbolizer struct documented in SetCgoTraceback.
type cgoSymbolizerArg struct {
	pc       uintptr
	file     *byte
	lineno   uintptr
	funcName *byte
	entry    uintptr
	more     uintptr
	data     uintptr
}

// printCgoTraceback prints a traceback of callers. When no symbolizer
// is registered, it falls back to printing raw PC values.
func printCgoTraceback(callers *cgoCallers) {
	if cgoSymbolizer == nil {
		// callers is zero-terminated (or completely full).
		for _, c := range callers {
			if c == 0 {
				break
			}
			print("non-Go function at pc=", hex(c), "\n")
		}
		return
	}

	var arg cgoSymbolizerArg
	for _, c := range callers {
		if c == 0 {
			break
		}
		printOneCgoTraceback(c, 0x7fffffff, &arg)
	}
	// Final call with pc == 0 lets the symbolizer free any state it
	// accumulated, per the SetCgoTraceback contract.
	arg.pc = 0
	callCgoSymbolizer(&arg)
}

// printOneCgoTraceback prints the traceback of a single cgo caller.
// This can print more than one line because of inlining.
// Returns the number of frames printed.
//
// NOTE(review): the `c > max` check runs before each print, so up to
// max+1 frames can be emitted; the only caller passes 0x7fffffff, so
// this is harmless today — confirm intent before tightening.
func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
	c := 0
	arg.pc = pc
	for {
		if c > max {
			break
		}
		// Each call with the same pc yields the next (inlined) frame
		// while arg.more stays non-zero.
		callCgoSymbolizer(arg)
		if arg.funcName != nil {
			// Note that we don't print any argument
			// information here, not even parentheses.
			// The symbolizer must add that if appropriate.
			println(gostringnocopy(arg.funcName))
		} else {
			println("non-Go function")
		}
		print("\t")
		if arg.file != nil {
			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
		}
		print("pc=", hex(pc), "\n")
		c++
		// arg.more non-zero means the symbolizer has another (inlined)
		// frame for this same pc; otherwise we are done with it.
		if arg.more == 0 {
			break
		}
	}
	return c
}

// callCgoSymbolizer calls the cgoSymbolizer function with arg.
func callCgoSymbolizer(arg *cgoSymbolizerArg) {
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	if msanenabled {
		// Tell MSAN about the C side's access to *arg.
		// NOTE(review): msanwrite presumably marks the region as
		// initialized — confirm against the runtime msan shims.
		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
}

// cgoContextPCs gets the PC values from a cgo traceback.
// ctxt is a value previously produced by the registered context
// function; the PCs are written into buf (zero-terminated, per the
// SetCgoTraceback contract). It is a no-op when no traceback function
// has been registered.
func cgoContextPCs(ctxt uintptr, buf []uintptr) {
	if cgoTraceback == nil {
		return
	}
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	arg := cgoTracebackArg{
		context: ctxt,
		// noescape hides &buf[0] from escape analysis; the caller's
		// buf stays live for the duration of the call.
		buf: (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
		max: uintptr(len(buf)),
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
}