// github.com/geraldss/go/src@v0.0.0-20210511222824-ac7d0ebfc235/runtime/traceback.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/bytealg"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
// On systems without link registers, the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.
//
// To date, the opposite of a link register architecture is an x86 architecture.
// This code may need to change if some other kind of non-link-register
// architecture comes along.
//
// The other important fact is the size of a pointer: on 32-bit systems the LR
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize.
//
// As an exception, amd64p32 had ptrSize == 4 but the CALL instruction still
// stored an 8-byte return PC onto the stack. To accommodate this, we used regSize
// as the size of the architecture-pushed return PC.
//
// usesLR is defined below in terms of minFrameSize, which is defined in
// arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.

const usesLR = sys.MinFrameSize > 0
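
// Illustrative sketch (not used by the runtime): where the two kinds of
// architectures keep the caller's return PC. On a link-register machine a
// non-leaf function's saved LR sits at the bottom of its frame (at SP); on a
// non-LR machine such as x86 the CALL-pushed return PC sits just below the
// caller's SP. A hypothetical helper mirroring the loads done later in
// gentraceback:
//
//	func callerPC(sp, fp uintptr) uintptr {
//		if usesLR {
//			return *(*uintptr)(unsafe.Pointer(sp)) // saved LR at frame bottom
//		}
//		return uintptr(*(*sys.Uintreg)(unsafe.Pointer(fp - sys.RegSize))) // pushed return PC
//	}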

// Traceback over the deferred function calls.
// Report them like calls that have been invoked but not started executing yet.
func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
	var frame stkframe
	for d := gp._defer; d != nil; d = d.link {
		fn := d.fn
		if fn == nil {
			// Defer of nil function. Args don't matter.
			frame.pc = 0
			frame.fn = funcInfo{}
			frame.argp = 0
			frame.arglen = 0
			frame.argmap = nil
		} else {
			frame.pc = fn.fn
			f := findfunc(frame.pc)
			if !f.valid() {
				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
				throw("unknown pc")
			}
			frame.fn = f
			frame.argp = uintptr(deferArgs(d))
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
			}
		}
		frame.continpc = frame.pc
		if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
			return
		}
	}
}

const sizeofSkipFunction = 256

// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
// duplicating the code and all its subtlety.
//
// The skip argument is only valid with pcbuf != nil and counts the number
// of logical frames to skip rather than physical frames (with inlining, a
// PC in pcbuf can represent multiple calls). If a PC is partially skipped
// and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where
// N indicates the number of logical frames to skip in pcbuf[0].
func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
	if skip > 0 && callback != nil {
		throw("gentraceback callback cannot be used with non-zero skip")
	}

	// Don't call this "g"; it's too easy to get "g" and "gp" confused.
	if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and gentraceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that gentraceback and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling getcallersp) must not run on that goroutine's stack but
		// instead on the g0 stack.
		throw("gentraceback cannot trace user goroutine on its own stack")
	}
	level, _, _ := gotraceback()

	var ctxt *funcval // Context pointer for unstarted goroutines. See issue #25897.

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.syscallsp != 0 {
			pc0 = gp.syscallpc
			sp0 = gp.syscallsp
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.sched.pc
			sp0 = gp.sched.sp
			if usesLR {
				lr0 = gp.sched.lr
			}
			ctxt = (*funcval)(gp.sched.ctxt)
		}
	}

	nprint := 0
	var frame stkframe
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}
	waspanic := false
	cgoCtxt := gp.cgoCtxt
	printing := pcbuf == nil && callback == nil

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
			frame.sp += sys.RegSize
		}
	}

	f := findfunc(frame.pc)
	if !f.valid() {
		if callback != nil || printing {
			print("runtime: unknown pc ", hex(frame.pc), "\n")
			tracebackHexdump(gp.stack, &frame, 0)
		}
		if callback != nil {
			throw("unknown pc")
		}
		return 0
	}
	frame.fn = f

	var cache pcvalueCache

	lastFuncID := funcID_normal
	n := 0
	for n < max {
		// Typically:
		//	pc is the PC of the running function.
		//	sp is the stack pointer at that program counter.
		//	fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
		//	stk is the stack containing sp.
		//	The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
		f = frame.fn
		if f.pcsp == 0 {
			// No frame information, must be external function, like race support.
			// See golang.org/issue/13568.
			break
		}

		// Found an actual function.
		// Derive frame pointer and link register.
		if frame.fp == 0 {
			// Jump over system stack transitions. If we're on g0 and there's a user
			// goroutine, try to jump. Otherwise this is a regular call.
			if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil {
				switch f.funcID {
				case funcID_morestack:
					// morestack does not return normally -- newstack()
					// gogo's to curg.sched. Match that.
					// This keeps morestack() from showing up in the backtrace,
					// but that makes some sense since it'll never be returned
					// to.
					frame.pc = gp.m.curg.sched.pc
					frame.fn = findfunc(frame.pc)
					f = frame.fn
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				case funcID_systemstack:
					// systemstack returns normally, so just follow the
					// stack transition.
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				}
			}
			frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
			if !usesLR {
				// On x86, call instruction pushes return PC before entering new function.
				frame.fp += sys.RegSize
			}
		}
		var flr funcInfo
		if topofstack(f, gp.m != nil && gp == gp.m.g0) {
			frame.lr = 0
			flr = funcInfo{}
		} else if usesLR && f.funcID == funcID_jmpdefer {
			// jmpdefer modifies SP/LR/PC non-atomically.
			// If a profiling interrupt arrives during jmpdefer,
			// the stack unwind may see a mismatched register set
			// and get confused. Stop if we see PC within jmpdefer
			// to avoid that confusion.
			// See golang.org/issue/8153.
			if callback != nil {
				throw("traceback_arm: found jmpdefer when tracing with callback")
			}
			frame.lr = 0
		} else {
			var lrPtr uintptr
			if usesLR {
				if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
					lrPtr = frame.sp
					frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
				}
			} else {
				if frame.lr == 0 {
					lrPtr = frame.fp - sys.RegSize
					frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
				}
			}
			flr = findfunc(frame.lr)
			if !flr.valid() {
				// This happens if you get a profiling interrupt at just the wrong time.
				// In that context it is okay to stop early.
				// But if callback is set, we're doing a garbage collection and must
				// get everything, so crash loudly.
				doPrint := printing
				if doPrint && gp.m.incgo && f.funcID == funcID_sigpanic {
					// We can inject sigpanic
					// calls directly into C code,
					// in which case we'll see a C
					// return PC. Don't complain.
					doPrint = false
				}
				if callback != nil || doPrint {
					print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
					tracebackHexdump(gp.stack, &frame, lrPtr)
				}
				if callback != nil {
					throw("unknown caller pc")
				}
			}
		}

		frame.varp = frame.fp
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.varp -= sys.RegSize
		}

		// For architectures with frame pointers, if there's
		// a frame, then there's a saved frame pointer here.
		if frame.varp > frame.sp && (GOARCH == "amd64" || GOARCH == "arm64") {
			frame.varp -= sys.RegSize
		}
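
		// Sketch of the frame layout just derived, for a non-LR machine
		// (stack grows down; offsets are illustrative, not normative):
		//
		//	frame.fp → caller's SP, one word above the return PC
		//	           [return PC]        ← frame.varp after the -= RegSize above
		//	           [saved frame ptr]  (amd64/arm64 frame-pointer machines)
		//	           [locals, outgoing args ...]
		//	frame.sp → bottom of this frame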

		// Derive size of arguments.
		// Most functions have a fixed-size argument block,
		// so we can use metadata about the function f.
		// Not all, though: there are some variadic functions
		// in package runtime and reflect, and for those we use call-specific
		// metadata recorded by f's caller.
		if callback != nil || printing {
			frame.argp = frame.fp + sys.MinFrameSize
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, callback != nil)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, ctxt)
			}
		}
		ctxt = nil // ctxt is only needed to get arg maps for the topmost frame

		// Determine frame's 'continuation PC', where it can continue.
		// Normally this is the return address on the stack, but if sigpanic
		// is immediately below this function on the stack, then the frame
		// stopped executing due to a trap, and frame.pc is probably not
		// a safe point for looking up liveness information. In this panicking case,
		// the function either doesn't return at all (if it has no defers or if the
		// defers do not recover) or it returns from one of the calls to
		// deferproc a second time (if the corresponding deferred func recovers).
		// In the latter case, use a deferreturn call site as the continuation pc.
		frame.continpc = frame.pc
		if waspanic {
			if frame.fn.deferreturn != 0 {
				frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1
				// Note: this may perhaps keep return variables alive longer than
				// strictly necessary, as we are using "function has a defer statement"
				// as a proxy for "function actually deferred something". It seems
				// to be a minor drawback. (We used to actually look through the
				// gp._defer for a defer corresponding to this function, but that
				// is hard to do with defer records on the stack during a stack copy.)
				// Note: the +1 is to offset the -1 that
				// stack.go:getStackMap does to back up a return
				// address to make sure the pc is in the CALL instruction.
			} else {
				frame.continpc = 0
			}
		}

		if callback != nil {
			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
				return n
			}
		}

		if pcbuf != nil {
			pc := frame.pc
			// backup to CALL instruction to read inlining info (same logic as below)
			tracepc := pc
			// Normally, pc is a return address. In that case, we want to look up
			// file/line information using pc-1, because that is the pc of the
			// call instruction (more precisely, the last byte of the call instruction).
			// Callers expect the pc buffer to contain return addresses and do the
			// same -1 themselves, so we keep pc unchanged.
			// When the pc is from a signal (e.g. profiler or segv) then we want
			// to look up file/line information using pc, and we store pc+1 in the
			// pc buffer so callers can unconditionally subtract 1 before looking up.
			// See issue 34123.
			// The pc can be at function entry when the frame is initialized without
			// actually running code, like runtime.mstart.
			if (n == 0 && flags&_TraceTrap != 0) || waspanic || pc == f.entry {
				pc++
			} else {
				tracepc--
			}

			// If there is inlining info, record the inner frames.
			if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				for {
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
					if ix < 0 {
						break
					}
					if inltree[ix].funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
						// ignore wrappers
					} else if skip > 0 {
						skip--
					} else if n < max {
						(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
						n++
					}
					lastFuncID = inltree[ix].funcID
					// Back up to an instruction in the "caller".
					tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc)
					pc = tracepc + 1
				}
			}
			// Record the main frame.
			if f.funcID == funcID_wrapper && elideWrapperCalling(lastFuncID) {
				// Ignore wrapper functions (except when they trigger panics).
			} else if skip > 0 {
				skip--
			} else if n < max {
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
				n++
			}
			lastFuncID = f.funcID
			n-- // offset n++ below
		}
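
		// For reference, a sketch of how user code consumes this logical-frame
		// encoding; runtime.CallersFrames performs the inline expansion that
		// mirrors the pcbuf bookkeeping above (illustrative, not runtime code):
		//
		//	pcs := make([]uintptr, 16)
		//	n := runtime.Callers(2, pcs) // skip Callers and the caller itself
		//	frames := runtime.CallersFrames(pcs[:n])
		//	for {
		//		fr, more := frames.Next()
		//		println(fr.Function, fr.File, fr.Line)
		//		if !more {
		//			break
		//		}
		//	}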

		if printing {
			// assume skip=0 for printing.
			//
			// Never elide wrappers if we haven't printed
			// any frames. And don't elide wrappers that
			// called panic rather than the wrapped
			// function. Otherwise, leave them out.

			// backup to CALL instruction to read inlining info (same logic as below)
			tracepc := frame.pc
			if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
				tracepc--
			}
			// If there is inlining info, print the inner frames.
			if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
				inltree := (*[1 << 20]inlinedCall)(inldata)
				var inlFunc _func
				inlFuncInfo := funcInfo{&inlFunc, f.datap}
				for {
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
					if ix < 0 {
						break
					}

					// Create a fake _func for the
					// inlined function.
					inlFunc.nameoff = inltree[ix].func_
					inlFunc.funcID = inltree[ix].funcID

					if (flags&_TraceRuntimeFrames) != 0 || showframe(inlFuncInfo, gp, nprint == 0, inlFuncInfo.funcID, lastFuncID) {
						name := funcname(inlFuncInfo)
						file, line := funcline(f, tracepc)
						print(name, "(...)\n")
						print("\t", file, ":", line, "\n")
						nprint++
					}
					lastFuncID = inltree[ix].funcID
					// Back up to an instruction in the "caller".
					tracepc = frame.fn.entry + uintptr(inltree[ix].parentPc)
				}
			}
			if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, f.funcID, lastFuncID) {
				// Print during crash.
				//	main(0x1, 0x2, 0x3)
				//		/home/rsc/go/src/runtime/x.go:23 +0xf
				//
				name := funcname(f)
				file, line := funcline(f, tracepc)
				if name == "runtime.gopanic" {
					name = "panic"
				}
				print(name, "(")
				argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
				for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
					if i >= 10 {
						print(", ...")
						break
					}
					if i != 0 {
						print(", ")
					}
					print(hex(argp[i]))
				}
				print(")\n")
				print("\t", file, ":", line)
				if frame.pc > f.entry {
					print(" +", hex(frame.pc-f.entry))
				}
				if gp.m != nil && gp.m.throwing > 0 && gp == gp.m.curg || level >= 2 {
					print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
				}
				print("\n")
				nprint++
			}
			lastFuncID = f.funcID
		}
		n++

		if f.funcID == funcID_cgocallback && len(cgoCtxt) > 0 {
			ctxt := cgoCtxt[len(cgoCtxt)-1]
			cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]

			// skip only applies to Go frames.
			// callback != nil is only used when we only care
			// about Go frames.
			if skip == 0 && callback == nil {
				n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
			}
		}

		waspanic = f.funcID == funcID_sigpanic
		injectedCall := waspanic || f.funcID == funcID_asyncPreempt

		// Do not unwind past the bottom of the stack.
		if !flr.valid() {
			break
		}

		// Unwind to next frame.
		frame.fn = flr
		frame.pc = frame.lr
		frame.lr = 0
		frame.sp = frame.fp
		frame.fp = 0
		frame.argmap = nil

		// On link register architectures, sighandler saves the LR on stack
		// before faking a call.
		if usesLR && injectedCall {
			x := *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.sp += sys.MinFrameSize
			if GOARCH == "arm64" {
				// arm64 needs 16-byte aligned SP, always
				frame.sp += sys.PtrSize
			}
			f = findfunc(frame.pc)
			frame.fn = f
			if !f.valid() {
				frame.pc = x
			} else if funcspdelta(f, frame.pc, &cache) == 0 {
				frame.lr = x
			}
		}
	}

	if printing {
		n = nprint
	}

	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	if callback != nil && n < max && frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
		throw("traceback did not unwind completely")
	}

	return n
}
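
// For reference, the three ways gentraceback is invoked elsewhere in this
// file and in the runtime (illustrative sketch; see traceback1, callers,
// and the GC's per-frame callback use in stack scanning):
//
//	gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)    // print to stderr
//	gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)         // fill a PC buffer
//	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, max, callback, nil, 0)  // per-frame callback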

// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
// and reflect.methodValue.
type reflectMethodValue struct {
	fn     uintptr
	stack  *bitvector // ptrmap for both args and results
	argLen uintptr    // just args
}

// getArgInfoFast returns the argument frame information for a call to f.
// It is short and inlineable. However, it does not handle all functions.
// If ok is false, you must call getArgInfo instead.
// TODO(josharian): once we do mid-stack inlining,
// call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool) {
	return uintptr(f.args), nil, !(needArgMap && f.args == _ArgsSizeUnknown)
}

// getArgInfo returns the argument frame information for a call to f
// with call frame frame.
//
// This is used for both actual calls with active stack frames and for
// deferred calls or goroutines that are not yet executing. If this is an actual
// call, ctxt must be nil (getArgInfo will retrieve what it needs from
// the active stack frame). If this is a deferred call or unstarted goroutine,
// ctxt must be the function object that was deferred or go'd.
func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
	arglen = uintptr(f.args)
	if needArgMap && f.args == _ArgsSizeUnknown {
		// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
		switch funcname(f) {
		case "reflect.makeFuncStub", "reflect.methodValueCall":
			// These take a *reflect.methodValue as their
			// context register.
			var mv *reflectMethodValue
			var retValid bool
			if ctxt != nil {
				// This is not an actual call, but a
				// deferred call or an unstarted goroutine.
				// The function value is itself the *reflect.methodValue.
				mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
			} else {
				// This is a real call that took the
				// *reflect.methodValue as its context
				// register and immediately saved it
				// to 0(SP). Get the methodValue from
				// 0(SP).
				arg0 := frame.sp + sys.MinFrameSize
				mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
				// Figure out whether the return values are valid.
				// Reflect will update this value after it copies
				// in the return values.
				retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize))
			}
			if mv.fn != f.entry {
				print("runtime: confused by ", funcname(f), "\n")
				throw("reflect mismatch")
			}
			bv := mv.stack
			arglen = uintptr(bv.n * sys.PtrSize)
			if !retValid {
				arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
			}
			argmap = bv
		}
	}
	return
}
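
// The two functions above are used together in a fast-path/slow-path
// pattern: getArgInfoFast stays small enough to inline, and callers fall
// back to getArgInfo only for the rare reflect stubs. The call sites in
// this file all follow this shape (sketch):
//
//	arglen, argmap, ok := getArgInfoFast(f, needArgMap)
//	if !ok {
//		arglen, argmap = getArgInfo(frame, f, needArgMap, ctxt)
//	}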

// tracebackCgoContext handles tracing back a cgo context value, from
// the context argument to setCgoTraceback, for the gentraceback
// function. It returns the new value of n.
func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
	var cgoPCs [32]uintptr
	cgoContextPCs(ctxt, cgoPCs[:])
	var arg cgoSymbolizerArg
	anySymbolized := false
	for _, pc := range cgoPCs {
		if pc == 0 || n >= max {
			break
		}
		if pcbuf != nil {
			(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
		}
		if printing {
			if cgoSymbolizer == nil {
				print("non-Go function at pc=", hex(pc), "\n")
			} else {
				c := printOneCgoTraceback(pc, max-n, &arg)
				n += c - 1 // +1 a few lines down
				anySymbolized = true
			}
		}
		n++
	}
	if anySymbolized {
		arg.pc = 0
		callCgoSymbolizer(&arg)
	}
	return n
}

func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	f := findfunc(pc)
	if f.valid() && showframe(f, gp, false, funcID_normal, funcID_normal) && gp.goid != 1 {
		printcreatedby1(f, pc)
	}
}

func printcreatedby1(f funcInfo, pc uintptr) {
	print("created by ", funcname(f), "\n")
	tracepc := pc // back up to CALL instruction for funcline.
	if pc > f.entry {
		tracepc -= sys.PCQuantum
	}
	file, line := funcline(f, tracepc)
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
}
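
// Illustrative output of printcreatedby/printcreatedby1 (the function name
// and path here are hypothetical):
//
//	created by main.startWorkers
//		/home/user/src/app/main.go:42 +0x5e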

func traceback(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, 0)
}

// tracebacktrap is like traceback but expects that the PC and SP were obtained
// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
// rewind it into the CALL instruction.)
// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
// the pc/sp/lr passed in.
func tracebacktrap(pc, sp, lr uintptr, gp *g) {
	if gp.m.libcallsp != 0 {
		// We're in C code somewhere, traceback from the saved position.
		traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
		return
	}
	traceback1(pc, sp, lr, gp, _TraceTrap)
}

func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
	// If the goroutine is in cgo, and we have a cgo traceback, print that.
	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
		// Lock cgoCallers so that a signal handler won't
		// change it, copy the array, reset it, unlock it.
		// We are locked to the thread and are not running
		// concurrently with a signal handler.
		// We just have to stop a signal handler from interrupting
		// in the middle of our copy.
		atomic.Store(&gp.m.cgoCallersUse, 1)
		cgoCallers := *gp.m.cgoCallers
		gp.m.cgoCallers[0] = 0
		atomic.Store(&gp.m.cgoCallersUse, 0)

		printCgoTraceback(&cgoCallers)
	}

	var n int
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Override registers if blocked in system call.
		pc = gp.syscallpc
		sp = gp.syscallsp
		flags &^= _TraceTrap
	}
	// Print traceback. By default, omits runtime frames.
	// If that means we print nothing at all, repeat forcing all frames printed.
	n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
	if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
		n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
	}
	if n == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	printcreatedby(gp)

	if gp.ancestors == nil {
		return
	}
	for _, ancestor := range *gp.ancestors {
		printAncestorTraceback(ancestor)
	}
}

// printAncestorTraceback prints the traceback of the given ancestor.
// TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTraceback(ancestor ancestorInfo) {
	print("[originating from goroutine ", ancestor.goid, "]:\n")
	for fidx, pc := range ancestor.pcs {
		f := findfunc(pc) // f previously validated
		if showfuncinfo(f, fidx == 0, funcID_normal, funcID_normal) {
			printAncestorTracebackFuncInfo(f, pc)
		}
	}
	if len(ancestor.pcs) == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	// Show what created goroutine, except main goroutine (goid 1).
	f := findfunc(ancestor.gopc)
	if f.valid() && showfuncinfo(f, false, funcID_normal, funcID_normal) && ancestor.goid != 1 {
		printcreatedby1(f, ancestor.gopc)
	}
}

// printAncestorTracebackFuncInfo prints the given function info at a given pc
// within an ancestor traceback. The precision of this info is reduced
// due to only having access to the pcs at the time of the caller
// goroutine being created.
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) {
	name := funcname(f)
	if inldata := funcdata(f, _FUNCDATA_InlTree); inldata != nil {
		inltree := (*[1 << 20]inlinedCall)(inldata)
		ix := pcdatavalue(f, _PCDATA_InlTreeIndex, pc, nil)
		if ix >= 0 {
			name = funcnameFromNameoff(f, inltree[ix].func_)
		}
	}
	file, line := funcline(f, pc)
	if name == "runtime.gopanic" {
		name = "panic"
	}
	print(name, "(...)\n")
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
}

func callers(skip int, pcbuf []uintptr) int {
	sp := getcallersp()
	pc := getcallerpc()
	gp := getg()
	var n int
	systemstack(func() {
		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
	})
	return n
}

func gcallers(gp *g, skip int, pcbuf []uintptr) int {
	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
}
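
// Usage sketch for the two helpers above (illustrative): callers walks the
// current goroutine, so it runs on the g0 stack via systemstack; gcallers
// walks another goroutine gp, whose saved pc/sp gentraceback fetches from gp
// itself (the ^uintptr(0) sentinel), which is only safe while gp is not
// running.
//
//	buf := make([]uintptr, 32)
//	n := callers(1, buf)     // skip the caller of callers
//	n = gcallers(gp, 0, buf) // gp must be stopped, e.g. during a GC scan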

// showframe reports whether the frame with the given characteristics should
// be printed during a traceback.
func showframe(f funcInfo, gp *g, firstFrame bool, funcID, childID funcID) bool {
	g := getg()
	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
		return true
	}
	return showfuncinfo(f, firstFrame, funcID, childID)
}

// showfuncinfo reports whether a function with the given characteristics should
// be printed during a traceback.
func showfuncinfo(f funcInfo, firstFrame bool, funcID, childID funcID) bool {
	// Note that f may be a synthesized funcInfo for an inlined
	// function, in which case only nameoff and funcID are set.

	level, _, _ := gotraceback()
	if level > 1 {
		// Show all frames.
		return true
	}

	if !f.valid() {
		return false
	}

	if funcID == funcID_wrapper && elideWrapperCalling(childID) {
		return false
	}

	name := funcname(f)

	// Special case: always show runtime.gopanic frame
	// in the middle of a stack trace, so that we can
	// see the boundary between ordinary code and
	// panic-induced deferred code.
	// See golang.org/issue/5832.
	if name == "runtime.gopanic" && !firstFrame {
		return true
	}

	return bytealg.IndexByteString(name, '.') >= 0 && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
}
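
// Illustrative outcomes of the final filter above, at the default
// GOTRACEBACK level:
//
//	"main.main"       → shown (has a dot, not runtime-internal)
//	"runtime.goexit"  → hidden (unexported runtime function)
//	"runtime.GC"      → shown (exported runtime function)
//	"runtime.gopanic" → shown mid-stack via the special case above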

// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}

// elideWrapperCalling reports whether a wrapper function that called
// function id should be elided from stack traces.
func elideWrapperCalling(id funcID) bool {
	// If the wrapper called a panic function instead of the
	// wrapped function, we want to include it in stacks.
	return !(id == funcID_gopanic || id == funcID_sigpanic || id == funcID_panicwrap)
}

var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
	_Gpreempted: "preempted",
}

func goroutineheader(gp *g) {
	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status
	var status string
	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
		status = gp.waitreason.String()
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid, " [", status)
	if isScan {
		print(" (scan)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != 0 {
		print(", locked to thread")
	}
	print("]:\n")
}
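
// Example header as printed by goroutineheader (illustrative values):
//
//	goroutine 18 [chan receive, 3 minutes, locked to thread]: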

func tracebackothers(me *g) {
	level, _, _ := gotraceback()

	// Show the current goroutine first, if we haven't already.
	curgp := getg().m.curg
	if curgp != nil && curgp != me {
		print("\n")
		goroutineheader(curgp)
		traceback(^uintptr(0), ^uintptr(0), 0, curgp)
	}

	// We can't take allglock here because this may be during fatal
	// throw/panic, where locking allglock could be out-of-order or a
	// direct deadlock.
	//
	// Instead, use atomic access to allgs which requires no locking. We
	// don't lock against concurrent creation of new Gs, but even with
	// allglock we may miss Gs created after this loop.
	ptr, length := atomicAllG()
	for i := uintptr(0); i < length; i++ {
		gp := atomicAllGIndex(ptr, i)

		if gp == me || gp == curgp || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
			continue
		}
		print("\n")
		goroutineheader(gp)
		// Note: gp.m == g.m occurs when tracebackothers is
		// called from a signal handler initiated during a
		// systemstack call. The original G is still in the
		// running state, and we want to print its stack.
		if gp.m != getg().m && readgstatus(gp)&^_Gscan == _Grunning {
			print("\tgoroutine running on other thread; stack unavailable\n")
			printcreatedby(gp)
		} else {
			traceback(^uintptr(0), ^uintptr(0), 0, gp)
		}
	}
}

// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will be marked as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
	const expand = 32 * sys.PtrSize
	const maxExpand = 256 * sys.PtrSize
	// Start around frame.sp.
	lo, hi := frame.sp, frame.sp
	// Expand to include frame.fp.
	if frame.fp != 0 && frame.fp < lo {
		lo = frame.fp
	}
	if frame.fp != 0 && frame.fp > hi {
		hi = frame.fp
	}
	// Expand a bit more.
	lo, hi = lo-expand, hi+expand
	// But don't go too far from frame.sp.
	if lo < frame.sp-maxExpand {
		lo = frame.sp - maxExpand
	}
	if hi > frame.sp+maxExpand {
		hi = frame.sp + maxExpand
	}
	// And don't go outside the stack bounds.
	if lo < stk.lo {
		lo = stk.lo
	}
	if hi > stk.hi {
		hi = stk.hi
	}

	// Print the hex dump.
	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
	hexdumpWords(lo, hi, func(p uintptr) byte {
		switch p {
		case frame.fp:
			return '>'
		case frame.sp:
			return '<'
		case bad:
			return '!'
		}
		return 0
	})
}

// Does f mark the top of a goroutine stack?
func topofstack(f funcInfo, g0 bool) bool {
	return f.funcID == funcID_goexit ||
		f.funcID == funcID_mstart ||
		f.funcID == funcID_mcall ||
		f.funcID == funcID_morestack ||
		f.funcID == funcID_rt0_go ||
		f.funcID == funcID_externalthreadhandler ||
		// asmcgocall is TOS on the system stack because it
		// switches to the system stack, but in this case we
		// can come back to the regular stack and still want
		// to be able to unwind through the call that appeared
		// on the regular stack.
		(g0 && f.funcID == funcID_asmcgocall)
}

// isSystemGoroutine reports whether the goroutine g must be omitted
// from stack dumps and the deadlock detector. This is any goroutine that
// starts at a runtime.* entry point, except for runtime.main,
// runtime.handleAsyncEvent (wasm only) and sometimes runtime.runfinq.
//
// If fixed is true, any goroutine that can vary between user and
// system (that is, the finalizer goroutine) is considered a user
// goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool {
	// Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
	f := findfunc(gp.startpc)
	if !f.valid() {
		return false
	}
	if f.funcID == funcID_runtime_main || f.funcID == funcID_handleAsyncEvent {
		return false
	}
	if f.funcID == funcID_runfinq {
		// We include the finalizer goroutine if it's calling
		// back into user code.
		if fixed {
			// This goroutine can vary. In fixed mode,
			// always consider it a user goroutine.
			return false
		}
		return !fingRunning
	}
	return hasPrefix(funcname(f), "runtime.")
}
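
// Illustrative classification by isSystemGoroutine (entry-point names are
// examples, not an exhaustive list):
//
//	"runtime.bgsweep" → system (runtime.* entry point)
//	"runtime.main"    → user (special-cased)
//	"runtime.runfinq" → user while running a finalizer, system otherwise
//	"main.worker"     → user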

// SetCgoTraceback records three C functions to use to gather
// traceback information from C code and to convert that traceback
// information into symbolic information. These are used when printing
// stack traces for a program that uses cgo.
//
// The traceback and context functions may be called from a signal
// handler, and must therefore use only async-signal safe functions.
// The symbolizer function may be called while the program is
// crashing, and so must be cautious about using memory. None of the
// functions may call back into Go.
//
// The context function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t Context;
//	};
//
// If the Context field is 0, the context function is being called to
// record the current traceback context. It should record in the
// Context field whatever information is needed about the current
// point of execution to later produce a stack trace, probably the
// stack pointer and PC. In this case the context function will be
// called from C code.
//
// If the Context field is not 0, then it is a value returned by a
// previous call to the context function. This case is called when the
// context is no longer needed; that is, when the Go code is returning
// to its C code caller. This permits the context function to release
// any associated resources.
//
// While it would be correct for the context function to record a
// complete stack trace whenever it is called, and simply copy that
// out in the traceback function, in a typical program the context
// function will be called many times without ever recording a
// traceback for that context. Recording a complete stack trace in a
// call to the context function is likely to be inefficient.
//
// The traceback function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context    uintptr
//		SigContext uintptr
//		Buf        *uintptr
//		Max        uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t  Context;
//		uintptr_t  SigContext;
//		uintptr_t* Buf;
//		uintptr_t  Max;
//	};
//
// The Context field will be zero to gather a traceback from the
// current program execution point. In this case, the traceback
// function will be called from C code.
//
// Otherwise Context will be a value previously returned by a call to
// the context function. The traceback function should gather a stack
// trace from that saved point in the program execution. The traceback
// function may be called from an execution thread other than the one
// that recorded the context, but only when the context is known to be
// valid and unchanging. The traceback function may also be called
// deeper in the call stack on the same thread that recorded the
// context. The traceback function may be called multiple times with
// the same Context value; it will usually be appropriate to cache the
// result, if possible, the first time this is called for a specific
// context value.
//
// If the traceback function is called from a signal handler on a Unix
// system, SigContext will be the signal context argument passed to
// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
// used to start tracing at the point where the signal occurred. If
// the traceback function is not called from a signal handler,
// SigContext will be zero.
//
// Buf is where the traceback information should be stored. It should
// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
// the PC of that function's caller, and so on. Max is the maximum
// number of entries to store. The function should store a zero to
// indicate the top of the stack, or that the caller is on a different
// stack, presumably a Go stack.
//
// Unlike runtime.Callers, the PC values returned should, when passed
// to the symbolizer function, return the file/line of the call
// instruction. No additional subtraction is required or appropriate.
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
// and freebsd/amd64, the traceback function is also invoked when a
// signal is received by a thread that is executing a cgo call. The
// traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		PC     uintptr // program counter to fetch information for
//		File   *byte   // file name (NUL terminated)
//		Lineno uintptr // line number
//		Func   *byte   // function name (NUL terminated)
//		Entry  uintptr // function entry point
//		More   uintptr // set non-zero if more info for this PC
//		Data   uintptr // unused by runtime, available for function
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t PC;
//		char*     File;
//		uintptr_t Lineno;
//		char*     Func;
//		uintptr_t Entry;
//		uintptr_t More;
//		uintptr_t Data;
//	};
//
// The PC field will be a value returned by a call to the traceback
// function.
//
// The first time the function is called for a particular traceback,
// all the fields except PC will be 0. The function should fill in the
// other fields if possible, setting them to 0/nil if the information
// is not available. The Data field may be used to store any useful
// information across calls. The More field should be set to non-zero
// if there is more information for this PC, zero otherwise. If More
// is set non-zero, the function will be called again with the same
// PC, and may return different information (this is intended for use
// with inlined functions). If More is zero, the function will be
// called with the next PC value in the traceback. When the traceback
// is complete, the function will be called once more with PC set to
// zero; this may be used to free any information. Each call will
// leave the fields of the struct set to the same values they had upon
// return, except for the PC field when the More field is zero. The
// function must not keep a copy of the struct pointer between calls.
//
// When calling SetCgoTraceback, the version argument is the version
// number of the structs that the functions expect to receive.
// Currently this must be zero.
//
// The symbolizer function may be nil, in which case the results of
// the traceback function will be displayed as numbers. If the
// traceback function is nil, the symbolizer function will never be
// called. The context function may be nil, in which case the
// traceback function will only be called with the context field set
// to zero. If the context function is nil, then calls from Go to C
// to Go will not show a traceback for the C portion of the call stack.
//
// SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
	if version != 0 {
		panic("unsupported version")
	}

	if cgoTraceback != nil && cgoTraceback != traceback ||
		cgoContext != nil && cgoContext != context ||
		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
		panic("call SetCgoTraceback only once")
	}

	cgoTraceback = traceback
	cgoContext = context
	cgoSymbolizer = symbolizer

	// The context function is called when a C function calls a Go
	// function. As such it is only called by C code in runtime/cgo.
	if _cgo_set_context_function != nil {
		cgocall(_cgo_set_context_function, context)
	}
}
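
// Registration sketch from user code (illustrative; the C function name is
// hypothetical and its body is elided):
//
//	// void myTraceback(void* parg);  // declared in the cgo preamble
//	import "C"
//
//	func init() {
//		runtime.SetCgoTraceback(0, unsafe.Pointer(C.myTraceback), nil, nil)
//	}
//
// With a nil context function, C frames are only reported for tracebacks
// gathered during a cgo call; a nil symbolizer prints bare PCs.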

var cgoTraceback unsafe.Pointer
var cgoContext unsafe.Pointer
var cgoSymbolizer unsafe.Pointer

// cgoTracebackArg is the type passed to cgoTraceback.
type cgoTracebackArg struct {
	context    uintptr
	sigContext uintptr
	buf        *uintptr
	max        uintptr
}

// cgoContextArg is the type passed to the context function.
type cgoContextArg struct {
	context uintptr
}

// cgoSymbolizerArg is the type passed to cgoSymbolizer.
type cgoSymbolizerArg struct {
	pc       uintptr
	file     *byte
	lineno   uintptr
	funcName *byte
	entry    uintptr
	more     uintptr
	data     uintptr
}

// printCgoTraceback prints a traceback of callers.
func printCgoTraceback(callers *cgoCallers) {
	if cgoSymbolizer == nil {
		for _, c := range callers {
			if c == 0 {
				break
			}
			print("non-Go function at pc=", hex(c), "\n")
		}
		return
	}

	var arg cgoSymbolizerArg
	for _, c := range callers {
		if c == 0 {
			break
		}
		printOneCgoTraceback(c, 0x7fffffff, &arg)
	}
	arg.pc = 0
	callCgoSymbolizer(&arg)
}

// printOneCgoTraceback prints the traceback of a single cgo caller.
// This can print more than one line because of inlining.
// Returns the number of frames printed.
func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
	c := 0
	arg.pc = pc
	for c <= max {
		callCgoSymbolizer(arg)
		if arg.funcName != nil {
			// Note that we don't print any argument
			// information here, not even parentheses.
			// The symbolizer must add that if appropriate.
			println(gostringnocopy(arg.funcName))
		} else {
			println("non-Go function")
		}
		print("\t")
		if arg.file != nil {
			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
		}
		print("pc=", hex(pc), "\n")
		c++
		if arg.more == 0 {
			break
		}
	}
	return c
}

// callCgoSymbolizer calls the cgoSymbolizer function.
func callCgoSymbolizer(arg *cgoSymbolizerArg) {
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
}

// cgoContextPCs gets the PC values from a cgo traceback.
func cgoContextPCs(ctxt uintptr, buf []uintptr) {
	if cgoTraceback == nil {
		return
	}
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	arg := cgoTracebackArg{
		context: ctxt,
		buf:     (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
		max:     uintptr(len(buf)),
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
}