// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
// On systems without link registers, the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.
//
// To date, the opposite of a link register architecture is an x86 architecture.
// This code may need to change if some other kind of non-link-register
// architecture comes along.
//
// The other important fact is the size of a pointer: on 32-bit systems the LR
// takes up only 4 bytes on the stack, while on 64-bit systems it takes up 8 bytes.
// Typically this is ptrSize.
//
// As an exception, amd64p32 has ptrSize == 4 but the CALL instruction still
// stores an 8-byte return PC onto the stack. To accommodate this, we use regSize
// as the size of the architecture-pushed return PC.
//
// usesLR is defined below in terms of minFrameSize, which is defined in
// arch_$GOARCH.go. ptrSize and regSize are defined in stubs.go.

// usesLR reports whether this architecture saves the return address in a
// link register (nonzero minimum frame size) rather than pushing it on
// the stack the way x86 does.
const usesLR = sys.MinFrameSize > 0

// skipPC is the entry PC of skipPleaseUseCallersFrames, set in
// tracebackinit. gentraceback stores skipPC+N in a pcbuf to encode
// "N logical frames of the previous entry were skipped".
var skipPC uintptr

func tracebackinit() {
	// Go variable initialization happens late during runtime startup.
	// Instead of initializing the variables above in the declarations,
	// schedinit calls this function so that the variables are
	// initialized and available earlier in the startup sequence.
	skipPC = funcPC(skipPleaseUseCallersFrames)
}

// Traceback over the deferred function calls.
// Report them like calls that have been invoked but not started executing yet.
// callback is invoked once per deferred call with a synthesized frame;
// the walk stops early if callback returns false.
func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer) {
	var frame stkframe
	for d := gp._defer; d != nil; d = d.link {
		fn := d.fn
		if fn == nil {
			// Defer of nil function. Args don't matter.
			frame.pc = 0
			frame.fn = funcInfo{}
			frame.argp = 0
			frame.arglen = 0
			frame.argmap = nil
		} else {
			frame.pc = fn.fn
			f := findfunc(frame.pc)
			if !f.valid() {
				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
				throw("unknown pc")
			}
			frame.fn = f
			frame.argp = uintptr(deferArgs(d))
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, true)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, true, fn)
			}
		}
		frame.continpc = frame.pc
		if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
			return
		}
	}
}

// sizeofSkipFunction is the byte size reserved for skipPleaseUseCallersFrames
// in assembly, so that skipPC+N (N < sizeofSkipFunction) still symbolizes
// to that function.
const sizeofSkipFunction = 256

// This function is defined in asm.s to be sizeofSkipFunction bytes long.
func skipPleaseUseCallersFrames()

// Generic traceback. Handles runtime stack prints (pcbuf == nil),
// the runtime.Callers function (pcbuf != nil), as well as the garbage
// collector (callback != nil). A little clunky to merge these, but avoids
// duplicating the code and all its subtlety.
//
// The skip argument is only valid with pcbuf != nil and counts the number
// of logical frames to skip rather than physical frames (with inlining, a
// PC in pcbuf can represent multiple calls).
// If a PC is partially skipped
// and max > 1, pcbuf[1] will be runtime.skipPleaseUseCallersFrames+N where
// N indicates the number of logical frames to skip in pcbuf[0].
func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
	if skip > 0 && callback != nil {
		throw("gentraceback callback cannot be used with non-zero skip")
	}

	// Don't call this "g"; it's too easy get "g" and "gp" confused.
	if ourg := getg(); ourg == gp && ourg == ourg.m.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and gentraceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that gentraceback and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling getcallersp) must not run on that goroutine's stack but
		// instead on the g0 stack.
		throw("gentraceback cannot trace user goroutine on its own stack")
	}
	level, _, _ := gotraceback()

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.syscallsp != 0 {
			pc0 = gp.syscallpc
			sp0 = gp.syscallsp
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.sched.pc
			sp0 = gp.sched.sp
			if usesLR {
				lr0 = gp.sched.lr
			}
		}
	}

	nprint := 0
	var frame stkframe
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}
	waspanic := false
	cgoCtxt := gp.cgoCtxt
	printing := pcbuf == nil && callback == nil
	_defer := gp._defer
	elideWrapper := false

	// Skip defer entries with no recorded sp (deferred with no args yet).
	for _defer != nil && _defer.sp == _NoArgs {
		_defer = _defer.link
	}

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = uintptr(*(*sys.Uintreg)(unsafe.Pointer(frame.sp)))
			frame.sp += sys.RegSize
		}
	}

	f := findfunc(frame.pc)
	if !f.valid() {
		if callback != nil || printing {
			print("runtime: unknown pc ", hex(frame.pc), "\n")
			tracebackHexdump(gp.stack, &frame, 0)
		}
		if callback != nil {
			throw("unknown pc")
		}
		return 0
	}
	frame.fn = f

	var cache pcvalueCache

	n := 0
	for n < max {
		// Typically:
		//	pc is the PC of the running function.
		//	sp is the stack pointer at that program counter.
		//	fp is the frame pointer (caller's stack pointer) at that program counter, or nil if unknown.
		//	stk is the stack containing sp.
		//	The caller's program counter is lr, unless lr is zero, in which case it is *(uintptr*)sp.
		f = frame.fn
		if f.pcsp == 0 {
			// No frame information, must be external function, like race support.
			// See golang.org/issue/13568.
			break
		}

		// Found an actual function.
		// Derive frame pointer and link register.
		if frame.fp == 0 {
			// Jump over system stack transitions. If we're on g0 and there's a user
			// goroutine, try to jump. Otherwise this is a regular call.
			if flags&_TraceJumpStack != 0 && gp == gp.m.g0 && gp.m.curg != nil {
				switch f.funcID {
				case funcID_morestack:
					// morestack does not return normally -- newstack()
					// gogo's to curg.sched. Match that.
					// This keeps morestack() from showing up in the backtrace,
					// but that makes some sense since it'll never be returned
					// to.
					frame.pc = gp.m.curg.sched.pc
					frame.fn = findfunc(frame.pc)
					f = frame.fn
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				case funcID_systemstack:
					// systemstack returns normally, so just follow the
					// stack transition.
					frame.sp = gp.m.curg.sched.sp
					cgoCtxt = gp.m.curg.cgoCtxt
				}
			}
			frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &cache))
			if !usesLR {
				// On x86, call instruction pushes return PC before entering new function.
				frame.fp += sys.RegSize
			}
		}
		var flr funcInfo
		if topofstack(f, gp.m != nil && gp == gp.m.g0) {
			frame.lr = 0
			flr = funcInfo{}
		} else if usesLR && f.funcID == funcID_jmpdefer {
			// jmpdefer modifies SP/LR/PC non-atomically.
			// If a profiling interrupt arrives during jmpdefer,
			// the stack unwind may see a mismatched register set
			// and get confused. Stop if we see PC within jmpdefer
			// to avoid that confusion.
			// See golang.org/issue/8153.
			if callback != nil {
				throw("traceback_arm: found jmpdefer when tracing with callback")
			}
			frame.lr = 0
		} else {
			var lrPtr uintptr
			if usesLR {
				if n == 0 && frame.sp < frame.fp || frame.lr == 0 {
					lrPtr = frame.sp
					frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
				}
			} else {
				if frame.lr == 0 {
					lrPtr = frame.fp - sys.RegSize
					frame.lr = uintptr(*(*sys.Uintreg)(unsafe.Pointer(lrPtr)))
				}
			}
			flr = findfunc(frame.lr)
			if !flr.valid() {
				// This happens if you get a profiling interrupt at just the wrong time.
				// In that context it is okay to stop early.
				// But if callback is set, we're doing a garbage collection and must
				// get everything, so crash loudly.
				doPrint := printing
				if doPrint && gp.m.incgo && f.funcID == funcID_sigpanic {
					// We can inject sigpanic
					// calls directly into C code,
					// in which case we'll see a C
					// return PC. Don't complain.
					doPrint = false
				}
				if callback != nil || doPrint {
					print("runtime: unexpected return pc for ", funcname(f), " called from ", hex(frame.lr), "\n")
					tracebackHexdump(gp.stack, &frame, lrPtr)
				}
				if callback != nil {
					throw("unknown caller pc")
				}
			}
		}

		frame.varp = frame.fp
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.varp -= sys.RegSize
		}

		// If framepointer_enabled and there's a frame, then
		// there's a saved bp here.
		if frame.varp > frame.sp && (framepointer_enabled && GOARCH == "amd64" || GOARCH == "arm64") {
			frame.varp -= sys.RegSize
		}

		// Derive size of arguments.
		// Most functions have a fixed-size argument block,
		// so we can use metadata about the function f.
		// Not all, though: there are some variadic functions
		// in package runtime and reflect, and for those we use call-specific
		// metadata recorded by f's caller.
		if callback != nil || printing {
			frame.argp = frame.fp + sys.MinFrameSize
			var ok bool
			frame.arglen, frame.argmap, ok = getArgInfoFast(f, callback != nil)
			if !ok {
				frame.arglen, frame.argmap = getArgInfo(&frame, f, callback != nil, nil)
			}
		}

		// Determine frame's 'continuation PC', where it can continue.
		// Normally this is the return address on the stack, but if sigpanic
		// is immediately below this function on the stack, then the frame
		// stopped executing due to a trap, and frame.pc is probably not
		// a safe point for looking up liveness information. In this panicking case,
		// the function either doesn't return at all (if it has no defers or if the
		// defers do not recover) or it returns from one of the calls to
		// deferproc a second time (if the corresponding deferred func recovers).
		// In the latter case, use a deferreturn call site as the continuation pc.
		frame.continpc = frame.pc
		if waspanic {
			// We match up defers with frames using the SP.
			// However, if the function has an empty stack
			// frame, then it's possible (on LR machines)
			// for multiple call frames to have the same
			// SP. But, since a function with no frame
			// can't push a defer, the defer can't belong
			// to that frame.
			if _defer != nil && _defer.sp == frame.sp && frame.sp != frame.fp {
				frame.continpc = frame.fn.entry + uintptr(frame.fn.deferreturn) + 1
				// Note: the +1 is to offset the -1 that
				// stack.go:getStackMap does to back up a return
				// address make sure the pc is in the CALL instruction.
			} else {
				frame.continpc = 0
			}
		}

		// Unwind our local defer stack past this frame.
		for _defer != nil && ((_defer.sp == frame.sp && frame.sp != frame.fp) || _defer.sp == _NoArgs) {
			_defer = _defer.link
		}

		if callback != nil {
			if !callback((*stkframe)(noescape(unsafe.Pointer(&frame))), v) {
				return n
			}
		}

		if pcbuf != nil {
			if skip == 0 {
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc
			} else {
				// backup to CALL instruction to read inlining info (same logic as below)
				tracepc := frame.pc
				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
					tracepc--
				}
				inldata := funcdata(f, _FUNCDATA_InlTree)

				// no inlining info, skip the physical frame
				if inldata == nil {
					skip--
					goto skipped
				}

				ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, &cache)
				inltree := (*[1 << 20]inlinedCall)(inldata)
				// skip the logical (inlined) frames
				logicalSkipped := 0
				for ix >= 0 && skip > 0 {
					skip--
					logicalSkipped++
					ix = inltree[ix].parent
				}

				// skip the physical frame if there's more to skip
				if skip > 0 {
					skip--
					goto skipped
				}

				// now we have a partially skipped frame
				(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = frame.pc

				// if there's room, pcbuf[1] is a skip PC that encodes the number of skipped frames in pcbuf[0]
				if n+1 < max {
					n++
					pc := skipPC + uintptr(logicalSkipped)
					(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
				}
			}
		}

		if printing {
			// assume skip=0 for printing.
			//
			// Never elide wrappers if we haven't printed
			// any frames. And don't elide wrappers that
			// called panic rather than the wrapped
			// function. Otherwise, leave them out.
			name := funcname(f)
			nextElideWrapper := elideWrapperCalling(name)
			if (flags&_TraceRuntimeFrames) != 0 || showframe(f, gp, nprint == 0, elideWrapper && nprint != 0) {
				// Print during crash.
				//	main(0x1, 0x2, 0x3)
				//		/home/rsc/go/src/runtime/x.go:23 +0xf
				//
				tracepc := frame.pc // back up to CALL instruction for funcline.
				if (n > 0 || flags&_TraceTrap == 0) && frame.pc > f.entry && !waspanic {
					tracepc--
				}
				file, line := funcline(f, tracepc)
				inldata := funcdata(f, _FUNCDATA_InlTree)
				if inldata != nil {
					// Walk the inline tree, printing each inlined
					// (logical) call before the physical frame.
					inltree := (*[1 << 20]inlinedCall)(inldata)
					ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
					for ix != -1 {
						name := funcnameFromNameoff(f, inltree[ix].func_)
						print(name, "(...)\n")
						print("\t", file, ":", line, "\n")

						file = funcfile(f, inltree[ix].file)
						line = inltree[ix].line
						ix = inltree[ix].parent
					}
				}
				if name == "runtime.gopanic" {
					name = "panic"
				}
				print(name, "(")
				argp := (*[100]uintptr)(unsafe.Pointer(frame.argp))
				for i := uintptr(0); i < frame.arglen/sys.PtrSize; i++ {
					if i >= 10 {
						print(", ...")
						break
					}
					if i != 0 {
						print(", ")
					}
					print(hex(argp[i]))
				}
				print(")\n")
				print("\t", file, ":", line)
				if frame.pc > f.entry {
					print(" +", hex(frame.pc-f.entry))
				}
				if gp.m != nil && gp.m.throwing > 0 && gp == gp.m.curg || level >= 2 {
					print(" fp=", hex(frame.fp), " sp=", hex(frame.sp), " pc=", hex(frame.pc))
				}
				print("\n")
				nprint++
			}
			elideWrapper = nextElideWrapper
		}
		n++

	skipped:
		if f.funcID == funcID_cgocallback_gofunc && len(cgoCtxt) > 0 {
			ctxt := cgoCtxt[len(cgoCtxt)-1]
			cgoCtxt = cgoCtxt[:len(cgoCtxt)-1]

			// skip only applies to Go frames.
			// callback != nil only used when we only care
			// about Go frames.
			if skip == 0 && callback == nil {
				n = tracebackCgoContext(pcbuf, printing, ctxt, n, max)
			}
		}

		waspanic = f.funcID == funcID_sigpanic

		// Do not unwind past the bottom of the stack.
		if !flr.valid() {
			break
		}

		// Unwind to next frame.
		frame.fn = flr
		frame.pc = frame.lr
		frame.lr = 0
		frame.sp = frame.fp
		frame.fp = 0
		frame.argmap = nil

		// On link register architectures, sighandler saves the LR on stack
		// before faking a call to sigpanic.
		if usesLR && waspanic {
			x := *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.sp += sys.MinFrameSize
			if GOARCH == "arm64" {
				// arm64 needs 16-byte aligned SP, always
				frame.sp += sys.PtrSize
			}
			f = findfunc(frame.pc)
			frame.fn = f
			if !f.valid() {
				frame.pc = x
			} else if funcspdelta(f, frame.pc, &cache) == 0 {
				frame.lr = x
			}
		}
	}

	if printing {
		n = nprint
	}

	// If callback != nil, we're being called to gather stack information during
	// garbage collection or stack growth. In that context, require that we used
	// up the entire defer stack. If not, then there is a bug somewhere and the
	// garbage collection or stack growth may not have seen the correct picture
	// of the stack. Crash now instead of silently executing the garbage collection
	// or stack copy incorrectly and setting up for a mysterious crash later.
	//
	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running, d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	// It's okay in those situations not to use up the entire defer stack:
	// incomplete information then is still better than nothing.
	if callback != nil && n < max && _defer != nil {
		if _defer != nil {
			print("runtime: g", gp.goid, ": leftover defer sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
		}
		for _defer = gp._defer; _defer != nil; _defer = _defer.link {
			print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
		}
		throw("traceback has leftover defers")
	}

	if callback != nil && n < max && frame.sp != gp.stktopsp {
		print("runtime: g", gp.goid, ": frame.sp=", hex(frame.sp), " top=", hex(gp.stktopsp), "\n")
		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "] n=", n, " max=", max, "\n")
		throw("traceback did not unwind completely")
	}

	return n
}

// reflectMethodValue is a partial duplicate of reflect.makeFuncImpl
// and reflect.methodValue.
type reflectMethodValue struct {
	fn     uintptr
	stack  *bitvector // ptrmap for both args and results
	argLen uintptr    // just args
}

// getArgInfoFast returns the argument frame information for a call to f.
// It is short and inlineable. However, it does not handle all functions.
// If ok reports false, you must call getArgInfo instead.
// TODO(josharian): once we do mid-stack inlining,
// call getArgInfo directly from getArgInfoFast and stop returning an ok bool.
func getArgInfoFast(f funcInfo, needArgMap bool) (arglen uintptr, argmap *bitvector, ok bool) {
	return uintptr(f.args), nil, !(needArgMap && f.args == _ArgsSizeUnknown)
}

// getArgInfo returns the argument frame information for a call to f
// with call frame frame.
//
// This is used for both actual calls with active stack frames and for
// deferred calls that are not yet executing. If this is an actual
// call, ctxt must be nil (getArgInfo will retrieve what it needs from
// the active stack frame). If this is a deferred call, ctxt must be
// the function object that was deferred.
func getArgInfo(frame *stkframe, f funcInfo, needArgMap bool, ctxt *funcval) (arglen uintptr, argmap *bitvector) {
	arglen = uintptr(f.args)
	if needArgMap && f.args == _ArgsSizeUnknown {
		// Extract argument bitmaps for reflect stubs from the calls they made to reflect.
		switch funcname(f) {
		case "reflect.makeFuncStub", "reflect.methodValueCall":
			// These take a *reflect.methodValue as their
			// context register.
			var mv *reflectMethodValue
			var retValid bool
			if ctxt != nil {
				// This is not an actual call, but a
				// deferred call. The function value
				// is itself the *reflect.methodValue.
				mv = (*reflectMethodValue)(unsafe.Pointer(ctxt))
			} else {
				// This is a real call that took the
				// *reflect.methodValue as its context
				// register and immediately saved it
				// to 0(SP). Get the methodValue from
				// 0(SP).
				arg0 := frame.sp + sys.MinFrameSize
				mv = *(**reflectMethodValue)(unsafe.Pointer(arg0))
				// Figure out whether the return values are valid.
				// Reflect will update this value after it copies
				// in the return values.
				retValid = *(*bool)(unsafe.Pointer(arg0 + 3*sys.PtrSize))
			}
			if mv.fn != f.entry {
				print("runtime: confused by ", funcname(f), "\n")
				throw("reflect mismatch")
			}
			bv := mv.stack
			arglen = uintptr(bv.n * sys.PtrSize)
			if !retValid {
				// Until reflect has copied in the results, only
				// the argument portion of the frame is live;
				// truncate arglen to the (pointer-aligned) args.
				arglen = uintptr(mv.argLen) &^ (sys.PtrSize - 1)
			}
			argmap = bv
		}
	}
	return
}

// tracebackCgoContext handles tracing back a cgo context value, from
// the context argument to setCgoTraceback, for the gentraceback
// function. It returns the new value of n.
func tracebackCgoContext(pcbuf *uintptr, printing bool, ctxt uintptr, n, max int) int {
	var cgoPCs [32]uintptr
	cgoContextPCs(ctxt, cgoPCs[:])
	var arg cgoSymbolizerArg
	anySymbolized := false
	for _, pc := range cgoPCs {
		if pc == 0 || n >= max {
			break
		}
		if pcbuf != nil {
			(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = pc
		}
		if printing {
			if cgoSymbolizer == nil {
				print("non-Go function at pc=", hex(pc), "\n")
			} else {
				c := printOneCgoTraceback(pc, max-n, &arg)
				n += c - 1 // +1 a few lines down
				anySymbolized = true
			}
		}
		n++
	}
	if anySymbolized {
		// Call the symbolizer one last time with pc == 0 so it can
		// release any state it accumulated during the walk.
		arg.pc = 0
		callCgoSymbolizer(&arg)
	}
	return n
}

// printcreatedby prints the "created by" line for gp, identifying the
// call that started the goroutine. The main goroutine (goid 1) and
// frames that showframe rejects are skipped.
func printcreatedby(gp *g) {
	// Show what created goroutine, except main goroutine (goid 1).
	pc := gp.gopc
	f := findfunc(pc)
	if f.valid() && showframe(f, gp, false, false) && gp.goid != 1 {
		printcreatedby1(f, pc)
	}
}

// printcreatedby1 prints a single "created by" entry for the function f
// whose go statement was at pc.
func printcreatedby1(f funcInfo, pc uintptr) {
	print("created by ", funcname(f), "\n")
	tracepc := pc // back up to CALL instruction for funcline.
	if pc > f.entry {
		tracepc -= sys.PCQuantum
	}
	file, line := funcline(f, tracepc)
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
}

// traceback prints a stack trace of gp starting from pc/sp/lr with
// default flags (runtime frames omitted).
func traceback(pc, sp, lr uintptr, gp *g) {
	traceback1(pc, sp, lr, gp, 0)
}

// tracebacktrap is like traceback but expects that the PC and SP were obtained
// from a trap, not from gp->sched or gp->syscallpc/gp->syscallsp or getcallerpc/getcallersp.
// Because they are from a trap instead of from a saved pair,
// the initial PC must not be rewound to the previous instruction.
// (All the saved pairs record a PC that is a return address, so we
// rewind it into the CALL instruction.)
// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
// the pc/sp/lr passed in.
func tracebacktrap(pc, sp, lr uintptr, gp *g) {
	if gp.m.libcallsp != 0 {
		// We're in C code somewhere, traceback from the saved position.
		traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
		return
	}
	traceback1(pc, sp, lr, gp, _TraceTrap)
}

// traceback1 prints gp's stack, preceded by any saved cgo traceback,
// honoring the given _Trace* flags, and follows it with the "created by"
// line and any recorded ancestor goroutine tracebacks.
func traceback1(pc, sp, lr uintptr, gp *g, flags uint) {
	// If the goroutine is in cgo, and we have a cgo traceback, print that.
	if iscgo && gp.m != nil && gp.m.ncgo > 0 && gp.syscallsp != 0 && gp.m.cgoCallers != nil && gp.m.cgoCallers[0] != 0 {
		// Lock cgoCallers so that a signal handler won't
		// change it, copy the array, reset it, unlock it.
		// We are locked to the thread and are not running
		// concurrently with a signal handler.
		// We just have to stop a signal handler from interrupting
		// in the middle of our copy.
		atomic.Store(&gp.m.cgoCallersUse, 1)
		cgoCallers := *gp.m.cgoCallers
		gp.m.cgoCallers[0] = 0
		atomic.Store(&gp.m.cgoCallersUse, 0)

		printCgoTraceback(&cgoCallers)
	}

	var n int
	if readgstatus(gp)&^_Gscan == _Gsyscall {
		// Override registers if blocked in system call.
		pc = gp.syscallpc
		sp = gp.syscallsp
		flags &^= _TraceTrap
	}
	// Print traceback. By default, omits runtime frames.
	// If that means we print nothing at all, repeat forcing all frames printed.
	n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags)
	if n == 0 && (flags&_TraceRuntimeFrames) == 0 {
		n = gentraceback(pc, sp, lr, gp, 0, nil, _TracebackMaxFrames, nil, nil, flags|_TraceRuntimeFrames)
	}
	if n == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	printcreatedby(gp)

	if gp.ancestors == nil {
		return
	}
	for _, ancestor := range *gp.ancestors {
		printAncestorTraceback(ancestor)
	}
}

// printAncestorTraceback prints the traceback of the given ancestor.
// TODO: Unify this with gentraceback and CallersFrames.
func printAncestorTraceback(ancestor ancestorInfo) {
	print("[originating from goroutine ", ancestor.goid, "]:\n")
	elideWrapper := false
	for fidx, pc := range ancestor.pcs {
		f := findfunc(pc) // f previously validated
		if showfuncinfo(f, fidx == 0, elideWrapper && fidx != 0) {
			elideWrapper = printAncestorTracebackFuncInfo(f, pc)
		}
	}
	if len(ancestor.pcs) == _TracebackMaxFrames {
		print("...additional frames elided...\n")
	}
	// Show what created goroutine, except main goroutine (goid 1).
	f := findfunc(ancestor.gopc)
	if f.valid() && showfuncinfo(f, false, false) && ancestor.goid != 1 {
		printcreatedby1(f, ancestor.gopc)
	}
}

// printAncestorTracebackFuncInfo prints the given function info at a given pc
// within an ancestor traceback. The precision of this info is reduced
// due to only have access to the pcs at the time of the caller
// goroutine being created. It returns whether a wrapper calling this
// function should be elided (see elideWrapperCalling).
func printAncestorTracebackFuncInfo(f funcInfo, pc uintptr) bool {
	tracepc := pc // back up to CALL instruction for funcline.
	if pc > f.entry {
		tracepc -= sys.PCQuantum
	}
	file, line := funcline(f, tracepc)
	inldata := funcdata(f, _FUNCDATA_InlTree)
	if inldata != nil {
		// Print each inlined (logical) call before the physical frame.
		inltree := (*[1 << 20]inlinedCall)(inldata)
		ix := pcdatavalue(f, _PCDATA_InlTreeIndex, tracepc, nil)
		for ix != -1 {
			name := funcnameFromNameoff(f, inltree[ix].func_)
			print(name, "(...)\n")
			print("\t", file, ":", line, "\n")

			file = funcfile(f, inltree[ix].file)
			line = inltree[ix].line
			ix = inltree[ix].parent
		}
	}
	name := funcname(f)
	if name == "runtime.gopanic" {
		name = "panic"
	}
	print(name, "(...)\n")
	print("\t", file, ":", line)
	if pc > f.entry {
		print(" +", hex(pc-f.entry))
	}
	print("\n")
	return elideWrapperCalling(name)
}

// callers fills pcbuf with return PCs from the calling goroutine's
// stack, after skipping skip frames, and reports how many were written.
// The walk runs on the g0 stack because gentraceback must not run on
// the goroutine whose stack it inspects.
func callers(skip int, pcbuf []uintptr) int {
	sp := getcallersp()
	pc := getcallerpc()
	gp := getg()
	var n int
	systemstack(func() {
		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
	})
	return n
}

// gcallers is like callers but records the stack of an arbitrary
// goroutine gp, starting from its saved scheduling state.
func gcallers(gp *g, skip int, pcbuf []uintptr) int {
	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
}

// showframe reports whether the frame for f should be printed in a
// traceback of gp. While this thread is throwing, every frame of the
// crashing goroutine is shown.
func showframe(f funcInfo, gp *g, firstFrame, elideWrapper bool) bool {
	g := getg()
	if g.m.throwing > 0 && gp != nil && (gp == g.m.curg || gp == g.m.caughtsig.ptr()) {
		return true
	}
	return showfuncinfo(f, firstFrame, elideWrapper)
}

// showfuncinfo reports whether a frame with function info f should be
// printed during a traceback, based on the GOTRACEBACK level and the
// runtime-frame filtering rules.
func showfuncinfo(f funcInfo, firstFrame, elideWrapper bool) bool {
	level, _, _ := gotraceback()
	if level > 1 {
		// Show all frames.
		return true
	}

	if !f.valid() {
		return false
	}

	if elideWrapper {
		file, _ := funcline(f, f.entry)
		if file == "<autogenerated>" {
			return false
		}
	}

	name := funcname(f)

	// Special case: always show runtime.gopanic frame
	// in the middle of a stack trace, so that we can
	// see the boundary between ordinary code and
	// panic-induced deferred code.
	// See golang.org/issue/5832.
	if name == "runtime.gopanic" && !firstFrame {
		return true
	}

	return contains(name, ".") && (!hasPrefix(name, "runtime.") || isExportedRuntime(name))
}

// isExportedRuntime reports whether name is an exported runtime function.
// It is only for runtime functions, so ASCII A-Z is fine.
func isExportedRuntime(name string) bool {
	const n = len("runtime.")
	return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z'
}

// elideWrapperCalling returns whether a wrapper function that called
// function "name" should be elided from stack traces.
func elideWrapperCalling(name string) bool {
	// If the wrapper called a panic function instead of the
	// wrapped function, we want to include it in stacks.
	return !(name == "runtime.gopanic" || name == "runtime.sigpanic" || name == "runtime.panicwrap")
}

// gStatusStrings maps a goroutine status constant to the label used in
// goroutine headers.
var gStatusStrings = [...]string{
	_Gidle:      "idle",
	_Grunnable:  "runnable",
	_Grunning:   "running",
	_Gsyscall:   "syscall",
	_Gwaiting:   "waiting",
	_Gdead:      "dead",
	_Gcopystack: "copystack",
}

// goroutineheader prints the "goroutine N [status, ...]:" line that
// precedes each goroutine's stack in a traceback.
func goroutineheader(gp *g) {
	gpstatus := readgstatus(gp)

	isScan := gpstatus&_Gscan != 0
	gpstatus &^= _Gscan // drop the scan bit

	// Basic string status
	var status string
	if 0 <= gpstatus && gpstatus < uint32(len(gStatusStrings)) {
		status = gStatusStrings[gpstatus]
	} else {
		status = "???"
	}

	// Override.
	if gpstatus == _Gwaiting && gp.waitreason != waitReasonZero {
		status = gp.waitreason.String()
	}

	// approx time the G is blocked, in minutes
	var waitfor int64
	if (gpstatus == _Gwaiting || gpstatus == _Gsyscall) && gp.waitsince != 0 {
		waitfor = (nanotime() - gp.waitsince) / 60e9
	}
	print("goroutine ", gp.goid, " [", status)
	if isScan {
		print(" (scan)")
	}
	if waitfor >= 1 {
		print(", ", waitfor, " minutes")
	}
	if gp.lockedm != 0 {
		print(", locked to thread")
	}
	print("]:\n")
}

// tracebackothers prints stacks for every goroutine other than me,
// filtered by the GOTRACEBACK level (system goroutines are shown only
// at level >= 2).
func tracebackothers(me *g) {
	level, _, _ := gotraceback()

	// Show the current goroutine first, if we haven't already.
	g := getg()
	gp := g.m.curg
	if gp != nil && gp != me {
		print("\n")
		goroutineheader(gp)
		traceback(^uintptr(0), ^uintptr(0), 0, gp)
	}

	lock(&allglock)
	for _, gp := range allgs {
		if gp == me || gp == g.m.curg || readgstatus(gp) == _Gdead || isSystemGoroutine(gp, false) && level < 2 {
			continue
		}
		print("\n")
		goroutineheader(gp)
		// Note: gp.m == g.m occurs when tracebackothers is
		// called from a signal handler initiated during a
		// systemstack call. The original G is still in the
		// running state, and we want to print its stack.
		if gp.m != g.m && readgstatus(gp)&^_Gscan == _Grunning {
			print("\tgoroutine running on other thread; stack unavailable\n")
			printcreatedby(gp)
		} else {
			traceback(^uintptr(0), ^uintptr(0), 0, gp)
		}
	}
	unlock(&allglock)
}

// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will mark it as well.
func tracebackHexdump(stk stack, frame *stkframe, bad uintptr) {
	const expand = 32 * sys.PtrSize
	const maxExpand = 256 * sys.PtrSize
	// Start around frame.sp.
	lo, hi := frame.sp, frame.sp
	// Expand to include frame.fp.
	if frame.fp != 0 && frame.fp < lo {
		lo = frame.fp
	}
	if frame.fp != 0 && frame.fp > hi {
		hi = frame.fp
	}
	// Expand a bit more.
	lo, hi = lo-expand, hi+expand
	// But don't go too far from frame.sp.
	if lo < frame.sp-maxExpand {
		lo = frame.sp - maxExpand
	}
	if hi > frame.sp+maxExpand {
		hi = frame.sp + maxExpand
	}
	// And don't go outside the stack bounds.
	if lo < stk.lo {
		lo = stk.lo
	}
	if hi > stk.hi {
		hi = stk.hi
	}

	// Print the hex dump.
	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.lo), ",", hex(stk.hi), ")\n")
	hexdumpWords(lo, hi, func(p uintptr) byte {
		switch p {
		case frame.fp:
			return '>'
		case frame.sp:
			return '<'
		case bad:
			return '!'
		}
		return 0
	})
}

// topofstack reports whether f marks the top of a goroutine stack,
// i.e. a function past which the unwinder must not continue.
func topofstack(f funcInfo, g0 bool) bool {
	return f.funcID == funcID_goexit ||
		f.funcID == funcID_mstart ||
		f.funcID == funcID_mcall ||
		f.funcID == funcID_morestack ||
		f.funcID == funcID_rt0_go ||
		f.funcID == funcID_externalthreadhandler ||
		// asmcgocall is TOS on the system stack because it
		// switches to the system stack, but in this case we
		// can come back to the regular stack and still want
		// to be able to unwind through the call that appeared
		// on the regular stack.
		(g0 && f.funcID == funcID_asmcgocall)
}

// isSystemGoroutine reports whether the goroutine g must be omitted
// in stack dumps and deadlock detector. This is any goroutine that
// starts at a runtime.* entry point, except for runtime.main and
// sometimes runtime.runfinq.
//
// If fixed is true, any goroutine that can vary between user and
// system (that is, the finalizer goroutine) is considered a user
// goroutine.
func isSystemGoroutine(gp *g, fixed bool) bool {
	// Keep this in sync with cmd/trace/trace.go:isSystemGoroutine.
	f := findfunc(gp.startpc)
	if !f.valid() {
		return false
	}
	if f.funcID == funcID_runtime_main {
		return false
	}
	if f.funcID == funcID_runfinq {
		// We include the finalizer goroutine if it's calling
		// back into user code.
		if fixed {
			// This goroutine can vary. In fixed mode,
			// always consider it a user goroutine.
			return false
		}
		return !fingRunning
	}
	// Everything else started at a runtime.* entry point is a system
	// goroutine.
	return hasPrefix(funcname(f), "runtime.")
}

// SetCgoTraceback records three C functions to use to gather
// traceback information from C code and to convert that traceback
// information into symbolic information. These are used when printing
// stack traces for a program that uses cgo.
//
// The traceback and context functions may be called from a signal
// handler, and must therefore use only async-signal safe functions.
// The symbolizer function may be called while the program is
// crashing, and so must be cautious about using memory. None of the
// functions may call back into Go.
//
// The context function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t Context;
//	};
//
// If the Context field is 0, the context function is being called to
// record the current traceback context. It should record in the
// Context field whatever information is needed about the current
// point of execution to later produce a stack trace, probably the
// stack pointer and PC. In this case the context function will be
// called from C code.
//
// If the Context field is not 0, then it is a value returned by a
// previous call to the context function. This case is called when the
// context is no longer needed; that is, when the Go code is returning
// to its C code caller. This permits the context function to release
// any associated resources.
//
// While it would be correct for the context function to record a
// complete stack trace whenever it is called, and simply copy that
// out in the traceback function, in a typical program the context
// function will be called many times without ever recording a
// traceback for that context. Recording a complete stack trace in a
// call to the context function is likely to be inefficient.
//
// The traceback function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		Context    uintptr
//		SigContext uintptr
//		Buf        *uintptr
//		Max        uintptr
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t  Context;
//		uintptr_t  SigContext;
//		uintptr_t* Buf;
//		uintptr_t  Max;
//	};
//
// The Context field will be zero to gather a traceback from the
// current program execution point. In this case, the traceback
// function will be called from C code.
//
// Otherwise Context will be a value previously returned by a call to
// the context function. The traceback function should gather a stack
// trace from that saved point in the program execution. The traceback
// function may be called from an execution thread other than the one
// that recorded the context, but only when the context is known to be
// valid and unchanging. The traceback function may also be called
// deeper in the call stack on the same thread that recorded the
// context. The traceback function may be called multiple times with
// the same Context value; it will usually be appropriate to cache the
// result, if possible, the first time this is called for a specific
// context value.
//
// If the traceback function is called from a signal handler on a Unix
// system, SigContext will be the signal context argument passed to
// the signal handler (a C ucontext_t* cast to uintptr_t). This may be
// used to start tracing at the point where the signal occurred. If
// the traceback function is not called from a signal handler,
// SigContext will be zero.
//
// Buf is where the traceback information should be stored. It should
// be PC values, such that Buf[0] is the PC of the caller, Buf[1] is
// the PC of that function's caller, and so on. Max is the maximum
// number of entries to store. The function should store a zero to
// indicate the top of the stack, or that the caller is on a different
// stack, presumably a Go stack.
//
// Unlike runtime.Callers, the PC values returned should, when passed
// to the symbolizer function, return the file/line of the call
// instruction. No additional subtraction is required or appropriate.
//
// On all platforms, the traceback function is invoked when a call from
// Go to C to Go requests a stack trace. On linux/amd64, linux/ppc64le,
// and freebsd/amd64, the traceback function is also invoked when a
// signal is received by a thread that is executing a cgo call. The
// traceback function should not make assumptions about when it is
// called, as future versions of Go may make additional calls.
//
// The symbolizer function will be called with a single argument, a
// pointer to a struct:
//
//	struct {
//		PC      uintptr // program counter to fetch information for
//		File    *byte   // file name (NUL terminated)
//		Lineno  uintptr // line number
//		Func    *byte   // function name (NUL terminated)
//		Entry   uintptr // function entry point
//		More    uintptr // set non-zero if more info for this PC
//		Data    uintptr // unused by runtime, available for function
//	}
//
// In C syntax, this struct will be
//
//	struct {
//		uintptr_t PC;
//		char*     File;
//		uintptr_t Lineno;
//		char*     Func;
//		uintptr_t Entry;
//		uintptr_t More;
//		uintptr_t Data;
//	};
//
// The PC field will be a value returned by a call to the traceback
// function.
//
// The first time the function is called for a particular traceback,
// all the fields except PC will be 0. The function should fill in the
// other fields if possible, setting them to 0/nil if the information
// is not available. The Data field may be used to store any useful
// information across calls. The More field should be set to non-zero
// if there is more information for this PC, zero otherwise. If More
// is set non-zero, the function will be called again with the same
// PC, and may return different information (this is intended for use
// with inlined functions). If More is zero, the function will be
// called with the next PC value in the traceback. When the traceback
// is complete, the function will be called once more with PC set to
// zero; this may be used to free any information. Each call will
// leave the fields of the struct set to the same values they had upon
// return, except for the PC field when the More field is zero. The
// function must not keep a copy of the struct pointer between calls.
//
// When calling SetCgoTraceback, the version argument is the version
// number of the structs that the functions expect to receive.
// Currently this must be zero.
//
// The symbolizer function may be nil, in which case the results of
// the traceback function will be displayed as numbers. If the
// traceback function is nil, the symbolizer function will never be
// called. The context function may be nil, in which case the
// traceback function will only be called with the context field set
// to zero. If the context function is nil, then calls from Go to C
// to Go will not show a traceback for the C portion of the call stack.
//
// SetCgoTraceback should be called only once, ideally from an init function.
func SetCgoTraceback(version int, traceback, context, symbolizer unsafe.Pointer) {
	if version != 0 {
		panic("unsupported version")
	}

	// Each function may be registered at most once. Passing the same
	// pointers again is tolerated; passing different ones panics.
	if cgoTraceback != nil && cgoTraceback != traceback ||
		cgoContext != nil && cgoContext != context ||
		cgoSymbolizer != nil && cgoSymbolizer != symbolizer {
		panic("call SetCgoTraceback only once")
	}

	cgoTraceback = traceback
	cgoContext = context
	cgoSymbolizer = symbolizer

	// The context function is called when a C function calls a Go
	// function. As such it is only called by C code in runtime/cgo.
	if _cgo_set_context_function != nil {
		cgocall(_cgo_set_context_function, context)
	}
}

// C function pointers registered by SetCgoTraceback.
var cgoTraceback unsafe.Pointer
var cgoContext unsafe.Pointer
var cgoSymbolizer unsafe.Pointer

// cgoTracebackArg is the type passed to cgoTraceback.
// It mirrors the C-side traceback struct documented on SetCgoTraceback.
type cgoTracebackArg struct {
	context    uintptr
	sigContext uintptr
	buf        *uintptr
	max        uintptr
}

// cgoContextArg is the type passed to the context function.
type cgoContextArg struct {
	context uintptr
}

// cgoSymbolizerArg is the type passed to cgoSymbolizer.
// It mirrors the C-side symbolizer struct documented on SetCgoTraceback.
type cgoSymbolizerArg struct {
	pc       uintptr
	file     *byte
	lineno   uintptr
	funcName *byte
	entry    uintptr
	more     uintptr
	data     uintptr
}

// printCgoTraceback prints a traceback of callers.
func printCgoTraceback(callers *cgoCallers) {
	// With no symbolizer registered there is nothing to resolve;
	// just list the raw PCs.
	if cgoSymbolizer == nil {
		for _, c := range callers {
			if c == 0 {
				break
			}
			print("non-Go function at pc=", hex(c), "\n")
		}
		return
	}

	var arg cgoSymbolizerArg
	for _, c := range callers {
		if c == 0 {
			break
		}
		printOneCgoTraceback(c, 0x7fffffff, &arg)
	}
	// Final call with pc == 0 lets the symbolizer free any state it
	// kept across calls (see the SetCgoTraceback contract).
	arg.pc = 0
	callCgoSymbolizer(&arg)
}

// printOneCgoTraceback prints the traceback of a single cgo caller.
// This can print more than one line because of inlining: the
// symbolizer keeps arg.more non-zero while it has further frames for
// the same PC. NOTE(review): the c > max check happens before the
// increment, so up to max+1 frames can be printed for one PC.
// Returns the number of frames printed.
func printOneCgoTraceback(pc uintptr, max int, arg *cgoSymbolizerArg) int {
	c := 0
	arg.pc = pc
	for {
		if c > max {
			break
		}
		callCgoSymbolizer(arg)
		if arg.funcName != nil {
			// Note that we don't print any argument
			// information here, not even parentheses.
			// The symbolizer must add that if appropriate.
			println(gostringnocopy(arg.funcName))
		} else {
			println("non-Go function")
		}
		print("\t")
		if arg.file != nil {
			print(gostringnocopy(arg.file), ":", arg.lineno, " ")
		}
		print("pc=", hex(pc), "\n")
		c++
		if arg.more == 0 {
			break
		}
	}
	return c
}

// callCgoSymbolizer calls the cgoSymbolizer function.
func callCgoSymbolizer(arg *cgoSymbolizerArg) {
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	if msanenabled {
		// Tell msan the argument memory has been written before C
		// reads it.
		msanwrite(unsafe.Pointer(arg), unsafe.Sizeof(cgoSymbolizerArg{}))
	}
	call(cgoSymbolizer, noescape(unsafe.Pointer(arg)))
}

// cgoContextPCs gets the PC values from a cgo traceback, filling buf
// via the registered cgoTraceback function for the saved context ctxt.
// It is a no-op if no traceback function was registered.
func cgoContextPCs(ctxt uintptr, buf []uintptr) {
	if cgoTraceback == nil {
		return
	}
	call := cgocall
	if panicking > 0 || getg().m.curg != getg() {
		// We do not want to call into the scheduler when panicking
		// or when on the system stack.
		call = asmcgocall
	}
	arg := cgoTracebackArg{
		context: ctxt,
		buf:     (*uintptr)(noescape(unsafe.Pointer(&buf[0]))),
		max:     uintptr(len(buf)),
	}
	if msanenabled {
		// Tell msan the argument memory has been written before C
		// reads it.
		msanwrite(unsafe.Pointer(&arg), unsafe.Sizeof(arg))
	}
	call(cgoTraceback, noescape(unsafe.Pointer(&arg)))
}