github.com/lzhfromustc/gofuzz@v0.0.0-20211116160056-151b3108bbd1/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
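//
// For illustration (an assumed example of compiler output, not text from this
// file): an index expression like s[i] compiles to a bounds check that, on
// failure, calls the assembly shim panicIndex, which tail calls goPanicIndex:
//
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // never returns; ends up in goPanicIndex(i, len(s))
//	}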
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)

var shiftError = error(errorString("negative shift amount"))

func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its siz and fn fields initialized.
// All other fields can contain junk.
// The defer record must be immediately followed in memory by
// the arguments of the defer.
// Nosplit because the arguments on the stack won't be scanned
// until the defer record is spliced into the gp._defer list.
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// siz and fn are already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false
	d.openDefer = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	d.framepc = 0
	d.varp = 0
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
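//
// For illustration (the exact header size is an assumption, not taken from
// this file): on a 64-bit system with a 72-byte _defer header, minDeferAlloc
// rounds up to 80 and minDeferArgs is 8, so deferclass maps arg sizes 0..8 to
// class 0, 9..24 to class 1, 25..40 to class 2, and so on, in step with the
// 16-byte-multiple malloc size classes described above.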
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	d.heap = true
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		return
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.openDefer = false
	d.sp = 0
	d.pc = 0
	d.framepc = 0
	d.varp = 0
	d.fd = nil
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
//
// Declared as nosplit, because the function should not be preempted once we start
// modifying the caller's frame in order to reuse the frame to call the deferred
// function.
//
// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}
	if d.openDefer {
		done := runOpenDeferFrame(gp, d)
		if !done {
			throw("unfinished open-coded defers in deferreturn")
		}
		gp._defer = d.link
		freedefer(d)
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// If the defer function pointer is nil, force the seg fault to happen
	// here rather than in jmpdefer. gentraceback() throws an error if it is
	// called with a callback on an LR architecture and jmpdefer is on the
	// stack, because the stack trace can be incorrect in that case - see
	// issue #8153.
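	// (fn is a *funcval, so the load of fn.fn below dereferences it and
	// faults here if fn is nil, as described above.)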
	_ = fn.fn
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {

			// Save the pc/sp in reflectcallSave(), so we can "recover" back to this
			// loop if necessary.
			reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing while panicking.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
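//
// For illustration (the output shown is an assumption about the usual format,
// not taken from this file), a crash after an inner recovered panic followed
// by a second panic prints along the lines of:
//
//	panic: first [recovered]
//		panic: second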
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// addOneOpenDeferFrame scans the stack for the first frame (if any) with
// open-coded defers and if it finds one, adds a single record to the defer chain
// for that frame. If sp is non-nil, it starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record
// (which has just been finished). Hence, it continues the stack scan from the
// frame of the defer that just finished. It skips any frame that already has an
// open-coded _defer record, which would have been created from a previous
// (unrecovered) panic.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				maxargsize, _ := readvarintUnsafe(fd)
				d1 := newdefer(int32(maxargsize))
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record
				return false
			},
			nil, 0)
	})
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// There is a similar function runtime.readvarint, which takes a slice of bytes,
// rather than an unsafe pointer. These functions are duplicated, because one of
// the two use cases for the functions would get slower if the functions were
// combined.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)((unsafe.Pointer(fd)))
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += ((uint32(b) &^ 128) << shift)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Skip the maxargsize
	_, fd = readvarintUnsafe(fd)
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var argWidth, closureOffset, nArgs uint32
		argWidth, fd = readvarintUnsafe(fd)
		closureOffset, fd = readvarintUnsafe(fd)
		nArgs, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			for j := uint32(0); j < nArgs; j++ {
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
			}
			continue
		}
		closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		deferArgs := deferArgs(d)
		// If there is an interface receiver or method receiver, it is
		// described/included as the first arg.
		for j := uint32(0); j < nArgs; j++ {
			var argOffset, argLen, argCallOffset uint32
			argOffset, fd = readvarintUnsafe(fd)
			argLen, fd = readvarintUnsafe(fd)
			argCallOffset, fd = readvarintUnsafe(fd)
			memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
				unsafe.Pointer(d.varp-uintptr(argOffset)),
				uintptr(argLen))
		}
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		// These args are just a copy, so can be cleared immediately
		memclrNoHeapPointers(deferArgs, uintptr(argWidth))
		if d._panic != nil && d._panic.recovered {
			done = deferBits == 0
			break
		}
	}

	return done
}

// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer processing
// loop, in the unusual case where the Goexit may be bypassed by a successful
// recover.
func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
	if p != nil {
		p.argp = unsafe.Pointer(getargp(0))
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	reflectcall(nil, fn, arg, argsize, argsize)
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}

// The implementation of the predeclared function panic.
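//
// For illustration (not part of the original source), this loop together with
// gorecover is what makes the usual defer/recover pattern work:
//
//	func f() (err error) {
//		defer func() {
//			if r := recover(); r != nil {
//				err = fmt.Errorf("recovered: %v", r)
//			}
//		}()
//		panic("boom") // reaches gopanic, which runs the deferred func above
//	}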
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic).
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			p.argp = unsafe.Pointer(getargp(0))
			reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		}
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit. Instead,
				// we return to the processing loop of the Goexit.
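				// (gp.sigcode0/gp.sigcode1 carry the target sp/pc to recovery,
				// which reads them back out of the g; see recovery below.)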
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// Remove any remaining non-started, open-coded
			// defer entries after a recover, since the
			// corresponding defers will be executed normally
			// (inline). Any such entry will become stale once
			// we run the corresponding defers inline and exit
			// the associated stack frame.
			d := gp._defer
			var prev *_defer
			if !done {
				// Skip our current frame, if not done. It is
				// needed to complete any remaining defers in
				// deferreturn()
				prev = d
				d = d.link
			}
			for d != nil {
				if d.started {
					// This defer is started but we
					// are in the middle of a
					// defer-panic-recover inside of
					// it, so don't remove it or any
					// further defer entries
					break
				}
				if d.openDefer {
					if prev == nil {
						gp._defer = d.link
					} else {
						prev.link = d.link
					}
					newd := d.link
					freedefer(d)
					d = newd
				} else {
					prev = d
					d = d.link
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
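	//
	// For illustration (not part of the original source), this check is why
	// recover must be called directly by the deferred function:
	//
	//	defer func() { recover() }()  // can recover
	//	defer func() { helper() }()   // helper calling recover() cannot
	//
	// In the second case the argp passed to gorecover is helper's argument
	// pointer, which does not match the p.argp recorded for the deferred call.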
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
}