// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"github.com/x04/go/src/runtime/internal/atomic"
	"github.com/x04/go/src/runtime/internal/sys"
	"github.com/x04/go/src/unsafe"
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.

// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

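// Illustrative sketch (an assumption, not actual compiler output): the
// compiler lowers a bounds-checked index expression roughly as follows, with
// panicIndex standing in for the assembly thunk that tail calls goPanicIndex:
//
//	// e := s[i] becomes approximately:
//	if uint(i) >= uint(len(s)) {
//		panicIndex(i, len(s)) // never returns
//	}
//	e := s[i]
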
// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}

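// Summary (derived from the comments above; not a table from the original
// source): each failing expression maps to one signed/unsigned pair of the
// functions above, where the U variant handles an unsigned index:
//
//	s[x]     -> goPanicIndex / goPanicIndexU
//	s[:x]    -> goPanicSliceAlen / goPanicSliceAcap (x compared to len or cap)
//	s[x:y]   -> goPanicSliceB
//	s[::x]   -> goPanicSlice3Alen / goPanicSlice3Acap
//	s[:x:y]  -> goPanicSlice3B
//	s[x:y:]  -> goPanicSlice3C
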
// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)

var shiftError = error(errorString("negative shift amount"))

func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

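// Illustrative sketch (an assumption, not actual compiler output; f_funcval is
// a hypothetical name for f's funcval): per the return-value comment above and
// the ret=1 set in recovery(), a heap-allocated defer behaves roughly like:
//
//	// defer f(x) compiles to approximately:
//	if deferproc(siz, f_funcval) != 0 { // returns 1 only after a recover
//		goto ret // skip straight to the epilogue
//	}
//	...
//	ret:
//		deferreturn()
//		return
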
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its siz and fn fields initialized.
// All other fields can contain junk.
// The defer record must be immediately followed in memory by
// the arguments of the defer.
// Nosplit because the arguments on the stack won't be scanned
// until the defer record is spliced into the gp._defer list.
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	// siz and fn are already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false
	d.openDefer = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	d.framepc = 0
	d.varp = 0
	// The lines below implement:
	//   d.panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

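// Worked example (illustrative; the real numbers depend on
// unsafe.Sizeof(_defer{})): suppose deferHeaderSize were 120. Then
// minDeferAlloc = 128 and minDeferArgs = 8, so deferclass(0..8) == 0,
// deferclass(9..24) == 1, deferclass(25..40) == 2, and so on, and
// roundupsize(totaldefersize(i)) lands every arg size in one class on a
// single malloc size class, which testdefersizes verifies.
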
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
		if debugCachedWork {
			// Duplicate the tail below so if there's a
			// crash in checkPut we can tell if d was just
			// allocated or came from the pool.
			d.siz = siz
			d.link = gp._defer
			gp._defer = d
			return d
		}
	}
	d.siz = siz
	d.heap = true
	d.link = gp._defer
	gp._defer = d
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		return
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.openDefer = false
	d.sp = 0
	d.pc = 0
	d.framepc = 0
	d.varp = 0
	d.fd = nil
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

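// Illustrative layout and pool behavior (derived from newdefer, freedefer, and
// deferArgs above; not a diagram from the original source). A heap-allocated
// defer occupies a single malloc block:
//
//	+---------------+--------------------+
//	| _defer header | args (d.siz bytes) |
//	+---------------+--------------------+
//	^ d             ^ deferArgs(d)
//
// The pools balance in halves: newdefer refills an empty per-P pool to half
// its capacity from sched.deferpool, and freedefer spills half of a full
// per-P pool back, so in steady state each P keeps between a half-full and a
// full pool without touching sched.deferlock on most operations.
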
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
//
// Declared as nosplit, because the function should not be preempted once we start
// modifying the caller's frame in order to reuse the frame to call the deferred
// function.
//
// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}
	if d.openDefer {
		done := runOpenDeferFrame(gp, d)
		if !done {
			throw("unfinished open-coded defers in deferreturn")
		}
		gp._defer = d.link
		freedefer(d)
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// If the defer function pointer is nil, force the seg fault to happen
	// here rather than in jmpdefer. gentraceback() throws an error if it is
	// called with a callback on an LR architecture and jmpdefer is on the
	// stack, because the stack trace can be incorrect in that case - see
	// issue #8153.
	_ = fn.fn
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

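// Illustrative trace (an interpretation of the doc comment above, not code
// from the original source): if f defers d1 and then d2, the epilogue behaves
// like a loop driven by jmpdefer:
//
//	f's epilogue calls deferreturn
//	deferreturn pops d2 (LIFO) and jmpdefers to it
//	d2 returns as if called just before deferreturn, so deferreturn reruns
//	deferreturn pops d1 and jmpdefers to it
//	d1 returns, deferreturn reruns, finds no defer matching f's sp, returns
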
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			// Save the pc/sp in reflectcallSave(), so we can "recover" back to this
			// loop if necessary.
			reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}

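// Illustrative example (based on the Goexit doc above): a deferred recover
// cannot stop Goexit, because Goexit is not a panic:
//
//	go func() {
//		defer func() {
//			if recover() != nil { // recover returns nil here
//				println("never reached")
//			}
//		}()
//		runtime.Goexit() // deferred funcs run, then the goroutine exits
//	}()
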
// Call all Error and String methods before freezing the world.
// Used when crashing with panicking.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// addOneOpenDeferFrame scans the stack for the first frame (if any) with
// open-coded defers and if it finds one, adds a single record to the defer chain
// for that frame. If sp is non-nil, it starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record
// (which has just been finished). Hence, it continues the stack scan from the
// frame of the defer that just finished. It skips any frame that already has an
// open-coded _defer record, which would have been created from a previous
// (unrecovered) panic.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				maxargsize, _ := readvarintUnsafe(fd)
				d1 := newdefer(int32(maxargsize))
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record.
				return false
			},
			nil, 0)
	})
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// There is a similar function runtime.readvarint, which takes a slice of bytes,
// rather than an unsafe pointer. These functions are duplicated, because one of
// the two use cases for the functions would get slower if the functions were
// combined.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)(fd)
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += (uint32(b) &^ 128) << shift
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}

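// Worked example (not from the original source): decoding the bytes
// 0x85 0x02 with readvarintUnsafe above:
//
//	b = 0x85 (>= 128): r = 0x85 &^ 128 = 5, shift = 7
//	b = 0x02 (< 128):  return 5 + 2<<7 = 261
//
// That is, the low 7 bits of each byte carry the value, least-significant
// group first, and a set high bit means another byte follows.
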
// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Skip the maxargsize.
	_, fd = readvarintUnsafe(fd)
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var argWidth, closureOffset, nArgs uint32
		argWidth, fd = readvarintUnsafe(fd)
		closureOffset, fd = readvarintUnsafe(fd)
		nArgs, fd = readvarintUnsafe(fd)
		if deferBits&(1<<i) == 0 {
			for j := uint32(0); j < nArgs; j++ {
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
			}
			continue
		}
		closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		deferArgs := deferArgs(d)
		// If there is an interface receiver or method receiver, it is
		// described/included as the first arg.
		for j := uint32(0); j < nArgs; j++ {
			var argOffset, argLen, argCallOffset uint32
			argOffset, fd = readvarintUnsafe(fd)
			argLen, fd = readvarintUnsafe(fd)
			argCallOffset, fd = readvarintUnsafe(fd)
			memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
				unsafe.Pointer(d.varp-uintptr(argOffset)),
				uintptr(argLen))
		}
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		// These args are just a copy, so can be cleared immediately.
		memclrNoHeapPointers(deferArgs, uintptr(argWidth))
		if d._panic != nil && d._panic.recovered {
			done = deferBits == 0
			break
		}
	}

	return done
}

// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer processing
// loop, in the unusual case where the Goexit may be bypassed by a successful
// recover.
func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
	if p != nil {
		p.argp = unsafe.Pointer(getargp(0))
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	reflectcall(nil, fn, arg, argsize, argsize)
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}

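// Layout of the _FUNCDATA_OpenCodedDeferInfo record as consumed by
// runOpenDeferFrame above (reconstructed from its reads; every field is a
// varint):
//
//	maxargsize, deferBitsOffset, nDefers,
//	then nDefers groups of: argWidth, closureOffset, nArgs,
//	each followed by nArgs triples of: argOffset, argLen, argCallOffset
//
// The offsets are subtracted from d.varp to locate the deferBits byte, each
// closure, and each saved argument in the frame.
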
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic).
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			p.argp = unsafe.Pointer(getargp(0))
			reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		}
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit. Instead,
				// we return to the processing loop of the Goexit.
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			if done {
				// Remove any remaining non-started, open-coded defer
				// entry after a recover (there's at most one, if we just
				// ran a non-open-coded defer), since the entry will
				// become outdated and the defer will be executed
				// normally.
				d := gp._defer
				var prev *_defer
				for d != nil {
					if d.openDefer {
						if d.started {
							// This defer is started but we
							// are in the middle of a
							// defer-panic-recover inside of
							// it, so don't remove it or any
							// further defer entries
							break
						}
						if prev == nil {
							gp._defer = d.link
						} else {
							prev.link = d.link
						}
						freedefer(d)
						break
					} else {
						prev = d
						d = d.link
					}
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

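// Illustrative example (based on the comments in gorecover above): recover
// only stops a panic when called directly by the deferred function, because
// only then does the caller's argp match p.argp:
//
//	defer func() { recover() }() // can stop a panic
//
//	defer func() { helper() }()  // helper's recover() returns nil:
//	                             // helper is not the topmost deferred call
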
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have a P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	switch _g_.m.dying {
	case 0:
		// Setting dying > 0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

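// Summary of the m.dying progression driven above (derived from the switch in
// startpanic_m; not a comment from the original source):
//
//	0 -> 1  first failure: freeze the world, return true (print messages)
//	1 -> 2  failed while panicking: print "panic during panic", stacks only
//	2 -> 3  failed while printing the trace: exit(4)
//	>= 3    cannot even print: exit(5)
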
var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
}