// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"github.com/comwrg/go/src/internal/abi"
	"github.com/comwrg/go/src/internal/goexperiment"
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// We have two different ways of doing defers. The older way involves creating a
// defer record at the time that a defer statement is executing and adding it to a
// defer chain. This chain is inspected by the deferreturn call at all function
// exits in order to run the appropriate defer calls. A cheaper way (which we call
// open-coded defers) is used for functions in which no defer statements occur in
// loops. In that case, we simply store the defer function/arg information into
// specific stack slots at the point of each defer statement, as well as setting a
// bit in a bitmask. At each function exit, we add inline code to directly make
// the appropriate defer calls based on the bitmask and fn/arg information stored
// on the stack. During panic/Goexit processing, the appropriate defer calls are
// made using extra funcdata info that indicates the exact stack slots that
// contain the bitmask and defer fn/args.
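// For illustration only (a sketch, not part of this file): in a function such as
//
//	func f() {
//		defer a()
//		if cond {
//			defer b()
//		}
//		body()
//	}
//
// both defers can be open-coded: the compiler records a() and b() (and their
// arguments) in fixed stack slots, sets one bit per executed defer statement in
// a bitmask, and emits inline calls at every return. By contrast, a defer inside
// a loop, e.g.
//
//	func g(n int) {
//		for i := 0; i < n; i++ {
//			defer fmt.Println(i) // runs n times, so it cannot use a fixed slot
//		}
//	}
//
// must fall back to the older scheme: each iteration allocates a defer record
// (deferproc/deferprocStack) and links it onto the goroutine's defer chain.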
// Check to make sure we can really generate a panic. If the panic
// was generated from the runtime, or from inside malloc, then convert
// to a throw of msg.
// pc should be the program counter of the compiler-generated code that
// triggered this panic.
func panicCheck1(pc uintptr, msg string) {
	if sys.GoarchWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") {
		// Note: wasm can't tail call, so we can't get the original caller's pc.
		throw(msg)
	}
	// TODO: is this redundant? How could we be in malloc
	// but not in the runtime? runtime/internal/*, maybe?
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(msg)
	}
}

// Same as above, but calling from the runtime is allowed.
//
// Using this function is necessary for any panic that may be
// generated by runtime.sigpanic, since those are always called by the
// runtime.
func panicCheck2(err string) {
	// panic allocates, so to avoid recursive malloc, turn panics
	// during malloc into throws.
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(err)
	}
}

// Many of the following panic entry-points turn into throws when they
// happen in various runtime contexts. These should never happen in
// the runtime, and if they do, they indicate a serious issue and
// should not be caught by user code.
//
// The panic{Index,Slice,divide,shift} functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, division by zero, and shift by negative.
// The panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panic{Index,Slice,shift} are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference or negative shift, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
//
// The entry points called by the signal handler will be called from
// runtime.sigpanic, so we can't disallow calls from the runtime to
// these (they always look like they're called from the runtime).
// Hence, for these, we just check for clearly bad runtime conditions.
//
// The panic{Index,Slice} functions are implemented in assembly and tail call
// to the goPanic{Index,Slice} functions below. This is done so we can use
// a space-minimal register calling convention.

// failures in the comparisons for s[x], 0 <= x < y (y == len(s))
func goPanicIndex(x int, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex})
}
func goPanicIndexU(x uint, y int) {
	panicCheck1(getcallerpc(), "index out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex})
}

// failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSliceAlen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen})
}
func goPanicSliceAlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen})
}
func goPanicSliceAcap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap})
}
func goPanicSliceAcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap})
}

// failures in the comparisons for s[x:y], 0 <= x <= y
func goPanicSliceB(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB})
}
func goPanicSliceBU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB})
}

// failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s))
func goPanicSlice3Alen(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3AlenU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen})
}
func goPanicSlice3Acap(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap})
}
func goPanicSlice3AcapU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap})
}

// failures in the comparisons for s[:x:y], 0 <= x <= y
func goPanicSlice3B(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B})
}
func goPanicSlice3BU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B})
}

// failures in the comparisons for s[x:y:], 0 <= x <= y
func goPanicSlice3C(x int, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C})
}
func goPanicSlice3CU(x uint, y int) {
	panicCheck1(getcallerpc(), "slice bounds out of range")
	panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C})
}
// failures in the conversion (*[x]T)s, 0 <= x <= y, x == cap(s)
func goPanicSliceConvert(x int, y int) {
	panicCheck1(getcallerpc(), "slice length too short to convert to pointer to array")
	panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert})
}

// Implemented in assembly, as they take arguments in registers.
// Declared here to mark them as ABIInternal.
func panicIndex(x int, y int)
func panicIndexU(x uint, y int)
func panicSliceAlen(x int, y int)
func panicSliceAlenU(x uint, y int)
func panicSliceAcap(x int, y int)
func panicSliceAcapU(x uint, y int)
func panicSliceB(x int, y int)
func panicSliceBU(x uint, y int)
func panicSlice3Alen(x int, y int)
func panicSlice3AlenU(x uint, y int)
func panicSlice3Acap(x int, y int)
func panicSlice3AcapU(x uint, y int)
func panicSlice3B(x int, y int)
func panicSlice3BU(x uint, y int)
func panicSlice3C(x int, y int)
func panicSlice3CU(x uint, y int)
func panicSliceConvert(x int, y int)

var shiftError = error(errorString("negative shift amount"))

func panicshift() {
	panicCheck1(getcallerpc(), "negative shift amount")
	panic(shiftError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheck2("integer divide by zero")
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheck2("integer overflow")
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheck2("floating point error")
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(memoryError)
}

func panicmemAddr(addr uintptr) {
	panicCheck2("invalid memory address or nil pointer dereference")
	panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr})
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	if goexperiment.RegabiDefer && siz != 0 {
		// TODO: Make deferproc just take a func().
		throw("defer with non-empty frame")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.link = gp._defer
	gp._defer = d
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
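// Roughly speaking (a sketch, not the exact generated code), the compiler
// lowers a statement like
//
//	defer f(x)
//
// in a function that cannot use open-coded defers into something like
//
//	if deferproc(siz, f /* followed by a copy of x */) != 0 {
//		goto exit // a recover happened; run deferreturn and return
//	}
//	...
//	exit:
//	deferreturn()
//	return
//
// deferproc normally returns 0 via return0; recovery (below) arranges for the
// same call site to "return" 1 after a successful recover, which is why the
// generated code tests the result and jumps to the function's exit sequence.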
// deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its siz and fn fields initialized.
// All other fields can contain junk.
// The defer record must be immediately followed in memory by
// the arguments of the defer.
// Nosplit because the arguments on the stack won't be scanned
// until the defer record is spliced into the gp._defer list.
//go:nosplit
func deferprocStack(d *_defer) {
	gp := getg()
	if gp.m.curg != gp {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}
	if goexperiment.RegabiDefer && d.siz != 0 {
		throw("defer with non-empty frame")
	}
	// siz and fn are already set.
	// The other fields are junk on entry to deferprocStack and
	// are initialized here.
	d.started = false
	d.heap = false
	d.openDefer = false
	d.sp = getcallersp()
	d.pc = getcallerpc()
	d.framepc = 0
	d.varp = 0
	// The lines below implement:
	//   d._panic = nil
	//   d.fd = nil
	//   d.link = gp._defer
	//   gp._defer = d
	// But without write barriers. The first three are writes to
	// the stack so they don't need a write barrier, and furthermore
	// are to uninitialized memory, so they must not use a write barrier.
	// The fourth write does not require a write barrier because we
	// explicitly mark all the defer structures, so we don't need to
	// keep track of pointers to them with a write barrier.
	*(*uintptr)(unsafe.Pointer(&d._panic)) = 0
	*(*uintptr)(unsafe.Pointer(&d.fd)) = 0
	*(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer))
	*(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d))

	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}
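// A worked example (illustrative only; the concrete numbers depend on the size
// of _defer on the target): suppose deferHeaderSize is 80 bytes, so
// minDeferAlloc == 80 and minDeferArgs == 0. Then
//
//	deferclass(0)  == 0    totaldefersize(0)  == 80
//	deferclass(8)  == 1    totaldefersize(8)  == 88
//	deferclass(16) == 1    totaldefersize(16) == 96
//	deferclass(17) == 2    totaldefersize(17) == 97
//
// i.e. arg sizes are bucketed in 16-byte steps past the header, and
// testdefersizes below verifies that every bucket rounds up (via roundupsize)
// to a single malloc size class.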
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

// deferFunc returns d's deferred function. This is temporary while we
// support both modes of GOEXPERIMENT=regabidefer. Once we commit to
// that experiment, we should change the type of d.fn.
//go:nosplit
func deferFunc(d *_defer) func() {
	if !goexperiment.RegabiDefer {
		throw("requires GOEXPERIMENT=regabidefer")
	}
	var fn func()
	*(**funcval)(unsafe.Pointer(&fn)) = d.fn
	return fn
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer. The defer is not
// added to any defer chain yet.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	d.heap = true
	return d
}
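// In rough outline (a sketch of the code above and of freedefer below, not
// additional runtime behavior): heap-allocated defer records move through a
// two-level cache. newdefer first tries the owning P's deferpool[sc]; if that
// is empty it refills it to half capacity from the global sched.deferpool[sc]
// under sched.deferlock, and only then falls back to mallocgc. freedefer does
// the reverse: when the P-local pool is full it flushes half of it back to the
// global list before appending the freed record.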
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if !d.heap {
		return
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.openDefer = false
	d.sp = 0
	d.pc = 0
	d.framepc = 0
	d.varp = 0
	d.fd = nil
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
//
// Declared as nosplit, because the function should not be preempted once we start
// modifying the caller's frame in order to reuse the frame to call the deferred
// function.
//
//go:nosplit
func deferreturn() {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}
	if d.openDefer {
		done := runOpenDeferFrame(gp, d)
		if !done {
			throw("unfinished open-coded defers in deferreturn")
		}
		gp._defer = d.link
		freedefer(d)
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	argp := getcallersp() + sys.MinFrameSize
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(argp)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(argp), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	// If the defer function pointer is nil, force the seg fault to happen
	// here rather than in jmpdefer. gentraceback() throws an error if it is
	// called with a callback on an LR architecture and jmpdefer is on the
	// stack, because the stack trace can be incorrect in that case - see
	// issue #8153.
	_ = fn.fn
	jmpdefer(fn, argp)
}
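// The control flow above is easier to see as a sketch (conceptual, not real
// code; jmpdefer is assembly):
//
//	caller frame:
//		... function body ...
//		CALL deferreturn
//		RET
//
//	deferreturn:
//		copy d's saved args into the caller's outgoing-arg slots
//		fn := d.fn; pop d from gp._defer; freedefer(d)
//		jmpdefer(fn, argp)   // jumps to fn and rewinds the return address so
//		                     // that fn "returns" to the CALL deferreturn
//		                     // instruction, re-running deferreturn
//
// So deferreturn keeps getting re-entered until gp._defer no longer holds a
// record whose sp matches the caller's frame, at which point it simply returns.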
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()

	// Create a panic object for Goexit, so we can recognize when it might be
	// bypassed by a recover().
	var p _panic
	p.goexit = true
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			if !d.openDefer {
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}
		d.started = true
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))
		if d.openDefer {
			done := runOpenDeferFrame(gp, d)
			if !done {
				// We should always run all defers in the frame,
				// since there is no panic associated with this
				// defer that can be recovered.
				throw("unfinished open-coded defers in Goexit")
			}
			if p.aborted {
				// Since our current defer caused a panic and may
				// have been already freed, just restart scanning
				// for open-coded defers from this frame again.
				addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))
			} else {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			if goexperiment.RegabiDefer {
				// Save the pc/sp in deferCallSave(), so we can "recover" back to this
				// loop if necessary.
				deferCallSave(&p, deferFunc(d))
			} else {
				// Save the pc/sp in reflectcallSave(), so we can "recover" back to this
				// loop if necessary.
				reflectcallSave(&p, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz))
			}
		}
		if p.aborted {
			// We had a recursive panic in the defer d we started, and
			// then did a recover in a defer that was further down the
			// defer chain than d. In the case of an outstanding Goexit,
			// we force the recover to return back to this loop. d will
			// have already been freed if completed, so just continue
			// immediately to the next defer on the chain.
			p.aborted = false
			continue
		}
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
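// A minimal usage sketch (illustrative only, not part of the runtime): Goexit
// unwinds the calling goroutine while still honoring its defers, and recover
// sees nil because no panic is in progress.
//
//	func worker(done chan<- struct{}) {
//		defer close(done) // still runs
//		defer func() {
//			if r := recover(); r != nil {
//				// not reached: Goexit is not a panic
//			}
//		}()
//		runtime.Goexit()
//		// not reached
//	}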
// Call all Error and String methods before freezing the world.
// Used when crashing the program because of an unrecovered panic.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		if !p.link.goexit {
			print("\t")
		}
	}
	if p.goexit {
		return
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
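// For example (a sketch of the resulting output; details may vary): if a
// deferred function panics with "second" while the goroutine is already
// panicking with "first", and nothing recovers, printpanics walks the chain
// from oldest to newest and the crash output looks roughly like
//
//	panic: first
//		panic: second
//
// with " [recovered]" appended to any panic value that had been recovered
// before the crash.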
// addOneOpenDeferFrame scans the stack for the first frame (if any) with
// open-coded defers and if it finds one, adds a single record to the defer chain
// for that frame. If sp is non-nil, it starts the stack scan from the frame
// specified by sp. If sp is nil, it uses the sp from the current defer record
// (which has just been finished). Hence, it continues the stack scan from the
// frame of the defer that just finished. It skips any frame that already has an
// open-coded _defer record, which would have been created from a previous
// (unrecovered) panic.
//
// Note: All entries of the defer chain (including this new open-coded entry) have
// their pointers (including sp) adjusted properly if the stack moves while
// running deferred functions. Also, it is safe to pass in the sp arg (which is
// the direct result of calling getcallersp()), because all pointer variables
// (including arguments) are adjusted as needed during stack copies.
func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) {
	var prevDefer *_defer
	if sp == nil {
		prevDefer = gp._defer
		pc = prevDefer.framepc
		sp = unsafe.Pointer(prevDefer.sp)
	}
	systemstack(func() {
		gentraceback(pc, uintptr(sp), 0, gp, 0, nil, 0x7fffffff,
			func(frame *stkframe, unused unsafe.Pointer) bool {
				if prevDefer != nil && prevDefer.sp == frame.sp {
					// Skip the frame for the previous defer that
					// we just finished (and was used to set
					// where we restarted the stack scan)
					return true
				}
				f := frame.fn
				fd := funcdata(f, _FUNCDATA_OpenCodedDeferInfo)
				if fd == nil {
					return true
				}
				// Insert the open defer record in the
				// chain, in order sorted by sp.
				d := gp._defer
				var prev *_defer
				for d != nil {
					dsp := d.sp
					if frame.sp < dsp {
						break
					}
					if frame.sp == dsp {
						if !d.openDefer {
							throw("duplicated defer entry")
						}
						return true
					}
					prev = d
					d = d.link
				}
				if frame.fn.deferreturn == 0 {
					throw("missing deferreturn")
				}

				maxargsize, _ := readvarintUnsafe(fd)
				d1 := newdefer(int32(maxargsize))
				d1.openDefer = true
				d1._panic = nil
				// These are the pc/sp to set after we've
				// run a defer in this frame that did a
				// recover. We return to a special
				// deferreturn that runs any remaining
				// defers and then returns from the
				// function.
				d1.pc = frame.fn.entry + uintptr(frame.fn.deferreturn)
				d1.varp = frame.varp
				d1.fd = fd
				// Save the SP/PC associated with current frame,
				// so we can continue stack trace later if needed.
				d1.framepc = frame.pc
				d1.sp = frame.sp
				d1.link = d
				if prev == nil {
					gp._defer = d1
				} else {
					prev.link = d1
				}
				// Stop stack scanning after adding one open defer record
				return false
			},
			nil, 0)
	})
}

// readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the
// uint32 and a pointer to the byte following the varint.
//
// There is a similar function runtime.readvarint, which takes a slice of bytes,
// rather than an unsafe pointer. These functions are duplicated, because one of
// the two use cases for the functions would get slower if the functions were
// combined.
func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) {
	var r uint32
	var shift int
	for {
		b := *(*uint8)((unsafe.Pointer(fd)))
		fd = add(fd, unsafe.Sizeof(b))
		if b < 128 {
			return r + uint32(b)<<shift, fd
		}
		r += ((uint32(b) &^ 128) << shift)
		shift += 7
		if shift > 28 {
			panic("Bad varint")
		}
	}
}
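// The varints above and below come from the FUNCDATA_OpenCodedDeferInfo blob
// that the compiler emits for a frame with open-coded defers. As consumed by
// addOneOpenDeferFrame and runOpenDeferFrame, the layout is (a sketch inferred
// from the reads; each field is a varint):
//
//	maxargsize                      // max size of the deferred calls' arg frames
//	deferBitsOffset                 // offset of the deferBits byte below varp
//	nDefers                         // number of defer sites in the frame
//	for each defer site:
//		argWidth, closureOffset, nArgs
//		for each arg: argOffset, argLen, argCallOffset
//
// As a decoding example, the bytes 0x85 0x02 form the varint
// 5 + 2<<7 == 261 (low 7 bits come first; the high bit marks continuation).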
// runOpenDeferFrame runs the active open-coded defers in the frame specified by
// d. It normally processes all active defers in the frame, but stops immediately
// if a defer does a successful recover. It returns true if there are no
// remaining defers to run in the frame.
func runOpenDeferFrame(gp *g, d *_defer) bool {
	done := true
	fd := d.fd

	// Skip the maxargsize
	_, fd = readvarintUnsafe(fd)
	deferBitsOffset, fd := readvarintUnsafe(fd)
	nDefers, fd := readvarintUnsafe(fd)
	deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset)))

	for i := int(nDefers) - 1; i >= 0; i-- {
		// read the funcdata info for this defer
		var argWidth, closureOffset, nArgs uint32
		argWidth, fd = readvarintUnsafe(fd)
		closureOffset, fd = readvarintUnsafe(fd)
		nArgs, fd = readvarintUnsafe(fd)
		if goexperiment.RegabiDefer && argWidth != 0 {
			throw("defer with non-empty frame")
		}
		if deferBits&(1<<i) == 0 {
			for j := uint32(0); j < nArgs; j++ {
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
				_, fd = readvarintUnsafe(fd)
			}
			continue
		}
		closure := *(**funcval)(unsafe.Pointer(d.varp - uintptr(closureOffset)))
		d.fn = closure
		deferArgs := deferArgs(d)
		// If there is an interface receiver or method receiver, it is
		// described/included as the first arg.
		for j := uint32(0); j < nArgs; j++ {
			var argOffset, argLen, argCallOffset uint32
			argOffset, fd = readvarintUnsafe(fd)
			argLen, fd = readvarintUnsafe(fd)
			argCallOffset, fd = readvarintUnsafe(fd)
			memmove(unsafe.Pointer(uintptr(deferArgs)+uintptr(argCallOffset)),
				unsafe.Pointer(d.varp-uintptr(argOffset)),
				uintptr(argLen))
		}
		deferBits = deferBits &^ (1 << i)
		*(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits
		p := d._panic
		if goexperiment.RegabiDefer {
			deferCallSave(p, deferFunc(d))
		} else {
			reflectcallSave(p, unsafe.Pointer(closure), deferArgs, argWidth)
		}
		if p != nil && p.aborted {
			break
		}
		d.fn = nil
		// These args are just a copy, so can be cleared immediately
		memclrNoHeapPointers(deferArgs, uintptr(argWidth))
		if d._panic != nil && d._panic.recovered {
			done = deferBits == 0
			break
		}
	}

	return done
}

// reflectcallSave calls reflectcall after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer processing
// loop, in the unusual case where the Goexit may be bypassed by a successful
// recover.
//
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func reflectcallSave(p *_panic, fn, arg unsafe.Pointer, argsize uint32) {
	if goexperiment.RegabiDefer {
		throw("not allowed with GOEXPERIMENT=regabidefer")
	}
	if p != nil {
		p.argp = unsafe.Pointer(getargp())
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	// Pass a dummy RegArgs since we'll only take this path if
	// we're not using the register ABI.
	var regs abi.RegArgs
	reflectcall(nil, fn, arg, argsize, argsize, argsize, &regs)
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}

// deferCallSave calls fn() after saving the caller's pc and sp in the
// panic record. This allows the runtime to return to the Goexit defer
// processing loop, in the unusual case where the Goexit may be
// bypassed by a successful recover.
//
// This is marked as a wrapper by the compiler so it doesn't appear in
// tracebacks.
func deferCallSave(p *_panic, fn func()) {
	if !goexperiment.RegabiDefer {
		throw("only allowed with GOEXPERIMENT=regabidefer")
	}
	if p != nil {
		p.argp = unsafe.Pointer(getargp())
		p.pc = getcallerpc()
		p.sp = unsafe.Pointer(getcallersp())
	}
	fn()
	if p != nil {
		p.pc = 0
		p.sp = unsafe.Pointer(nil)
	}
}
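// A small example of the behavior gopanic below implements (illustrative only,
// not part of the runtime):
//
//	func f() {
//		defer print("runs last\n") // registered first
//		defer func() {
//			if r := recover(); r != nil {
//				print("recovered\n")
//			}
//		}() // registered second, runs first
//		panic("boom")
//	}
//
// Deferred calls run newest-first, so the recovering closure executes before
// the first defer; once it recovers, gopanic arranges (via recovery) for f to
// continue its normal exit path, which still runs the remaining defer.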
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	// By calculating getcallerpc/getcallersp here, we avoid scanning the
	// gopanic frame (stack scanning is slow...)
	addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp()))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. An earlier panic will not continue running, but we will make sure below that an
		// earlier Goexit does continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			if !d.openDefer {
				// For open-coded defers, we need to process the
				// defer again, in case there are any other defers
				// to call in the frame (not including the defer
				// call that caused the panic).
				d.fn = nil
				gp._defer = d.link
				freedefer(d)
				continue
			}
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		done := true
		if d.openDefer {
			done = runOpenDeferFrame(gp, d)
			if done && !d._panic.recovered {
				addOneOpenDeferFrame(gp, 0, nil)
			}
		} else {
			p.argp = unsafe.Pointer(getargp())

			if goexperiment.RegabiDefer {
				fn := deferFunc(d)
				fn()
			} else {
				// Pass a dummy RegArgs since we'll only take this path if
				// we're not using the register ABI.
				var regs abi.RegArgs
				reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz), uint32(d.siz), &regs)
			}
		}
		p.argp = nil

		// Deferred function did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		if done {
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
		}
		if p.recovered {
			gp._panic = p.link
			if gp._panic != nil && gp._panic.goexit && gp._panic.aborted {
				// A normal recover would bypass/abort the Goexit. Instead,
				// we return to the processing loop of the Goexit.
				gp.sigcode0 = uintptr(gp._panic.sp)
				gp.sigcode1 = uintptr(gp._panic.pc)
				mcall(recovery)
				throw("bypassed recovery failed") // mcall should not return
			}
			atomic.Xadd(&runningPanicDefers, -1)

			// Remove any remaining non-started, open-coded
			// defer entries after a recover, since the
			// corresponding defers will be executed normally
			// (inline). Any such entry will become stale once
			// we run the corresponding defers inline and exit
			// the associated stack frame.
			d := gp._defer
			var prev *_defer
			if !done {
				// Skip our current frame, if not done. It is
				// needed to complete any remaining defers in
				// deferreturn()
				prev = d
				d = d.link
			}
			for d != nil {
				if d.started {
					// This defer is started but we
					// are in the middle of a
					// defer-panic-recover inside of
					// it, so don't remove it or any
					// further defer entries
					break
				}
				if d.openDefer {
					if prev == nil {
						gp._defer = d.link
					} else {
						prev.link = d.link
					}
					newd := d.link
					freedefer(d)
					d = newd
				} else {
					prev = d
					d = d.link
				}
			}

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp() uintptr {
	return getcallersp() + sys.MinFrameSize
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
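// The argp comparison above is why recover only works when called directly by
// the deferred function that the panicking goroutine is currently running.
// Illustrative sketch (not part of this file):
//
//	defer func() {
//		if r := recover(); r != nil { /* works: argp matches p.argp */ }
//	}()
//
//	defer func() {
//		helper() // a recover() inside helper returns nil, because
//	}()              // helper's argument pointer differs from p.argp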
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	printDebugLog()

	return docrash
}
// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	f := findfunc(pc)
	if !f.valid() {
		return false
	}
	return f.funcID == funcID_abort
}