github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/panic.go (about) 1 // Copyright 2014 The Go Authors. All rights reserved. 2 // Use of this source code is governed by a BSD-style 3 // license that can be found in the LICENSE file. 4 5 package runtime 6 7 import ( 8 "internal/abi" 9 "internal/goarch" 10 "runtime/internal/atomic" 11 "runtime/internal/sys" 12 "unsafe" 13 ) 14 15 // throwType indicates the current type of ongoing throw, which affects the 16 // amount of detail printed to stderr. Higher values include more detail. 17 type throwType uint32 18 19 const ( 20 // throwTypeNone means that we are not throwing. 21 throwTypeNone throwType = iota 22 23 // throwTypeUser is a throw due to a problem with the application. 24 // 25 // These throws do not include runtime frames, system goroutines, or 26 // frame metadata. 27 throwTypeUser 28 29 // throwTypeRuntime is a throw due to a problem with Go itself. 30 // 31 // These throws include as much information as possible to aid in 32 // debugging the runtime, including runtime frames, system goroutines, 33 // and frame metadata. 34 throwTypeRuntime 35 ) 36 37 // We have two different ways of doing defers. The older way involves creating a 38 // defer record at the time that a defer statement is executing and adding it to a 39 // defer chain. This chain is inspected by the deferreturn call at all function 40 // exits in order to run the appropriate defer calls. A cheaper way (which we call 41 // open-coded defers) is used for functions in which no defer statements occur in 42 // loops. In that case, we simply store the defer function/arg information into 43 // specific stack slots at the point of each defer statement, as well as setting a 44 // bit in a bitmask. At each function exit, we add inline code to directly make 45 // the appropriate defer calls based on the bitmask and fn/arg information stored 46 // on the stack. During panic/Goexit processing, the appropriate defer calls are 47 // made using extra funcdata info that indicates the exact stack slots that 48 // contain the bitmask and defer fn/args. 49 50 // Check to make sure we can really generate a panic. If the panic 51 // was generated from the runtime, or from inside malloc, then convert 52 // to a throw of msg. 53 // pc should be the program counter of the compiler-generated code that 54 // triggered this panic. 55 func panicCheck1(pc uintptr, msg string) { 56 if goarch.IsWasm == 0 && hasPrefix(funcname(findfunc(pc)), "runtime.") { 57 // Note: wasm can't tail call, so we can't get the original caller's pc. 58 throw(msg) 59 } 60 // TODO: is this redundant? How could we be in malloc 61 // but not in the runtime? runtime/internal/*, maybe? 62 gp := getg() 63 if gp != nil && gp.m != nil && gp.m.mallocing != 0 { 64 throw(msg) 65 } 66 } 67 68 // Same as above, but calling from the runtime is allowed. 69 // 70 // Using this function is necessary for any panic that may be 71 // generated by runtime.sigpanic, since those are always called by the 72 // runtime. 73 func panicCheck2(err string) { 74 // panic allocates, so to avoid recursive malloc, turn panics 75 // during malloc into throws. 76 gp := getg() 77 if gp != nil && gp.m != nil && gp.m.mallocing != 0 { 78 throw(err) 79 } 80 } 81 82 // Many of the following panic entry-points turn into throws when they 83 // happen in various runtime contexts. These should never happen in 84 // the runtime, and if they do, they indicate a serious issue and 85 // should not be caught by user code. 86 // 87 // The panic{Index,Slice,divide,shift} functions are called by 88 // code generated by the compiler for out of bounds index expressions, 89 // out of bounds slice expressions, division by zero, and shift by negative. 90 // The panicdivide (again), panicoverflow, panicfloat, and panicmem 91 // functions are called by the signal handler when a signal occurs 92 // indicating the respective problem. 93 // 94 // Since panic{Index,Slice,shift} are never called directly, and 95 // since the runtime package should never have an out of bounds slice 96 // or array reference or negative shift, if we see those functions called from the 97 // runtime package we turn the panic into a throw. That will dump the 98 // entire runtime stack for easier debugging. 99 // 100 // The entry points called by the signal handler will be called from 101 // runtime.sigpanic, so we can't disallow calls from the runtime to 102 // these (they always look like they're called from the runtime). 103 // Hence, for these, we just check for clearly bad runtime conditions. 104 // 105 // The panic{Index,Slice} functions are implemented in assembly and tail call 106 // to the goPanic{Index,Slice} functions below. This is done so we can use 107 // a space-minimal register calling convention. 108 109 // failures in the comparisons for s[x], 0 <= x < y (y == len(s)) 110 // 111 //go:yeswritebarrierrec 112 func goPanicIndex(x int, y int) { 113 panicCheck1(getcallerpc(), "index out of range") 114 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsIndex}) 115 } 116 117 //go:yeswritebarrierrec 118 func goPanicIndexU(x uint, y int) { 119 panicCheck1(getcallerpc(), "index out of range") 120 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsIndex}) 121 } 122 123 // failures in the comparisons for s[:x], 0 <= x <= y (y == len(s) or cap(s)) 124 // 125 //go:yeswritebarrierrec 126 func goPanicSliceAlen(x int, y int) { 127 panicCheck1(getcallerpc(), "slice bounds out of range") 128 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAlen}) 129 } 130 131 //go:yeswritebarrierrec 132 func goPanicSliceAlenU(x uint, y int) { 133 panicCheck1(getcallerpc(), "slice bounds out of range") 134 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAlen}) 135 } 136 137 //go:yeswritebarrierrec 138 func goPanicSliceAcap(x int, y int) { 139 panicCheck1(getcallerpc(), "slice bounds out of range") 140 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceAcap}) 141 } 142 143 //go:yeswritebarrierrec 144 func goPanicSliceAcapU(x uint, y int) { 145 panicCheck1(getcallerpc(), "slice bounds out of range") 146 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceAcap}) 147 } 148 149 // failures in the comparisons for s[x:y], 0 <= x <= y 150 // 151 //go:yeswritebarrierrec 152 func goPanicSliceB(x int, y int) { 153 panicCheck1(getcallerpc(), "slice bounds out of range") 154 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSliceB}) 155 } 156 157 //go:yeswritebarrierrec 158 func goPanicSliceBU(x uint, y int) { 159 panicCheck1(getcallerpc(), "slice bounds out of range") 160 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSliceB}) 161 } 162 163 // failures in the comparisons for s[::x], 0 <= x <= y (y == len(s) or cap(s)) 164 func goPanicSlice3Alen(x int, y int) { 165 panicCheck1(getcallerpc(), "slice bounds out of range") 166 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Alen}) 167 } 168 func goPanicSlice3AlenU(x uint, y int) { 169 panicCheck1(getcallerpc(), "slice bounds out of range") 170 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Alen}) 171 } 172 func goPanicSlice3Acap(x int, y int) { 173 panicCheck1(getcallerpc(), "slice bounds out of range") 174 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3Acap}) 175 } 176 func goPanicSlice3AcapU(x uint, y int) { 177 panicCheck1(getcallerpc(), "slice bounds out of range") 178 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3Acap}) 179 } 180 181 // failures in the comparisons for s[:x:y], 0 <= x <= y 182 func goPanicSlice3B(x int, y int) { 183 panicCheck1(getcallerpc(), "slice bounds out of range") 184 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3B}) 185 } 186 func goPanicSlice3BU(x uint, y int) { 187 panicCheck1(getcallerpc(), "slice bounds out of range") 188 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3B}) 189 } 190 191 // failures in the comparisons for s[x:y:], 0 <= x <= y 192 func goPanicSlice3C(x int, y int) { 193 panicCheck1(getcallerpc(), "slice bounds out of range") 194 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsSlice3C}) 195 } 196 func goPanicSlice3CU(x uint, y int) { 197 panicCheck1(getcallerpc(), "slice bounds out of range") 198 panic(boundsError{x: int64(x), signed: false, y: y, code: boundsSlice3C}) 199 } 200 201 // failures in the conversion ([x]T)(s) or (*[x]T)(s), 0 <= x <= y, y == len(s) 202 func goPanicSliceConvert(x int, y int) { 203 panicCheck1(getcallerpc(), "slice length too short to convert to array or pointer to array") 204 panic(boundsError{x: int64(x), signed: true, y: y, code: boundsConvert}) 205 } 206 207 // Implemented in assembly, as they take arguments in registers. 208 // Declared here to mark them as ABIInternal. 209 func panicIndex(x int, y int) 210 func panicIndexU(x uint, y int) 211 func panicSliceAlen(x int, y int) 212 func panicSliceAlenU(x uint, y int) 213 func panicSliceAcap(x int, y int) 214 func panicSliceAcapU(x uint, y int) 215 func panicSliceB(x int, y int) 216 func panicSliceBU(x uint, y int) 217 func panicSlice3Alen(x int, y int) 218 func panicSlice3AlenU(x uint, y int) 219 func panicSlice3Acap(x int, y int) 220 func panicSlice3AcapU(x uint, y int) 221 func panicSlice3B(x int, y int) 222 func panicSlice3BU(x uint, y int) 223 func panicSlice3C(x int, y int) 224 func panicSlice3CU(x uint, y int) 225 func panicSliceConvert(x int, y int) 226 227 var shiftError = error(errorString("negative shift amount")) 228 229 //go:yeswritebarrierrec 230 func panicshift() { 231 panicCheck1(getcallerpc(), "negative shift amount") 232 panic(shiftError) 233 } 234 235 var divideError = error(errorString("integer divide by zero")) 236 237 //go:yeswritebarrierrec 238 func panicdivide() { 239 panicCheck2("integer divide by zero") 240 panic(divideError) 241 } 242 243 var overflowError = error(errorString("integer overflow")) 244 245 func panicoverflow() { 246 panicCheck2("integer overflow") 247 panic(overflowError) 248 } 249 250 var floatError = error(errorString("floating point error")) 251 252 func panicfloat() { 253 panicCheck2("floating point error") 254 panic(floatError) 255 } 256 257 var memoryError = error(errorString("invalid memory address or nil pointer dereference")) 258 259 func panicmem() { 260 panicCheck2("invalid memory address or nil pointer dereference") 261 panic(memoryError) 262 } 263 264 func panicmemAddr(addr uintptr) { 265 panicCheck2("invalid memory address or nil pointer dereference") 266 panic(errorAddressString{msg: "invalid memory address or nil pointer dereference", addr: addr}) 267 } 268 269 // Create a new deferred function fn, which has no arguments and results. 270 // The compiler turns a defer statement into a call to this. 271 func deferproc(fn func()) { 272 gp := getg() 273 if gp.m.curg != gp { 274 // go code on the system stack can't defer 275 throw("defer on system stack") 276 } 277 278 d := newdefer() 279 if d._panic != nil { 280 throw("deferproc: d.panic != nil after newdefer") 281 } 282 d.link = gp._defer 283 gp._defer = d 284 d.fn = fn 285 d.pc = getcallerpc() 286 // We must not be preempted between calling getcallersp and 287 // storing it to d.sp because getcallersp's result is a 288 // uintptr stack pointer. 289 d.sp = getcallersp() 290 291 // deferproc returns 0 normally. 292 // a deferred func that stops a panic 293 // makes the deferproc return 1. 294 // the code the compiler generates always 295 // checks the return value and jumps to the 296 // end of the function if deferproc returns != 0. 297 return0() 298 // No code can go here - the C return register has 299 // been set and must not be clobbered. 300 } 301 302 // deferprocStack queues a new deferred function with a defer record on the stack. 303 // The defer record must have its fn field initialized. 304 // All other fields can contain junk. 305 // Nosplit because of the uninitialized pointer fields on the stack. 306 // 307 //go:nosplit 308 func deferprocStack(d *_defer) { 309 gp := getg() 310 if gp.m.curg != gp { 311 // go code on the system stack can't defer 312 throw("defer on system stack") 313 } 314 // fn is already set. 315 // The other fields are junk on entry to deferprocStack and 316 // are initialized here. 317 d.started = false 318 d.heap = false 319 d.openDefer = false 320 d.sp = getcallersp() 321 d.pc = getcallerpc() 322 d.framepc = 0 323 d.varp = 0 324 // The lines below implement: 325 // d.panic = nil 326 // d.fd = nil 327 // d.link = gp._defer 328 // gp._defer = d 329 // But without write barriers. The first three are writes to 330 // the stack so they don't need a write barrier, and furthermore 331 // are to uninitialized memory, so they must not use a write barrier. 332 // The fourth write does not require a write barrier because we 333 // explicitly mark all the defer structures, so we don't need to 334 // keep track of pointers to them with a write barrier. 335 *(*uintptr)(unsafe.Pointer(&d._panic)) = 0 336 *(*uintptr)(unsafe.Pointer(&d.fd)) = 0 337 *(*uintptr)(unsafe.Pointer(&d.link)) = uintptr(unsafe.Pointer(gp._defer)) 338 *(*uintptr)(unsafe.Pointer(&gp._defer)) = uintptr(unsafe.Pointer(d)) 339 340 return0() 341 // No code can go here - the C return register has 342 // been set and must not be clobbered. 343 } 344 345 // Each P holds a pool for defers. 346 347 // Allocate a Defer, usually using per-P pool. 348 // Each defer must be released with freedefer. The defer is not 349 // added to any defer chain yet. 350 func newdefer() *_defer { 351 var d *_defer 352 mp := acquirem() 353 pp := mp.p.ptr() 354 if len(pp.deferpool) == 0 && sched.deferpool != nil { 355 lock(&sched.deferlock) 356 for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil { 357 d := sched.deferpool 358 sched.deferpool = d.link 359 d.link = nil 360 pp.deferpool = append(pp.deferpool, d) 361 } 362 unlock(&sched.deferlock) 363 } 364 if n := len(pp.deferpool); n > 0 { 365 d = pp.deferpool[n-1] 366 pp.deferpool[n-1] = nil 367 pp.deferpool = pp.deferpool[:n-1] 368 } 369 releasem(mp) 370 mp, pp = nil, nil 371 372 if d == nil { 373 // Allocate new defer. 374 d = new(_defer) 375 } 376 d.heap = true 377 return d 378 } 379 380 // Free the given defer. 381 // The defer cannot be used after this call. 382 // 383 // This is nosplit because the incoming defer is in a perilous state. 384 // It's not on any defer list, so stack copying won't adjust stack 385 // pointers in it (namely, d.link). Hence, if we were to copy the 386 // stack, d could then contain a stale pointer. 387 // 388 //go:nosplit 389 func freedefer(d *_defer) { 390 d.link = nil 391 // After this point we can copy the stack. 392 393 if d._panic != nil { 394 freedeferpanic() 395 } 396 if d.fn != nil { 397 freedeferfn() 398 } 399 if !d.heap { 400 return 401 } 402 403 mp := acquirem() 404 pp := mp.p.ptr() 405 if len(pp.deferpool) == cap(pp.deferpool) { 406 // Transfer half of local cache to the central cache. 407 var first, last *_defer 408 for len(pp.deferpool) > cap(pp.deferpool)/2 { 409 n := len(pp.deferpool) 410 d := pp.deferpool[n-1] 411 pp.deferpool[n-1] = nil 412 pp.deferpool = pp.deferpool[:n-1] 413 if first == nil { 414 first = d 415 } else { 416 last.link = d 417 } 418 last = d 419 } 420 lock(&sched.deferlock) 421 last.link = sched.deferpool 422 sched.deferpool = first 423 unlock(&sched.deferlock) 424 } 425 426 *d = _defer{} 427 428 pp.deferpool = append(pp.deferpool, d) 429 430 releasem(mp) 431 mp, pp = nil, nil 432 } 433 434 // Separate function so that it can split stack. 435 // Windows otherwise runs out of stack space. 436 func freedeferpanic() { 437 // _panic must be cleared before d is unlinked from gp. 438 throw("freedefer with d._panic != nil") 439 } 440 441 func freedeferfn() { 442 // fn must be cleared before d is unlinked from gp. 443 throw("freedefer with d.fn != nil") 444 } 445 446 // deferreturn runs deferred functions for the caller's frame. 447 // The compiler inserts a call to this at the end of any 448 // function which calls defer. 449 func deferreturn() { 450 gp := getg() 451 for { 452 d := gp._defer 453 if d == nil { 454 return 455 } 456 sp := getcallersp() 457 if d.sp != sp { 458 return 459 } 460 if d.openDefer { 461 done := runOpenDeferFrame(d) 462 if !done { 463 throw("unfinished open-coded defers in deferreturn") 464 } 465 gp._defer = d.link 466 freedefer(d) 467 // If this frame uses open defers, then this 468 // must be the only defer record for the 469 // frame, so we can just return. 470 return 471 } 472 473 fn := d.fn 474 d.fn = nil 475 gp._defer = d.link 476 freedefer(d) 477 fn() 478 } 479 } 480 481 // Goexit terminates the goroutine that calls it. No other goroutine is affected. 482 // Goexit runs all deferred calls before terminating the goroutine. Because Goexit 483 // is not a panic, any recover calls in those deferred functions will return nil. 484 // 485 // Calling Goexit from the main goroutine terminates that goroutine 486 // without func main returning. Since func main has not returned, 487 // the program continues execution of other goroutines. 488 // If all other goroutines exit, the program crashes. 489 func Goexit() { 490 // Run all deferred functions for the current goroutine. 491 // This code is similar to gopanic, see that implementation 492 // for detailed comments. 493 gp := getg() 494 495 // Create a panic object for Goexit, so we can recognize when it might be 496 // bypassed by a recover(). 497 var p _panic 498 p.goexit = true 499 p.link = gp._panic 500 gp._panic = (*_panic)(noescape(unsafe.Pointer(&p))) 501 502 addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp())) 503 for { 504 d := gp._defer 505 if d == nil { 506 break 507 } 508 if d.started { 509 if d._panic != nil { 510 d._panic.aborted = true 511 d._panic = nil 512 } 513 if !d.openDefer { 514 d.fn = nil 515 gp._defer = d.link 516 freedefer(d) 517 continue 518 } 519 } 520 d.started = true 521 d._panic = (*_panic)(noescape(unsafe.Pointer(&p))) 522 if d.openDefer { 523 done := runOpenDeferFrame(d) 524 if !done { 525 // We should always run all defers in the frame, 526 // since there is no panic associated with this 527 // defer that can be recovered. 528 throw("unfinished open-coded defers in Goexit") 529 } 530 if p.aborted { 531 // Since our current defer caused a panic and may 532 // have been already freed, just restart scanning 533 // for open-coded defers from this frame again. 534 addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp())) 535 } else { 536 addOneOpenDeferFrame(gp, 0, nil) 537 } 538 } else { 539 // Save the pc/sp in deferCallSave(), so we can "recover" back to this 540 // loop if necessary. 541 deferCallSave(&p, d.fn) 542 } 543 if p.aborted { 544 // We had a recursive panic in the defer d we started, and 545 // then did a recover in a defer that was further down the 546 // defer chain than d. In the case of an outstanding Goexit, 547 // we force the recover to return back to this loop. d will 548 // have already been freed if completed, so just continue 549 // immediately to the next defer on the chain. 550 p.aborted = false 551 continue 552 } 553 if gp._defer != d { 554 throw("bad defer entry in Goexit") 555 } 556 d._panic = nil 557 d.fn = nil 558 gp._defer = d.link 559 freedefer(d) 560 // Note: we ignore recovers here because Goexit isn't a panic 561 } 562 goexit1() 563 } 564 565 // Call all Error and String methods before freezing the world. 566 // Used when crashing with panicking. 567 func preprintpanics(p *_panic) { 568 defer func() { 569 text := "panic while printing panic value" 570 switch r := recover().(type) { 571 case nil: 572 // nothing to do 573 case string: 574 throw(text + ": " + r) 575 default: 576 throw(text + ": type " + toRType(efaceOf(&r)._type).string()) 577 } 578 }() 579 for p != nil { 580 switch v := p.arg.(type) { 581 case error: 582 p.arg = v.Error() 583 case stringer: 584 p.arg = v.String() 585 } 586 p = p.link 587 } 588 } 589 590 // Print all currently active panics. Used when crashing. 591 // Should only be called after preprintpanics. 592 func printpanics(p *_panic) { 593 if p.link != nil { 594 printpanics(p.link) 595 if !p.link.goexit { 596 print("\t") 597 } 598 } 599 if p.goexit { 600 return 601 } 602 print("panic: ") 603 printany(p.arg) 604 if p.recovered { 605 print(" [recovered]") 606 } 607 print("\n") 608 } 609 610 // addOneOpenDeferFrame scans the stack (in gentraceback order, from inner frames to 611 // outer frames) for the first frame (if any) with open-coded defers. If it finds 612 // one, it adds a single entry to the defer chain for that frame. The entry added 613 // represents all the defers in the associated open defer frame, and is sorted in 614 // order with respect to any non-open-coded defers. 615 // 616 // addOneOpenDeferFrame stops (possibly without adding a new entry) if it encounters 617 // an in-progress open defer entry. An in-progress open defer entry means there has 618 // been a new panic because of a defer in the associated frame. addOneOpenDeferFrame 619 // does not add an open defer entry past a started entry, because that started entry 620 // still needs to finished, and addOneOpenDeferFrame will be called when that started 621 // entry is completed. The defer removal loop in gopanic() similarly stops at an 622 // in-progress defer entry. Together, addOneOpenDeferFrame and the defer removal loop 623 // ensure the invariant that there is no open defer entry further up the stack than 624 // an in-progress defer, and also that the defer removal loop is guaranteed to remove 625 // all not-in-progress open defer entries from the defer chain. 626 // 627 // If sp is non-nil, addOneOpenDeferFrame starts the stack scan from the frame 628 // specified by sp. If sp is nil, it uses the sp from the current defer record (which 629 // has just been finished). Hence, it continues the stack scan from the frame of the 630 // defer that just finished. It skips any frame that already has a (not-in-progress) 631 // open-coded _defer record in the defer chain. 632 // 633 // Note: All entries of the defer chain (including this new open-coded entry) have 634 // their pointers (including sp) adjusted properly if the stack moves while 635 // running deferred functions. Also, it is safe to pass in the sp arg (which is 636 // the direct result of calling getcallersp()), because all pointer variables 637 // (including arguments) are adjusted as needed during stack copies. 638 func addOneOpenDeferFrame(gp *g, pc uintptr, sp unsafe.Pointer) { 639 var prevDefer *_defer 640 if sp == nil { 641 prevDefer = gp._defer 642 pc = prevDefer.framepc 643 sp = unsafe.Pointer(prevDefer.sp) 644 } 645 systemstack(func() { 646 var u unwinder 647 frames: 648 for u.initAt(pc, uintptr(sp), 0, gp, 0); u.valid(); u.next() { 649 frame := &u.frame 650 if prevDefer != nil && prevDefer.sp == frame.sp { 651 // Skip the frame for the previous defer that 652 // we just finished (and was used to set 653 // where we restarted the stack scan) 654 continue 655 } 656 f := frame.fn 657 fd := funcdata(f, abi.FUNCDATA_OpenCodedDeferInfo) 658 if fd == nil { 659 continue 660 } 661 // Insert the open defer record in the 662 // chain, in order sorted by sp. 663 d := gp._defer 664 var prev *_defer 665 for d != nil { 666 dsp := d.sp 667 if frame.sp < dsp { 668 break 669 } 670 if frame.sp == dsp { 671 if !d.openDefer { 672 throw("duplicated defer entry") 673 } 674 // Don't add any record past an 675 // in-progress defer entry. We don't 676 // need it, and more importantly, we 677 // want to keep the invariant that 678 // there is no open defer entry 679 // passed an in-progress entry (see 680 // header comment). 681 if d.started { 682 break frames 683 } 684 continue frames 685 } 686 prev = d 687 d = d.link 688 } 689 if frame.fn.deferreturn == 0 { 690 throw("missing deferreturn") 691 } 692 693 d1 := newdefer() 694 d1.openDefer = true 695 d1._panic = nil 696 // These are the pc/sp to set after we've 697 // run a defer in this frame that did a 698 // recover. We return to a special 699 // deferreturn that runs any remaining 700 // defers and then returns from the 701 // function. 702 d1.pc = frame.fn.entry() + uintptr(frame.fn.deferreturn) 703 d1.varp = frame.varp 704 d1.fd = fd 705 // Save the SP/PC associated with current frame, 706 // so we can continue stack trace later if needed. 707 d1.framepc = frame.pc 708 d1.sp = frame.sp 709 d1.link = d 710 if prev == nil { 711 gp._defer = d1 712 } else { 713 prev.link = d1 714 } 715 // Stop stack scanning after adding one open defer record 716 break 717 } 718 }) 719 } 720 721 // readvarintUnsafe reads the uint32 in varint format starting at fd, and returns the 722 // uint32 and a pointer to the byte following the varint. 723 // 724 // There is a similar function runtime.readvarint, which takes a slice of bytes, 725 // rather than an unsafe pointer. These functions are duplicated, because one of 726 // the two use cases for the functions would get slower if the functions were 727 // combined. 728 func readvarintUnsafe(fd unsafe.Pointer) (uint32, unsafe.Pointer) { 729 var r uint32 730 var shift int 731 for { 732 b := *(*uint8)((unsafe.Pointer(fd))) 733 fd = add(fd, unsafe.Sizeof(b)) 734 if b < 128 { 735 return r + uint32(b)<<shift, fd 736 } 737 r += ((uint32(b) &^ 128) << shift) 738 shift += 7 739 if shift > 28 { 740 panic("Bad varint") 741 } 742 } 743 } 744 745 // runOpenDeferFrame runs the active open-coded defers in the frame specified by 746 // d. It normally processes all active defers in the frame, but stops immediately 747 // if a defer does a successful recover. It returns true if there are no 748 // remaining defers to run in the frame. 749 func runOpenDeferFrame(d *_defer) bool { 750 done := true 751 fd := d.fd 752 753 deferBitsOffset, fd := readvarintUnsafe(fd) 754 nDefers, fd := readvarintUnsafe(fd) 755 deferBits := *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) 756 757 for i := int(nDefers) - 1; i >= 0; i-- { 758 // read the funcdata info for this defer 759 var closureOffset uint32 760 closureOffset, fd = readvarintUnsafe(fd) 761 if deferBits&(1<<i) == 0 { 762 continue 763 } 764 closure := *(*func())(unsafe.Pointer(d.varp - uintptr(closureOffset))) 765 d.fn = closure 766 deferBits = deferBits &^ (1 << i) 767 *(*uint8)(unsafe.Pointer(d.varp - uintptr(deferBitsOffset))) = deferBits 768 p := d._panic 769 // Call the defer. Note that this can change d.varp if 770 // the stack moves. 771 deferCallSave(p, d.fn) 772 if p != nil && p.aborted { 773 break 774 } 775 d.fn = nil 776 if d._panic != nil && d._panic.recovered { 777 done = deferBits == 0 778 break 779 } 780 } 781 782 return done 783 } 784 785 // deferCallSave calls fn() after saving the caller's pc and sp in the 786 // panic record. This allows the runtime to return to the Goexit defer 787 // processing loop, in the unusual case where the Goexit may be 788 // bypassed by a successful recover. 789 // 790 // This is marked as a wrapper by the compiler so it doesn't appear in 791 // tracebacks. 792 func deferCallSave(p *_panic, fn func()) { 793 if p != nil { 794 p.argp = unsafe.Pointer(getargp()) 795 p.pc = getcallerpc() 796 p.sp = unsafe.Pointer(getcallersp()) 797 } 798 fn() 799 if p != nil { 800 p.pc = 0 801 p.sp = unsafe.Pointer(nil) 802 } 803 } 804 805 // A PanicNilError happens when code calls panic(nil). 806 // 807 // Before Go 1.21, programs that called panic(nil) observed recover returning nil. 808 // Starting in Go 1.21, programs that call panic(nil) observe recover returning a *PanicNilError. 809 // Programs can change back to the old behavior by setting GODEBUG=panicnil=1. 810 type PanicNilError struct { 811 // This field makes PanicNilError structurally different from 812 // any other struct in this package, and the _ makes it different 813 // from any struct in other packages too. 814 // This avoids any accidental conversions being possible 815 // between this struct and some other struct sharing the same fields, 816 // like happened in go.dev/issue/56603. 817 _ [0]*PanicNilError 818 } 819 820 func (*PanicNilError) Error() string { return "panic called with nil argument" } 821 func (*PanicNilError) RuntimeError() {} 822 823 var panicnil = &godebugInc{name: "panicnil"} 824 825 // The implementation of the predeclared function panic. 826 func gopanic(e any) { 827 if e == nil { 828 if debug.panicnil.Load() != 1 { 829 e = new(PanicNilError) 830 } else { 831 panicnil.IncNonDefault() 832 } 833 } 834 835 gp := getg() 836 if gp.m.curg != gp { 837 print("panic: ") 838 printany(e) 839 print("\n") 840 throw("panic on system stack") 841 } 842 843 if gp.m.mallocing != 0 { 844 print("panic: ") 845 printany(e) 846 print("\n") 847 throw("panic during malloc") 848 } 849 if gp.m.preemptoff != "" { 850 print("panic: ") 851 printany(e) 852 print("\n") 853 print("preempt off reason: ") 854 print(gp.m.preemptoff) 855 print("\n") 856 throw("panic during preemptoff") 857 } 858 if gp.m.locks != 0 { 859 print("panic: ") 860 printany(e) 861 print("\n") 862 throw("panic holding locks") 863 } 864 865 var p _panic 866 p.arg = e 867 p.link = gp._panic 868 gp._panic = (*_panic)(noescape(unsafe.Pointer(&p))) 869 870 runningPanicDefers.Add(1) 871 872 // By calculating getcallerpc/getcallersp here, we avoid scanning the 873 // gopanic frame (stack scanning is slow...) 874 addOneOpenDeferFrame(gp, getcallerpc(), unsafe.Pointer(getcallersp())) 875 876 for { 877 d := gp._defer 878 if d == nil { 879 break 880 } 881 882 // If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic), 883 // take defer off list. An earlier panic will not continue running, but we will make sure below that an 884 // earlier Goexit does continue running. 885 if d.started { 886 if d._panic != nil { 887 d._panic.aborted = true 888 } 889 d._panic = nil 890 if !d.openDefer { 891 // For open-coded defers, we need to process the 892 // defer again, in case there are any other defers 893 // to call in the frame (not including the defer 894 // call that caused the panic). 895 d.fn = nil 896 gp._defer = d.link 897 freedefer(d) 898 continue 899 } 900 } 901 902 // Mark defer as started, but keep on list, so that traceback 903 // can find and update the defer's argument frame if stack growth 904 // or a garbage collection happens before executing d.fn. 905 d.started = true 906 907 // Record the panic that is running the defer. 908 // If there is a new panic during the deferred call, that panic 909 // will find d in the list and will mark d._panic (this panic) aborted. 910 d._panic = (*_panic)(noescape(unsafe.Pointer(&p))) 911 912 done := true 913 if d.openDefer { 914 done = runOpenDeferFrame(d) 915 if done && !d._panic.recovered { 916 addOneOpenDeferFrame(gp, 0, nil) 917 } 918 } else { 919 p.argp = unsafe.Pointer(getargp()) 920 d.fn() 921 } 922 p.argp = nil 923 924 // Deferred function did not panic. Remove d. 925 if gp._defer != d { 926 throw("bad defer entry in panic") 927 } 928 d._panic = nil 929 930 // trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic 931 //GC() 932 933 pc := d.pc 934 sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy 935 if done { 936 d.fn = nil 937 gp._defer = d.link 938 freedefer(d) 939 } 940 if p.recovered { 941 gp._panic = p.link 942 if gp._panic != nil && gp._panic.goexit && gp._panic.aborted { 943 // A normal recover would bypass/abort the Goexit. Instead, 944 // we return to the processing loop of the Goexit. 945 gp.sigcode0 = uintptr(gp._panic.sp) 946 gp.sigcode1 = uintptr(gp._panic.pc) 947 mcall(recovery) 948 throw("bypassed recovery failed") // mcall should not return 949 } 950 runningPanicDefers.Add(-1) 951 952 // After a recover, remove any remaining non-started, 953 // open-coded defer entries, since the corresponding defers 954 // will be executed normally (inline). Any such entry will 955 // become stale once we run the corresponding defers inline 956 // and exit the associated stack frame. We only remove up to 957 // the first started (in-progress) open defer entry, not 958 // including the current frame, since any higher entries will 959 // be from a higher panic in progress, and will still be 960 // needed. 961 d := gp._defer 962 var prev *_defer 963 if !done { 964 // Skip our current frame, if not done. It is 965 // needed to complete any remaining defers in 966 // deferreturn() 967 prev = d 968 d = d.link 969 } 970 for d != nil { 971 if d.started { 972 // This defer is started but we 973 // are in the middle of a 974 // defer-panic-recover inside of 975 // it, so don't remove it or any 976 // further defer entries 977 break 978 } 979 if d.openDefer { 980 if prev == nil { 981 gp._defer = d.link 982 } else { 983 prev.link = d.link 984 } 985 newd := d.link 986 freedefer(d) 987 d = newd 988 } else { 989 prev = d 990 d = d.link 991 } 992 } 993 994 gp._panic = p.link 995 // Aborted panics are marked but remain on the g.panic list. 996 // Remove them from the list. 997 for gp._panic != nil && gp._panic.aborted { 998 gp._panic = gp._panic.link 999 } 1000 if gp._panic == nil { // must be done with signal 1001 gp.sig = 0 1002 } 1003 // Pass information about recovering frame to recovery. 1004 gp.sigcode0 = uintptr(sp) 1005 gp.sigcode1 = pc 1006 mcall(recovery) 1007 throw("recovery failed") // mcall should not return 1008 } 1009 } 1010 1011 // ran out of deferred calls - old-school panic now 1012 // Because it is unsafe to call arbitrary user code after freezing 1013 // the world, we call preprintpanics to invoke all necessary Error 1014 // and String methods to prepare the panic strings before startpanic. 1015 preprintpanics(gp._panic) 1016 1017 fatalpanic(gp._panic) // should not return 1018 *(*int)(nil) = 0 // not reached 1019 } 1020 1021 // getargp returns the location where the caller 1022 // writes outgoing function call arguments. 1023 // 1024 //go:nosplit 1025 //go:noinline 1026 func getargp() uintptr { 1027 return getcallersp() + sys.MinFrameSize 1028 } 1029 1030 // The implementation of the predeclared function recover. 1031 // Cannot split the stack because it needs to reliably 1032 // find the stack segment of its caller. 1033 // 1034 // TODO(rsc): Once we commit to CopyStackAlways, 1035 // this doesn't need to be nosplit. 1036 // 1037 //go:nosplit 1038 func gorecover(argp uintptr) any { 1039 // Must be in a function running as part of a deferred call during the panic. 1040 // Must be called from the topmost function of the call 1041 // (the function used in the defer statement). 1042 // p.argp is the argument pointer of that topmost deferred function call. 1043 // Compare against argp reported by caller. 1044 // If they match, the caller is the one who can recover. 1045 gp := getg() 1046 p := gp._panic 1047 if p != nil && !p.goexit && !p.recovered && argp == uintptr(p.argp) { 1048 p.recovered = true 1049 return p.arg 1050 } 1051 return nil 1052 } 1053 1054 //go:linkname sync_throw sync.throw 1055 func sync_throw(s string) { 1056 throw(s) 1057 } 1058 1059 //go:linkname sync_fatal sync.fatal 1060 func sync_fatal(s string) { 1061 fatal(s) 1062 } 1063 1064 // throw triggers a fatal error that dumps a stack trace and exits. 1065 // 1066 // throw should be used for runtime-internal fatal errors where Go itself, 1067 // rather than user code, may be at fault for the failure. 1068 // 1069 //go:nosplit 1070 func throw(s string) { 1071 // Everything throw does should be recursively nosplit so it 1072 // can be called even when it's unsafe to grow the stack. 1073 systemstack(func() { 1074 print("fatal error: ", s, "\n") 1075 }) 1076 1077 fatalthrow(throwTypeRuntime) 1078 } 1079 1080 // fatal triggers a fatal error that dumps a stack trace and exits. 1081 // 1082 // fatal is equivalent to throw, but is used when user code is expected to be 1083 // at fault for the failure, such as racing map writes. 1084 // 1085 // fatal does not include runtime frames, system goroutines, or frame metadata 1086 // (fp, sp, pc) in the stack trace unless GOTRACEBACK=system or higher. 1087 // 1088 //go:nosplit 1089 func fatal(s string) { 1090 // Everything fatal does should be recursively nosplit so it 1091 // can be called even when it's unsafe to grow the stack. 1092 systemstack(func() { 1093 print("fatal error: ", s, "\n") 1094 }) 1095 1096 fatalthrow(throwTypeUser) 1097 } 1098 1099 // runningPanicDefers is non-zero while running deferred functions for panic. 1100 // This is used to try hard to get a panic stack trace out when exiting. 1101 var runningPanicDefers atomic.Uint32 1102 1103 // panicking is non-zero when crashing the program for an unrecovered panic. 1104 var panicking atomic.Uint32 1105 1106 // paniclk is held while printing the panic information and stack trace, 1107 // so that two concurrent panics don't overlap their output. 1108 var paniclk mutex 1109 1110 // Unwind the stack after a deferred function calls recover 1111 // after a panic. Then arrange to continue running as though 1112 // the caller of the deferred function returned normally. 1113 func recovery(gp *g) { 1114 // Info about defer passed in G struct. 1115 sp := gp.sigcode0 1116 pc := gp.sigcode1 1117 1118 // d's arguments need to be in the stack. 1119 if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) { 1120 print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n") 1121 throw("bad recovery") 1122 } 1123 1124 // Make the deferproc for this d return again, 1125 // this time returning 1. The calling function will 1126 // jump to the standard return epilogue. 1127 gp.sched.sp = sp 1128 gp.sched.pc = pc 1129 gp.sched.lr = 0 1130 gp.sched.ret = 1 1131 gogo(&gp.sched) 1132 } 1133 1134 // fatalthrow implements an unrecoverable runtime throw. It freezes the 1135 // system, prints stack traces starting from its caller, and terminates the 1136 // process. 1137 // 1138 //go:nosplit 1139 func fatalthrow(t throwType) { 1140 pc := getcallerpc() 1141 sp := getcallersp() 1142 gp := getg() 1143 1144 if gp.m.throwing == throwTypeNone { 1145 gp.m.throwing = t 1146 } 1147 1148 // Switch to the system stack to avoid any stack growth, which may make 1149 // things worse if the runtime is in a bad state. 1150 systemstack(func() { 1151 startpanic_m() 1152 1153 if dopanic_m(gp, pc, sp) { 1154 // crash uses a decent amount of nosplit stack and we're already 1155 // low on stack in throw, so crash on the system stack (unlike 1156 // fatalpanic). 1157 crash() 1158 } 1159 1160 exit(2) 1161 }) 1162 1163 *(*int)(nil) = 0 // not reached 1164 } 1165 1166 // fatalpanic implements an unrecoverable panic. It is like fatalthrow, except 1167 // that if msgs != nil, fatalpanic also prints panic messages and decrements 1168 // runningPanicDefers once main is blocked from exiting. 1169 // 1170 //go:nosplit 1171 func fatalpanic(msgs *_panic) { 1172 pc := getcallerpc() 1173 sp := getcallersp() 1174 gp := getg() 1175 var docrash bool 1176 // Switch to the system stack to avoid any stack growth, which 1177 // may make things worse if the runtime is in a bad state. 1178 systemstack(func() { 1179 if startpanic_m() && msgs != nil { 1180 // There were panic messages and startpanic_m 1181 // says it's okay to try to print them. 1182 1183 // startpanic_m set panicking, which will 1184 // block main from exiting, so now OK to 1185 // decrement runningPanicDefers. 1186 runningPanicDefers.Add(-1) 1187 1188 printpanics(msgs) 1189 } 1190 1191 docrash = dopanic_m(gp, pc, sp) 1192 }) 1193 1194 if docrash { 1195 // By crashing outside the above systemstack call, debuggers 1196 // will not be confused when generating a backtrace. 1197 // Function crash is marked nosplit to avoid stack growth. 1198 crash() 1199 } 1200 1201 systemstack(func() { 1202 exit(2) 1203 }) 1204 1205 *(*int)(nil) = 0 // not reached 1206 } 1207 1208 // startpanic_m prepares for an unrecoverable panic. 1209 // 1210 // It returns true if panic messages should be printed, or false if 1211 // the runtime is in bad shape and should just print stacks. 1212 // 1213 // It must not have write barriers even though the write barrier 1214 // explicitly ignores writes once dying > 0. Write barriers still 1215 // assume that g.m.p != nil, and this function may not have P 1216 // in some contexts (e.g. a panic in a signal handler for a signal 1217 // sent to an M with no P). 1218 // 1219 //go:nowritebarrierrec 1220 func startpanic_m() bool { 1221 gp := getg() 1222 if mheap_.cachealloc.size == 0 { // very early 1223 print("runtime: panic before malloc heap initialized\n") 1224 } 1225 // Disallow malloc during an unrecoverable panic. A panic 1226 // could happen in a signal handler, or in a throw, or inside 1227 // malloc itself. We want to catch if an allocation ever does 1228 // happen (even if we're not in one of these situations). 1229 gp.m.mallocing++ 1230 1231 // If we're dying because of a bad lock count, set it to a 1232 // good lock count so we don't recursively panic below. 1233 if gp.m.locks < 0 { 1234 gp.m.locks = 1 1235 } 1236 1237 switch gp.m.dying { 1238 case 0: 1239 // Setting dying >0 has the side-effect of disabling this G's writebuf. 1240 gp.m.dying = 1 1241 panicking.Add(1) 1242 lock(&paniclk) 1243 if debug.schedtrace > 0 || debug.scheddetail > 0 { 1244 schedtrace(true) 1245 } 1246 if debug.dontfreezetheworld > 0 { 1247 return true 1248 } 1249 freezetheworld() 1250 return true 1251 case 1: 1252 // Something failed while panicking. 1253 // Just print a stack trace and exit. 1254 gp.m.dying = 2 1255 print("panic during panic\n") 1256 return false 1257 case 2: 1258 // This is a genuine bug in the runtime, we couldn't even 1259 // print the stack trace successfully. 1260 gp.m.dying = 3 1261 print("stack trace unavailable\n") 1262 exit(4) 1263 fallthrough 1264 default: 1265 // Can't even print! Just exit. 1266 exit(5) 1267 return false // Need to return something. 1268 } 1269 } 1270 1271 var didothers bool 1272 var deadlock mutex 1273 1274 // gp is the crashing g running on this M, but may be a user G, while getg() is 1275 // always g0. 1276 func dopanic_m(gp *g, pc, sp uintptr) bool { 1277 if gp.sig != 0 { 1278 signame := signame(gp.sig) 1279 if signame != "" { 1280 print("[signal ", signame) 1281 } else { 1282 print("[signal ", hex(gp.sig)) 1283 } 1284 print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n") 1285 } 1286 1287 level, all, docrash := gotraceback() 1288 if level > 0 { 1289 if gp != gp.m.curg { 1290 all = true 1291 } 1292 if gp != gp.m.g0 { 1293 print("\n") 1294 goroutineheader(gp) 1295 traceback(pc, sp, 0, gp) 1296 } else if level >= 2 || gp.m.throwing >= throwTypeRuntime { 1297 print("\nruntime stack:\n") 1298 traceback(pc, sp, 0, gp) 1299 } 1300 if !didothers && all { 1301 didothers = true 1302 tracebackothers(gp) 1303 } 1304 } 1305 unlock(&paniclk) 1306 1307 if panicking.Add(-1) != 0 { 1308 // Some other m is panicking too. 1309 // Let it print what it needs to print. 1310 // Wait forever without chewing up cpu. 1311 // It will exit when it's done. 1312 lock(&deadlock) 1313 lock(&deadlock) 1314 } 1315 1316 printDebugLog() 1317 1318 return docrash 1319 } 1320 1321 // canpanic returns false if a signal should throw instead of 1322 // panicking. 1323 // 1324 //go:nosplit 1325 func canpanic() bool { 1326 gp := getg() 1327 mp := acquirem() 1328 1329 // Is it okay for gp to panic instead of crashing the program? 1330 // Yes, as long as it is running Go code, not runtime code, 1331 // and not stuck in a system call. 1332 if gp != mp.curg { 1333 releasem(mp) 1334 return false 1335 } 1336 // N.B. mp.locks != 1 instead of 0 to account for acquirem. 1337 if mp.locks != 1 || mp.mallocing != 0 || mp.throwing != throwTypeNone || mp.preemptoff != "" || mp.dying != 0 { 1338 releasem(mp) 1339 return false 1340 } 1341 status := readgstatus(gp) 1342 if status&^_Gscan != _Grunning || gp.syscallsp != 0 { 1343 releasem(mp) 1344 return false 1345 } 1346 if GOOS == "windows" && mp.libcallsp != 0 { 1347 releasem(mp) 1348 return false 1349 } 1350 releasem(mp) 1351 return true 1352 } 1353 1354 // shouldPushSigpanic reports whether pc should be used as sigpanic's 1355 // return PC (pushing a frame for the call). Otherwise, it should be 1356 // left alone so that LR is used as sigpanic's return PC, effectively 1357 // replacing the top-most frame with sigpanic. This is used by 1358 // preparePanic. 1359 func shouldPushSigpanic(gp *g, pc, lr uintptr) bool { 1360 if pc == 0 { 1361 // Probably a call to a nil func. The old LR is more 1362 // useful in the stack trace. Not pushing the frame 1363 // will make the trace look like a call to sigpanic 1364 // instead. (Otherwise the trace will end at sigpanic 1365 // and we won't get to see who faulted.) 1366 return false 1367 } 1368 // If we don't recognize the PC as code, but we do recognize 1369 // the link register as code, then this assumes the panic was 1370 // caused by a call to non-code. In this case, we want to 1371 // ignore this call to make unwinding show the context. 1372 // 1373 // If we running C code, we're not going to recognize pc as a 1374 // Go function, so just assume it's good. Otherwise, traceback 1375 // may try to read a stale LR that looks like a Go code 1376 // pointer and wander into the woods. 1377 if gp.m.incgo || findfunc(pc).valid() { 1378 // This wasn't a bad call, so use PC as sigpanic's 1379 // return PC. 1380 return true 1381 } 1382 if findfunc(lr).valid() { 1383 // This was a bad call, but the LR is good, so use the 1384 // LR as sigpanic's return PC. 1385 return false 1386 } 1387 // Neither the PC or LR is good. Hopefully pushing a frame 1388 // will work. 1389 return true 1390 } 1391 1392 // isAbortPC reports whether pc is the program counter at which 1393 // runtime.abort raises a signal. 1394 // 1395 // It is nosplit because it's part of the isgoexception 1396 // implementation. 1397 // 1398 //go:nosplit 1399 func isAbortPC(pc uintptr) bool { 1400 f := findfunc(pc) 1401 if !f.valid() { 1402 return false 1403 } 1404 return f.funcID == abi.FuncID_abort 1405 }