github.com/goproxy0/go@v0.0.0-20171111080102-49cc0c489d2c/src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Calling panic with one of the errors below will call errorString.Error
// which will call mallocgc to concatenate strings. That will fail if
// malloc is locked, causing a confusing error message. Throw a better
// error message instead.
func panicCheckMalloc(err error) {
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(string(err.(errorString)))
	}
}

var indexError = error(errorString("index out of range"))

func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
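// For orientation: for a statement like "defer f(x)" the compiler emits
// code roughly shaped like the pseudocode below. This is an illustrative
// sketch of the contract described in the comments above and in
// deferreturn, not the literal generated code.
//
//	if deferproc(siz, fn) != 0 { // fn's args are copied after fn itself
//		goto epilogue // nonzero return: a deferred call recovered a panic
//	}
//	... function body ...
//	epilogue:
//		deferreturn()
//		return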
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}
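// A worked example of the size-class math above. The real value of
// deferHeaderSize depends on the platform's _defer layout, so the numbers
// here are purely illustrative: suppose deferHeaderSize were 72 bytes.
//
//	minDeferAlloc = (72 + 15) &^ 15 = 80  // header rounded up to 16 bytes
//	minDeferArgs  = 80 - 72 = 8           // arg bytes that fit "for free"
//
//	deferclass(8)  = 0                    // fits in the minimum block
//	deferclass(9)  = (9-8+15)/16 = 1
//	deferclass(24) = (24-8+15)/16 = 1     // same class -> same malloc bucket
//	deferclass(25) = (25-8+15)/16 = 2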
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	d.link = gp._defer
	gp._defer = d
	return d
}
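// A note on the allocation strategy in newdefer (a summary of the code
// above): defer records come from a two-level cache. The per-P pool
// (pp.deferpool[sc]) is tried first; when it is empty, a batch is refilled
// from the global pool (sched.deferpool[sc]) under sched.deferlock,
// stopping at half of the local pool's capacity; only if both are empty
// does mallocgc allocate a fresh block, rounded up to the matching malloc
// size class. freedefer below is the mirror image: a full local pool is
// drained back down to half capacity into the global pool, so records
// shuttle between the two tiers in batches rather than one at a time.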
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.sp = 0
	d.pc = 0
	d.fn = nil
	d._panic = nil
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
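// The jmpdefer dance described in the comment above, spelled out as
// pseudocode (illustrative; the exact frame manipulation is per-arch
// assembly):
//
//	f epilogue:   CALL deferreturn
//	deferreturn:  copy d's args into f's outgoing-arg slot (arg0),
//	              unlink and free d, then jmpdefer(fn, &arg0)
//	jmpdefer:     rewind SP to f's frame and back the return address up
//	              so it points at the CALL deferreturn instruction again,
//	              then jump to fn
//	fn returns:   control lands on CALL deferreturn once more, which runs
//	              the next defer whose d.sp matches this frame, or returns
//	              when none are left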
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
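// A user-level illustration of the semantics documented above (hypothetical
// example, not part of the runtime):
//
//	go func() {
//		defer fmt.Println("deferred calls still run")
//		defer func() {
//			// recover returns nil: Goexit is not a panic.
//			fmt.Println(recover() == nil) // true
//		}()
//		runtime.Goexit()
//		fmt.Println("never reached")
//	}()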
// Call all Error and String methods before freezing the world.
// Used when crashing from an unrecovered panic.
// This must match types handled by printany.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()

	// startpanic set panicking, which will block main from exiting,
	// so now OK to decrement runningPanicDefers.
	atomic.Xadd(&runningPanicDefers, -1)

	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc()
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}
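// How the pieces above fit together on a successful recover (a summary of
// this file's code, not a separate mechanism):
//
//	1. panic(e) enters gopanic, which walks gp._defer, running each
//	   deferred call with reflectcall and pointing p.argp at gopanic's
//	   outgoing argument area - exactly where the deferred call's
//	   arguments live.
//	2. The deferred function calls recover, which the compiler lowers to
//	   gorecover(argp). Since its argp matches p.argp, gorecover marks
//	   p.recovered and returns p.arg.
//	3. reflectcall returns to gopanic, which sees p.recovered, pops the
//	   panic, stashes the defer's sp/pc in gp.sigcode0/gp.sigcode1, and
//	   mcalls recovery.
//	4. recovery rewinds the goroutine to the recorded deferproc call with
//	   gp.sched.ret = 1, so deferproc appears to return 1 and the
//	   compiler-generated check jumps to the function's deferreturn
//	   epilogue, resuming normal execution in the recovering frame.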
// startpanic_m implements unrecoverable panic.
//
// It can have write barriers because the write barrier explicitly
// ignores writes once dying > 0.
//
//go:yeswritebarrierrec
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic(). Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}

//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}
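// Note: canpanic is the gatekeeper that lets the runtime convert a
// synchronous signal (e.g. the SIGSEGV behind a nil-pointer dereference)
// into an ordinary Go panic such as panicmem above, rather than a fatal
// crash. The signal-handling code consults it and only raises a panic when
// the faulting goroutine was running ordinary Go code; otherwise the signal
// is treated as an unrecoverable throw. (This is a summary of the checks
// above and of the call sites in the signal handlers, offered for
// orientation.)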