github.com/aloncn/graphics-go@v0.0.1/src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// Calling panic with one of the errors below will call errorString.Error
// which will call mallocgc to concatenate strings. That will fail if
// malloc is locked, causing a confusing error message. Throw a better
// error message instead.
func panicCheckMalloc(err error) {
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(string(err.(errorString)))
	}
}

var indexError = error(errorString("index out of range"))

func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}

func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
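
// As a rough sketch (illustrative pseudocode only; the real lowering is
// generated per-architecture by the compiler), the comments above mean a
// statement like
//
//	defer g(x)
//
// compiles to approximately:
//
//	if deferproc(siz, g, x) != 0 {
//		goto exit // a recover made deferproc "return" 1: skip to the epilogue
//	}
//	// ... rest of the function body ...
//	exit:
//		deferreturn()
//		return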

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}
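
// A worked example of the size-class mapping (illustrative arithmetic only;
// the concrete constants depend on unsafe.Sizeof(_defer{}) for the target).
// Assume a header that is already 16-byte aligned, so minDeferArgs == 0:
//
//	deferclass(0)  == 0                 // args fit in the minimum allocation
//	deferclass(1)  == (1 + 15) / 16  == 1
//	deferclass(16) == (16 + 15) / 16 == 1
//	deferclass(17) == (17 + 15) / 16 == 2
//
// Each class covers one 16-byte step of argument sizes, matching the 16-byte
// spacing of the small malloc size classes; testdefersizes above checks that
// this correspondence actually holds for every class.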

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
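
// An illustrative use of Goexit from ordinary user code (a sketch, not part
// of this file): deferred calls still run in LIFO order, but recover returns
// nil because no panic is in progress.
//
//	go func() {
//		defer fmt.Println("runs before the goroutine exits")
//		defer func() {
//			fmt.Println("recover returns:", recover()) // <nil>
//		}()
//		runtime.Goexit()
//		fmt.Println("never reached")
//	}()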

// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
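
// An illustrative consequence of the "aborted" handling above (ordinary user
// code, a sketch): a panic raised inside a deferred call supersedes the panic
// already in progress, and a later recover observes only the newer value.
//
//	func nested() {
//		defer func() {
//			fmt.Println(recover()) // prints "second"; "first" was aborted
//		}()
//		defer func() {
//			panic("second") // new panic while "first" is running defers
//		}()
//		panic("first")
//	}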

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
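
// An illustrative consequence of the argp comparison above (ordinary user
// code, a sketch; helper is a hypothetical function that just calls recover):
// recover stops a panic only when called directly by the deferred function,
// because only that frame's argp matches p.argp.
//
//	defer func() {
//		recover() // directly deferred frame: argp matches, panic stops
//	}()
//
//	defer func() {
//		helper() // helper's recover sees a different argp: returns nil
//	}()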

//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}

//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}

//uint32 runtime·panicking;
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	gcUnwindBarriers(gp, sp)
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic(). Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}

//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}
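
// An illustrative effect of canpanic (a sketch of ordinary user code): when
// canpanic reports true, a fault such as a nil-pointer dereference is turned
// by the signal handler into the recoverable memoryError panic declared at
// the top of this file rather than crashing the process.
//
//	defer func() {
//		if r := recover(); r != nil {
//			fmt.Println(r) // runtime error: invalid memory address or nil pointer dereference
//		}
//	}()
//	var p *int
//	_ = *p // faults; delivered as a signal, then re-raised as a panic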