// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// The error values below are preallocated at package level so that
// raising these runtime panics does not itself allocate.

var indexError = error(errorString("index out of range"))

// panicindex is called by compiler-generated code for an out-of-range index.
func panicindex() {
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

// panicslice is called by compiler-generated code for out-of-range slice bounds.
func panicslice() {
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

// panicdivide is called by compiler-generated code for integer division by zero.
func panicdivide() {
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called on an invalid memory access, e.g. a nil pointer dereference.
func panicmem() {
	panic(memoryError)
}

func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments are laid out on the stack immediately after fn itself.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		// Copy the deferred call's arguments into the slots that follow
		// the _defer header (see deferArgs).
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		// Class 0: the args fit in the slack of the minimum allocation.
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	// -1 marks a defer size class whose malloc size has not been seen yet.
	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		// Every arg size in the same defer class must round up to the
		// same malloc size, or the per-P pools would mix block sizes.
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

func init() {
	// Recover the *_type for _defer by boxing a (*_defer)(nil) into an
	// interface and reading the pointer type's element type.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		// Small arg size: try to reuse a _defer from this P's pool.
		pp := mp.p
		d = pp.deferpool[sc]
		if d != nil {
			pp.deferpool[sc] = d.link
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	if mheap_.shadow_enabled {
		// This memory will be written directly, with no write barrier,
		// and then scanned like stacks during collection.
		// Unlike real stacks, it is from heap spans, so mark the
		// shadow as explicitly unusable.
		p := deferArgs(d)
		for i := uintptr(0); i+ptrSize <= uintptr(siz); i += ptrSize {
			writebarrierptr_noshadow((*uintptr)(add(p, i)))
		}
	}
	// Link the new defer at the head of the current goroutine's defer list.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if mheap_.shadow_enabled {
		// Undo the marking in newdefer.
		systemstack(func() {
			clearshadow(uintptr(deferArgs(d)), uintptr(d.siz))
		})
	}
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		// Small defer: zero it and return it to this P's pool.
		// (Larger defers are simply left for the garbage collector.)
		mp := acquirem()
		pp := mp.p
		*d = _defer{}
		d.link = pp.deferpool[sc]
		pp.deferpool[sc] = d
		releasem(mp)
	}
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		// The topmost defer was registered by a different frame
		// (its recorded sp doesn't match our caller); nothing to run here.
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	releasem(mp)
	// jmpdefer never returns here: it jumps to fn, arranged so that fn's
	// return re-enters deferreturn's caller just before the call site.
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by a panic that we
			// interrupted; abandon both the defer and that panic.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit()
}

// Print all currently active panics.
// Used when crashing.
func printpanics(p *_panic) {
	// Recurse first so the oldest panic on the list prints first.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.gcing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during gc")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// The _panic record lives on this frame's stack; link it at the
	// head of the goroutine's panic list.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			// A recover() inside d.fn claimed this panic; resume the
			// recovering frame instead of continuing to unwind.
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}

//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}

// throw reports a fatal runtime error and crashes the process.
// It never returns.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}