github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/panic.c

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "stack.h"
#include "malloc.h"

// Code related to defer, panic and recover.

uint32 runtime·panicking;
static Lock paniclk;

enum
{
	DeferChunkSize = 2048
};

// Allocate a Defer, usually as part of the larger frame of deferred functions.
// Each defer must be released with both popdefer and freedefer.
static Defer*
newdefer(int32 siz)
{
	int32 total;
	DeferChunk *c;
	Defer *d;

	c = g->dchunk;
	total = sizeof(*d) + ROUND(siz, sizeof(uintptr)) - sizeof(d->args);
	if(c == nil || total > DeferChunkSize - c->off) {
		if(total > DeferChunkSize / 2) {
			// Not worth putting in any chunk.
			// Allocate a separate block.
			d = runtime·malloc(total);
			d->siz = siz;
			d->special = 1;
			d->free = 1;
			d->link = g->defer;
			g->defer = d;
			return d;
		}

		// Cannot fit in current chunk.
		// Switch to next chunk, allocating if necessary.
		c = g->dchunknext;
		if(c == nil)
			c = runtime·malloc(DeferChunkSize);
		c->prev = g->dchunk;
		c->off = sizeof(*c);
		g->dchunk = c;
		g->dchunknext = nil;
	}

	d = (Defer*)((byte*)c + c->off);
	c->off += total;
	d->siz = siz;
	d->special = 0;
	d->free = 0;
	d->link = g->defer;
	g->defer = d;
	return d;
}

// Pop the current defer from the defer stack.
// Its contents are still valid until the goroutine begins executing again.
// In particular it is safe to call reflect.call(d->fn, d->argp, d->siz) after
// popdefer returns.
static void
popdefer(void)
{
	Defer *d;
	DeferChunk *c;
	int32 total;

	d = g->defer;
	if(d == nil)
		runtime·throw("runtime: popdefer nil");
	g->defer = d->link;
	if(d->special) {
		// Nothing else to do.
		return;
	}
	total = sizeof(*d) + ROUND(d->siz, sizeof(uintptr)) - sizeof(d->args);
	c = g->dchunk;
	if(c == nil || (byte*)d+total != (byte*)c+c->off)
		runtime·throw("runtime: popdefer phase error");
	c->off -= total;
	if(c->off == sizeof(*c)) {
		// Chunk now empty, so pop from stack.
		// Save in dchunknext both to help with pingponging between frames
		// and to make sure d is still valid on return.
		if(g->dchunknext != nil)
			runtime·free(g->dchunknext);
		g->dchunknext = c;
		g->dchunk = c->prev;
	}
}

// Free the given defer.
// For defers in the per-goroutine chunk this just clears the saved arguments.
// For large defers allocated on the heap, this frees them.
// The defer cannot be used after this call.
static void
freedefer(Defer *d)
{
	if(d->special) {
		if(d->free)
			runtime·free(d);
	} else {
		runtime·memclr((byte*)d->args, d->siz);
	}
}
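
// As a rough sketch of how newdefer/popdefer/freedefer are driven
// (an interpretation of the comments on deferproc and deferreturn
// below, not of the actual generated code): for a Go statement like
//
//	defer f(x, y)
//
// the compiler emits approximately
//
//	runtime·deferproc(siz, f, x, y);	// at the defer statement
//	...
//	runtime·deferreturn(...);		// in the return epilogue
//
// and branches to the return epilogue whenever deferproc returns a
// nonzero value (the recovery path). f, x, y and siz here are
// placeholders, not names used elsewhere in the runtime.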

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred. It's OK for this to call
// functions that split the stack.
#pragma textflag 7
uintptr
runtime·deferproc(int32 siz, FuncVal *fn, ...)
{
	Defer *d;

	d = newdefer(siz);
	d->fn = fn;
	d->pc = runtime·getcallerpc(&siz);
	if(thechar == '5')
		d->argp = (byte*)(&fn+2);  // skip caller's saved link register
	else
		d->argp = (byte*)(&fn+1);
	runtime·memmove(d->args, d->argp, d->siz);

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return 0;
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.
#pragma textflag 7
void
runtime·deferreturn(uintptr arg0)
{
	Defer *d;
	byte *argp;
	FuncVal *fn;

	d = g->defer;
	if(d == nil)
		return;
	argp = (byte*)&arg0;
	if(d->argp != argp)
		return;
	runtime·memmove(argp, d->args, d->siz);
	fn = d->fn;
	popdefer();
	freedefer(d);
	runtime·jmpdefer(fn, argp);
}

// Run all deferred functions for the current goroutine.
static void
rundefer(void)
{
	Defer *d;

	while((d = g->defer) != nil) {
		popdefer();
		reflect·call(d->fn, (byte*)d->args, d->siz);
		freedefer(d);
	}
}

// Print all currently active panics. Used when crashing.
static void
printpanics(Panic *p)
{
	if(p->link) {
		printpanics(p->link);
		runtime·printf("\t");
	}
	runtime·printf("panic: ");
	runtime·printany(p->arg);
	if(p->recovered)
		runtime·printf(" [recovered]");
	runtime·printf("\n");
}

static void recovery(G*);

// The implementation of the predeclared function panic.
void
runtime·panic(Eface e)
{
	Defer *d;
	Panic *p;
	void *pc, *argp;

	p = runtime·mal(sizeof *p);
	p->arg = e;
	p->link = g->panic;
	p->stackbase = (byte*)g->stackbase;
	g->panic = p;

	for(;;) {
		d = g->defer;
		if(d == nil)
			break;
		// take defer off list in case of recursive panic
		popdefer();
		g->ispanic = true;	// rock for newstack, where reflect.call ends up
		argp = d->argp;
		pc = d->pc;
		reflect·call(d->fn, (byte*)d->args, d->siz);
		freedefer(d);
		if(p->recovered) {
			g->panic = p->link;
			if(g->panic == nil)	// must be done with signal
				g->sig = 0;
			runtime·free(p);
			// Pass information about recovering frame to recovery.
			g->sigcode0 = (uintptr)argp;
			g->sigcode1 = (uintptr)pc;
			runtime·mcall(recovery);
			runtime·throw("recovery failed"); // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	runtime·startpanic();
	printpanics(g->panic);
	runtime·dopanic(0);
}
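
// In Go terms, the loop above is what runs the deferred calls for
// code such as (an illustrative example; the names are placeholders):
//
//	func f() {
//		defer func() {
//			if r := recover(); r != nil {
//				// runtime·recover sets p->recovered, and
//				// recovery() below resumes f as if deferproc
//				// had returned 1.
//			}
//		}()
//		panic("boom")
//	}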

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
static void
recovery(G *gp)
{
	void *argp;
	void *pc;

	// Info about defer passed in G struct.
	argp = (void*)gp->sigcode0;
	pc = (void*)gp->sigcode1;

	// Unwind to the stack frame with d's arguments in it.
	runtime·unwindstack(gp, argp);

	// Make the deferproc for this d return again,
	// this time returning 1. The calling function will
	// jump to the standard return epilogue.
	// The -2*sizeof(uintptr) makes up for the
	// two extra words that are on the stack at
	// each call to deferproc.
	// (The pc we're returning to does pop pop
	// before it tests the return value.)
	// On the arm there are 2 saved LRs mixed in too.
	if(thechar == '5')
		gp->sched.sp = (uintptr)argp - 4*sizeof(uintptr);
	else
		gp->sched.sp = (uintptr)argp - 2*sizeof(uintptr);
	gp->sched.pc = pc;
	runtime·gogo(&gp->sched, 1);
}

// Free stack frames until we hit the last one
// or until we find the one that contains the sp.
void
runtime·unwindstack(G *gp, byte *sp)
{
	Stktop *top;
	byte *stk;

	// Must be called from a different goroutine, usually m->g0.
	if(g == gp)
		runtime·throw("unwindstack on self");

	while((top = (Stktop*)gp->stackbase) != nil && top->stackbase != nil) {
		stk = (byte*)gp->stackguard - StackGuard;
		if(stk <= sp && sp < (byte*)gp->stackbase)
			break;
		gp->stackbase = (uintptr)top->stackbase;
		gp->stackguard = (uintptr)top->stackguard;
		if(top->free != 0)
			runtime·stackfree(stk, top->free);
	}

	if(sp != nil && (sp < (byte*)gp->stackguard - StackGuard || (byte*)gp->stackbase < sp)) {
		runtime·printf("recover: %p not in [%p, %p]\n", sp, gp->stackguard - StackGuard, gp->stackbase);
		runtime·throw("bad unwindstack");
	}
}
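
// Rough picture of the segmented stack that unwindstack walks (an
// interpretation of the loop above, not a separate data-structure
// spec): each segment keeps a Stktop at its stackbase recording the
// previous segment's stackbase and stackguard, so the segments form
// a linked list from newest to oldest:
//
//	gp->stackbase -> [Stktop] -> [Stktop] -> ... -> [Stktop: stackbase == nil]
//	   newest segment                                  initial segment
//
// The loop frees segments until sp falls inside the current one
// (or until the initial segment is reached).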

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
#pragma textflag 7
void
runtime·recover(byte *argp, Eface ret)
{
	Stktop *top, *oldtop;
	Panic *p;

	// Must be a panic going on.
	if((p = g->panic) == nil || p->recovered)
		goto nomatch;

	// Frame must be at the top of the stack segment,
	// because each deferred call starts a new stack
	// segment as a side effect of using reflect.call.
	// (There has to be some way to remember the
	// variable argument frame size, and the segment
	// code already takes care of that for us, so we
	// reuse it.)
	//
	// As usual closures complicate things: the fp that
	// the closure implementation function claims to have
	// is where the explicit arguments start, after the
	// implicit pointer arguments and PC slot.
	// If we're on the first new segment for a closure,
	// then fp == top - top->args is correct, but if
	// the closure has its own big argument frame and
	// allocated a second segment (see below),
	// the fp is slightly above top - top->args.
	// That condition can't happen normally though
	// (stack pointers go down, not up), so we can accept
	// any fp between top and top - top->args as
	// indicating the top of the segment.
	top = (Stktop*)g->stackbase;
	if(argp < (byte*)top - top->argsize || (byte*)top < argp)
		goto nomatch;

	// The deferred call makes a new segment big enough
	// for the argument frame but not necessarily big
	// enough for the function's local frame (size unknown
	// at the time of the call), so the function might have
	// made its own segment immediately. If that's the
	// case, back top up to the older one, the one that
	// reflect.call would have made for the panic.
	//
	// The fp comparison here checks that the argument
	// frame that was copied during the split (the top->args
	// bytes above top->fp) abuts the old top of stack.
	// This is a correct test for both closure and non-closure code.
	oldtop = (Stktop*)top->stackbase;
	if(oldtop != nil && top->argp == (byte*)oldtop - top->argsize)
		top = oldtop;

	// Now we have the segment that was created to
	// run this call. It must have been marked as a panic segment.
	if(!top->panic)
		goto nomatch;

	// Okay, this is the top frame of a deferred call
	// in response to a panic. It can see the panic argument.
	p->recovered = 1;
	ret = p->arg;
	FLUSH(&ret);
	return;

nomatch:
	ret.type = nil;
	ret.data = nil;
	FLUSH(&ret);
}

void
runtime·startpanic(void)
{
	if(runtime·mheap == 0 || runtime·mheap->cachealloc.size == 0) { // very early
		runtime·printf("runtime: panic before malloc heap initialized\n");
		m->mallocing = 1; // tell rest of panic not to try to malloc
	} else if(m->mcache == nil) // can happen if called from signal handler or throw
		m->mcache = runtime·allocmcache();
	if(m->dying) {
		runtime·printf("panic during panic\n");
		runtime·exit(3);
	}
	m->dying = 1;
	runtime·xadd(&runtime·panicking, 1);
	runtime·lock(&paniclk);
}
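
// Sketch of the startpanic/dopanic handshake (a summary of the two
// functions surrounding this comment, not an independent protocol
// spec): startpanic marks the M as dying, bumps runtime·panicking and
// takes paniclk so only one M prints; dopanic prints the signal info
// and traceback (as permitted by runtime·gotraceback), drops paniclk,
// and then either exits the process or, if another M is still
// panicking, blocks forever so that M can finish printing.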

void
runtime·dopanic(int32 unused)
{
	static bool didothers;
	bool crash;

	if(g->sig != 0)
		runtime·printf("[signal %x code=%p addr=%p pc=%p]\n",
			g->sig, g->sigcode0, g->sigcode1, g->sigpc);

	if(runtime·gotraceback(&crash)){
		if(g != m->g0) {
			runtime·printf("\n");
			runtime·goroutineheader(g);
			runtime·traceback(runtime·getcallerpc(&unused), runtime·getcallersp(&unused), 0, g);
		}
		if(!didothers) {
			didothers = true;
			runtime·tracebackothers(g);
		}
	}
	runtime·unlock(&paniclk);
	if(runtime·xadd(&runtime·panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime·lock(&deadlock);
		runtime·lock(&deadlock);
	}

	if(crash)
		runtime·crash();

	runtime·exit(2);
}

void
runtime·panicindex(void)
{
	runtime·panicstring("index out of range");
}

void
runtime·panicslice(void)
{
	runtime·panicstring("slice bounds out of range");
}

void
runtime·throwreturn(void)
{
	// can only happen if compiler is broken
	runtime·throw("no return at end of a typed function - compiler is broken");
}

void
runtime·throwinit(void)
{
	// can only happen with linker skew
	runtime·throw("recursive call during initialization - linker skew");
}

void
runtime·throw(int8 *s)
{
	if(m->throwing == 0)
		m->throwing = 1;
	runtime·startpanic();
	runtime·printf("fatal error: %s\n", s);
	runtime·dopanic(0);
	*(int32*)0 = 0;	// not reached
	runtime·exit(1);	// even more not reached
}

void
runtime·panicstring(int8 *s)
{
	Eface err;

	if(m->gcing) {
		runtime·printf("panic: %s\n", s);
		runtime·throw("panic during gc");
	}
	runtime·newErrorString(runtime·gostringnocopy((byte*)s), &err);
	runtime·panic(err);
}

void
runtime·Goexit(void)
{
	rundefer();
	runtime·goexit();
}
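
// For reference, the Go-level behavior of Goexit as implemented above
// (an illustrative example; the printed strings are placeholders):
//
//	go func() {
//		defer println("runs via rundefer")	// deferred calls still run
//		runtime.Goexit()			// then the goroutine exits
//		println("never reached")
//	}()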