github.com/fjballest/golang@v0.0.0-20151209143359-e4c5fe594ca8/src/runtime/panic.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"unsafe"
    10  )
    11  
// indexError is preallocated so panicking on a bad index never
// allocates at panic time (the runtime may be unable to malloc then).
var indexError = error(errorString("index out of range"))

// panicindex is called by compiler-generated code when an index
// expression is out of range.
func panicindex() {
	panic(indexError)
}
    17  
// sliceError is preallocated so panicking on a bad slice expression
// never allocates at panic time.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice is called by compiler-generated code when a slice
// expression has out-of-range bounds.
func panicslice() {
	panic(sliceError)
}
    23  
// divideError is preallocated so panicking on integer division by
// zero never allocates at panic time.
var divideError = error(errorString("integer divide by zero"))

// panicdivide is called by compiler-generated code for an integer
// division or modulus by zero.
func panicdivide() {
	panic(divideError)
}
    29  
// overflowError is preallocated so panicking on integer overflow
// never allocates at panic time.
var overflowError = error(errorString("integer overflow"))

// panicoverflow is called by compiler-generated code on integer
// overflow (e.g. division overflow such as MinInt / -1).
func panicoverflow() {
	panic(overflowError)
}
    35  
// floatError is preallocated so panicking on a floating point fault
// never allocates at panic time.
var floatError = error(errorString("floating point error"))

// panicfloat is called by the runtime on a floating point trap.
func panicfloat() {
	panic(floatError)
}
    41  
// memoryError is preallocated so panicking on a bad memory access
// never allocates at panic time.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called by the runtime on an invalid memory access
// (typically from a signal handler turning a fault into a panic).
func panicmem() {
	panic(memoryError)
}
    47  
// throwreturn is called by compiler-generated code if control falls
// off the end of a function that declares results without returning.
// Reaching it means the compiler emitted bad code.
func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}
    51  
// throwinit is called on a recursive call during package
// initialization, which indicates skew between the linker's init
// ordering and the program.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    55  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state.  The stack map
	// for deferproc does not describe them.  So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe.  The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments sit immediately after the fn parameter on the
	// caller's stack frame.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	// Allocate the _defer and copy the argument block on the system
	// stack so that stack growth cannot occur mid-copy.
	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		// Arguments are stored immediately after the _defer header.
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
    95  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	// Size of the _defer header struct itself.
	deferHeaderSize = unsafe.Sizeof(_defer{})
	// Header size rounded up to a multiple of 16: the smallest
	// defer allocation.
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	// Argument bytes that fit "for free" in the padding of the
	// smallest allocation.
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
   105  
   106  // defer size class for arg size sz
   107  //go:nosplit
   108  func deferclass(siz uintptr) uintptr {
   109  	if siz <= minDeferArgs {
   110  		return 0
   111  	}
   112  	return (siz - minDeferArgs + 15) / 16
   113  }
   114  
   115  // total size of memory block for defer with arg size sz
   116  func totaldefersize(siz uintptr) uintptr {
   117  	if siz <= minDeferArgs {
   118  		return minDeferAlloc
   119  	}
   120  	return deferHeaderSize + siz
   121  }
   122  
   123  // Ensure that defer arg sizes that map to the same defer size class
   124  // also map to the same malloc size class.
   125  func testdefersizes() {
   126  	var m [len(p{}.deferpool)]int32
   127  
   128  	for i := range m {
   129  		m[i] = -1
   130  	}
   131  	for i := uintptr(0); ; i++ {
   132  		defersc := deferclass(i)
   133  		if defersc >= uintptr(len(m)) {
   134  			break
   135  		}
   136  		siz := roundupsize(totaldefersize(i))
   137  		if m[defersc] < 0 {
   138  			m[defersc] = int32(siz)
   139  			continue
   140  		}
   141  		if m[defersc] != int32(siz) {
   142  			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
   143  			throw("bad defer size class")
   144  		}
   145  	}
   146  }
   147  
// deferArgs returns a pointer to the start of the deferred call's
// argument block. The arguments associated with a deferred call are
// stored immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
   154  
var deferType *_type // type of _defer struct

func init() {
	// Recover the *_type describing _defer by boxing a nil *_defer in
	// an interface, reinterpreting the interface's type word as a
	// *ptrtype, and taking its element type.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   162  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem() // pin to this M/P while touching the per-P pool
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty: refill up to half capacity from
			// the central (sched) free list.
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			// Pop from the local pool; nil the slot so the slice does
			// not keep the _defer reachable.
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	// Link the new defer at the head of the user goroutine's defer chain.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}
   200  
// Free the given defer.
// The defer cannot be used after this call.
func freedefer(d *_defer) {
	// The caller must have cleared _panic and fn before freeing;
	// these checks are in out-of-line helpers to keep this frame small.
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	// Oversized defers (class beyond the pools) are simply dropped
	// and left to the garbage collector.
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			// NOTE(review): assumes the loop above ran at least once
			// (last != nil), i.e. pool capacity is nonzero when full —
			// verify pools are never created with cap 0.
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		// Zero the header so pooled defers carry no stale pointers,
		// then push onto the local pool.
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}
   239  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   246  
// freedeferfn reports a defer freed while its fn was still set.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   251  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called.  The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run defers registered by this frame: the recorded sp must
	// match the caller's sp.
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	// Copy the saved argument block back into the caller's out-args
	// area (starting at &arg0) before jumping to fn.
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   293  
// Goexit terminates the goroutine that calls it.  No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine.  Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an earlier panic that
			// this Goexit is now superseding; abort that panic and
			// discard the defer rather than running it twice.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		// Clear _panic and fn before unlinking, as freedefer requires.
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   335  
   336  // Print all currently active panics.  Used when crashing.
   337  func printpanics(p *_panic) {
   338  	if p.link != nil {
   339  		printpanics(p.link)
   340  		print("\t")
   341  	}
   342  	print("panic: ")
   343  	printany(p.arg)
   344  	if p.recovered {
   345  		print(" [recovered]")
   346  	}
   347  	print("\n")
   348  }
   349  
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	// Panics are only legal on user goroutines in ordinary Go code;
	// the checks below catch panics from runtime-internal contexts.
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic, living in this stack frame, onto the head of
	// the goroutine's panic chain.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp lets gorecover verify that its caller is the deferred
		// function itself (and not some function it called).
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy.  See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   467  
// getargp returns the location where the caller
// writes outgoing function call arguments.
// gopanic stores this in p.argp so gorecover can check that it was
// called directly from the deferred function.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   482  
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		// Marking recovered makes gopanic unwind to the deferred frame
		// after this deferred function returns.
		p.recovered = true
		return p.arg
	}
	return nil
}
   505  
// startpanic begins the unrecoverable crash sequence on the system
// stack (freezes the world, takes the panic lock).
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   510  
// dopanic prints the crash report (tracebacks) and exits the process.
// It never returns. The unused argument only exists so its address
// identifies the caller's frame.
//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}
   521  
// throw reports a fatal runtime error and crashes the program.
// Unlike panic, a throw cannot be recovered.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	// Record that this M is throwing so tracebacks include the
	// runtime stack (see dopanic_m).
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
   533  
//uint32 runtime·panicking;

// paniclk serializes crash output when multiple Ms panic at once;
// taken in startpanic_m, released in dopanic_m.
var paniclk mutex
   536  
// Unwind the stack after a deferred function calls recover
// after a panic.  Then arrange to continue running as though
// the caller of the deferred function returned normally.
// Runs on the g0 stack (called via mcall from gopanic).
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gcUnwindBarriers(gp, sp)
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1 // the "deferred func stopped a panic" return value
	gogo(&gp.sched)
}
   561  
// startpanic_m prepares the crash sequence on the system stack.
// m.dying tracks how far into the crash this M already is, so that
// a failure while panicking degrades gracefully instead of looping.
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		// First failure on this M: announce the panic, take the crash
		// lock, and stop all other goroutines.
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicing, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print!  Just exit.
		exit(5)
	}
}
   602  
// didothers records whether dopanic_m has already printed tracebacks
// of the other goroutines, so concurrent panics print them only once.
var didothers bool

// deadlock is locked twice by a losing panicking M to block forever
// while another M finishes printing its crash report.
var deadlock mutex
   605  
// dopanic_m prints the crash report for gp (signal info, tracebacks
// per GOTRACEBACK level) and terminates the process. Runs on the
// system stack; never returns.
func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		print("[signal ", hex(gp.sig), " code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// Only show the runtime (g0) stack for throws or high
			// traceback levels; it is noise for ordinary panics.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}
   647  
   648  //go:nosplit
   649  func canpanic(gp *g) bool {
   650  	// Note that g is m->gsignal, different from gp.
   651  	// Note also that g->m can change at preemption, so m can go stale
   652  	// if this function ever makes a function call.
   653  	_g_ := getg()
   654  	_m_ := _g_.m
   655  
   656  	// Is it okay for gp to panic instead of crashing the program?
   657  	// Yes, as long as it is running Go code, not runtime code,
   658  	// and not stuck in a system call.
   659  	if gp == nil || gp != _m_.curg {
   660  		return false
   661  	}
   662  	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
   663  		return false
   664  	}
   665  	status := readgstatus(gp)
   666  	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
   667  		return false
   668  	}
   669  	if GOOS == "windows" && _m_.libcallsp != 0 {
   670  		return false
   671  	}
   672  	return true
   673  }