github.com/rsc/go@v0.0.0-20150416155037-e040fd465409/src/runtime/panic.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
// indexError is the error delivered by panicindex. It is built once at
// package init so the panic path itself performs no allocation.
var indexError = error(errorString("index out of range"))

// panicindex is called by compiler-generated code when an array or
// slice index is out of range.
func panicindex() {
	panic(indexError)
}
    14  
// sliceError is the error delivered by panicslice, preallocated at init.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice is called by compiler-generated code when a slice
// expression has out-of-range bounds.
func panicslice() {
	panic(sliceError)
}
    20  
// divideError is the error delivered by panicdivide, preallocated at init.
var divideError = error(errorString("integer divide by zero"))

// panicdivide is called by compiler-generated code for an integer
// division or remainder by zero.
func panicdivide() {
	panic(divideError)
}
    26  
// overflowError is the error delivered by panicoverflow, preallocated at init.
var overflowError = error(errorString("integer overflow"))

// panicoverflow is called by compiler-generated code when an integer
// operation overflows in a context that must trap.
func panicoverflow() {
	panic(overflowError)
}
    32  
// floatError is the error delivered by panicfloat, preallocated at init.
var floatError = error(errorString("floating point error"))

// panicfloat is called on a fatal floating point hardware fault.
func panicfloat() {
	panic(floatError)
}
    38  
// memoryError is the error delivered by panicmem, preallocated at init.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem is called (from the signal handler path) when a memory
// fault should be surfaced as a Go run-time panic.
func panicmem() {
	panic(memoryError)
}
    44  
// throwreturn is inserted by the compiler after the end of a function
// that must not fall off its end; reaching it indicates compiler breakage.
func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}
    48  
// throwinit reports a recursive package-initialization call, which the
// linker is supposed to have ruled out.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    52  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state.  The stack map
	// for deferproc does not describe them.  So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe.  The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// argp points just past fn in the caller's frame, where the
	// compiler spilled fn's arguments before calling deferproc.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	// Switch to the system stack to allocate the _defer record, then
	// copy the caller's argument bytes into the slots just after the
	// _defer header, where deferreturn/gopanic will find them.
	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
    92  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	// deferHeaderSize is the size of the _defer record itself; the
	// deferred call's arguments are stored immediately after it.
	deferHeaderSize = unsafe.Sizeof(_defer{})
	// minDeferAlloc is the header size rounded up to a multiple of 16 —
	// the smallest block ever handed out for a defer.
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	// minDeferArgs is how many argument bytes fit "for free" in that
	// smallest block (size class 0).
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
   102  
   103  // defer size class for arg size sz
   104  //go:nosplit
   105  func deferclass(siz uintptr) uintptr {
   106  	if siz <= minDeferArgs {
   107  		return 0
   108  	}
   109  	return (siz - minDeferArgs + 15) / 16
   110  }
   111  
   112  // total size of memory block for defer with arg size sz
   113  func totaldefersize(siz uintptr) uintptr {
   114  	if siz <= minDeferArgs {
   115  		return minDeferAlloc
   116  	}
   117  	return deferHeaderSize + siz
   118  }
   119  
   120  // Ensure that defer arg sizes that map to the same defer size class
   121  // also map to the same malloc size class.
   122  func testdefersizes() {
   123  	var m [len(p{}.deferpool)]int32
   124  
   125  	for i := range m {
   126  		m[i] = -1
   127  	}
   128  	for i := uintptr(0); ; i++ {
   129  		defersc := deferclass(i)
   130  		if defersc >= uintptr(len(m)) {
   131  			break
   132  		}
   133  		siz := roundupsize(totaldefersize(i))
   134  		if m[defersc] < 0 {
   135  			m[defersc] = int32(siz)
   136  			continue
   137  		}
   138  		if m[defersc] != int32(siz) {
   139  			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
   140  			throw("bad defer size class")
   141  		}
   142  	}
   143  }
   144  
   145  // The arguments associated with a deferred call are stored
   146  // immediately after the _defer header in memory.
   147  //go:nosplit
   148  func deferArgs(d *_defer) unsafe.Pointer {
   149  	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
   150  }
   151  
var deferType *_type // type of _defer struct

func init() {
	// Recover the runtime type descriptor for _defer: box a nil *_defer
	// in an interface, reinterpret the interface's type word as a
	// *ptrtype, and read the pointed-to element type. Used by newdefer
	// so mallocgc can allocate defers with correct pointer maps.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   159  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	// Hold the M so the P (and its pool) cannot be taken away mid-use.
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty: refill it to half capacity from the
			// central pool under sched.deferlock.
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			// Pop a cached defer; nil the slot so the pool doesn't
			// retain a pointer to the handed-out record.
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, 0))
	}
	d.siz = siz
	if mheap_.shadow_enabled {
		// This memory will be written directly, with no write barrier,
		// and then scanned like stacks during collection.
		// Unlike real stacks, it is from heap spans, so mark the
		// shadow as explicitly unusable.
		p := deferArgs(d)
		for i := uintptr(0); i+ptrSize <= uintptr(siz); i += ptrSize {
			writebarrierptr_noshadow((*uintptr)(add(p, i)))
		}
	}
	// Link the new record at the head of the current goroutine's defer chain.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}
   207  
// Free the given defer.
// The defer cannot be used after this call.
// Callers must have already unlinked d from the goroutine's defer chain
// and cleared d.fn and d._panic (checked below).
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	if mheap_.shadow_enabled {
		// Undo the marking in newdefer.
		systemstack(func() {
			clearshadow(uintptr(deferArgs(d)), uintptr(d.siz))
		})
	}
	sc := deferclass(uintptr(d.siz))
	// Only pooled size classes are recycled; larger defers are simply
	// dropped for the GC to reclaim.
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			// NOTE(review): assumes cap(pp.deferpool[sc]) > 0 so the loop
			// runs at least once and `last` is non-nil — presumably
			// guaranteed by P initialization; verify against proc code.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			// Prepend the detached chain to the central free list.
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		// Zero the record before pooling so stale pointers don't leak
		// into the next use.
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}
   252  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   259  
// freedeferfn, like freedeferpanic, is kept out of freedefer so the
// throw path may split the stack.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   264  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called.  The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run defers registered by this exact frame; d.sp was recorded
	// in deferproc as the caller's sp.
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	// Copy the saved argument bytes back into the caller's outgoing
	// argument area (at &arg0), where fn expects to find them.
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   306  
// Goexit terminates the goroutine that calls it.  No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine.  Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an in-flight panic;
			// abort that panic and discard the entry rather than
			// running the deferred function twice.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		// Mark started before calling so a nested panic sees it on the list.
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		// Clear fn/_panic before freedefer (it throws otherwise).
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit()
}
   348  
   349  // Print all currently active panics.  Used when crashing.
   350  func printpanics(p *_panic) {
   351  	if p.link != nil {
   352  		printpanics(p.link)
   353  		print("\t")
   354  	}
   355  	print("panic: ")
   356  	printany(p.arg)
   357  	if p.recovered {
   358  		print(" [recovered]")
   359  	}
   360  	print("\n")
   361  }
   362  
// The implementation of the predeclared function panic.
// Runs the goroutine's deferred calls in LIFO order; if one of them
// recovers, control resumes at the deferring frame via mcall(recovery).
// Otherwise the process dies through startpanic/dopanic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Push this panic onto the goroutine's panic chain. p lives on this
	// stack frame, which stays live for the whole unwind loop below.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape((unsafe.Pointer)(&p)))

		// p.argp is what gorecover compares its caller's argp against:
		// only the topmost deferred function itself may recover.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy.  See stack_test.go:TestStackPanic
		//GC()

		// Save pc/sp before freeing d; recovery needs them to resume
		// the deferring frame as if deferproc had returned 1.
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   480  
// getargp returns the location where the caller
// writes outgoing function call arguments.
// Do not simplify this function: its shape exists to defeat inlining.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		// Dead in practice (x is always 0); multiplying by 0 keeps the
		// result correct even if it were ever taken.
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   495  
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	// A nil or already-recovered panic, or a mismatched frame, yields nil.
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}
   518  
// startpanic switches to the system stack to begin the unrecoverable
// crash sequence (startpanic_m); dopanic finishes it.
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   523  
// dopanic prints the crash report (traceback etc. via dopanic_m) and
// kills the process. The unused argument exists only so its address
// yields the caller's pc/sp.
//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	// Force a fault in case dopanic_m somehow returned.
	*(*int)(nil) = 0
}
   534  
// throw reports a fatal runtime error and crashes the process.
// Unlike panic, a throw cannot be recovered.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	// Record that this M is throwing so nested faults are handled as
	// part of the same crash.
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}