github.com/euank/go@v0.0.0-20160829210321-495514729181/src/runtime/panic.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"unsafe"
    10  )
    11  
    12  // Calling panic with one of the errors below will call errorString.Error
    13  // which will call mallocgc to concatenate strings. That will fail if
    14  // malloc is locked, causing a confusing error message. Throw a better
    15  // error message instead.
    16  func panicCheckMalloc(err error) {
    17  	gp := getg()
    18  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    19  		throw(string(err.(errorString)))
    20  	}
    21  }
    22  
// indexError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var indexError = error(errorString("index out of range"))

// panicindex panics with "index out of range", throwing instead if
// malloc is locked (see panicCheckMalloc).
func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}
    29  
// sliceError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice panics with "slice bounds out of range", throwing instead
// if malloc is locked (see panicCheckMalloc).
func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}
    36  
// divideError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var divideError = error(errorString("integer divide by zero"))

// panicdivide panics with "integer divide by zero", throwing instead
// if malloc is locked (see panicCheckMalloc).
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}
    43  
// overflowError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var overflowError = error(errorString("integer overflow"))

// panicoverflow panics with "integer overflow", throwing instead if
// malloc is locked (see panicCheckMalloc).
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}
    50  
// floatError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var floatError = error(errorString("floating point error"))

// panicfloat panics with "floating point error", throwing instead if
// malloc is locked (see panicCheckMalloc).
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}
    57  
// memoryError is allocated once at startup so that panicking with it
// needs no allocation at panic time.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem panics with the nil-dereference error, throwing instead if
// malloc is locked (see panicCheckMalloc).
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}
    64  
// throwreturn crashes the program; per its message, reaching it means
// control fell off the end of a value-returning function, i.e. the
// compiler emitted bad code.
func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}
    68  
// throwinit crashes the program; per its message, reaching it means an
// initialization routine was entered recursively (linker skew).
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    72  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
// Pushes a _defer record onto the current goroutine's defer list and
// then returns 0 via return0 (see the comment at the bottom).
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments sit immediately after fn itself in this frame.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	// newdefer runs on the g0 stack (see its comment), so switch stacks.
	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		d.sp = sp
		// Copy the deferred call's arguments into the space that
		// newdefer reserved just past the _defer header.
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   112  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	// deferHeaderSize is the size of the _defer struct itself.
	deferHeaderSize = unsafe.Sizeof(_defer{})
	// minDeferAlloc is the header size rounded up to a multiple of 16,
	// the smallest block any pooled defer occupies.
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	// minDeferArgs is how many argument bytes fit in the minimum
	// allocation without growing it.
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
   122  
   123  // defer size class for arg size sz
   124  //go:nosplit
   125  func deferclass(siz uintptr) uintptr {
   126  	if siz <= minDeferArgs {
   127  		return 0
   128  	}
   129  	return (siz - minDeferArgs + 15) / 16
   130  }
   131  
   132  // total size of memory block for defer with arg size sz
   133  func totaldefersize(siz uintptr) uintptr {
   134  	if siz <= minDeferArgs {
   135  		return minDeferAlloc
   136  	}
   137  	return deferHeaderSize + siz
   138  }
   139  
   140  // Ensure that defer arg sizes that map to the same defer size class
   141  // also map to the same malloc size class.
   142  func testdefersizes() {
   143  	var m [len(p{}.deferpool)]int32
   144  
   145  	for i := range m {
   146  		m[i] = -1
   147  	}
   148  	for i := uintptr(0); ; i++ {
   149  		defersc := deferclass(i)
   150  		if defersc >= uintptr(len(m)) {
   151  			break
   152  		}
   153  		siz := roundupsize(totaldefersize(i))
   154  		if m[defersc] < 0 {
   155  			m[defersc] = int32(siz)
   156  			continue
   157  		}
   158  		if m[defersc] != int32(siz) {
   159  			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
   160  			throw("bad defer size class")
   161  		}
   162  	}
   163  }
   164  
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
// deferArgs returns a pointer to that argument area.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
   171  
var deferType *_type // type of _defer struct; used by newdefer's mallocgc

func init() {
	// Recover the *_type for _defer by boxing a nil *_defer into an
	// interface and reading the pointer type's elem out of the
	// interface's type word.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   179  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// The new defer is pushed onto the current goroutine's defer list.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool empty: refill up to half capacity from the
			// central pool, a linked list under sched.deferlock.
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			// Pop the most recently freed defer off the local pool.
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Pool miss or args too large for any pool:
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, true))
	}
	d.siz = siz
	// Link onto the head of the current goroutine's defer list.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}
   217  
// Free the given defer.
// The defer cannot be used after this call.
// The caller must have already cleared d._panic and d.fn (checked below)
// and unlinked d from its goroutine's defer list.
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			// Splice the detached chain onto the central list.
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		// Zero the header before pooling so stale pointers don't leak.
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
	// Defers too large for any pool are simply dropped; the GC reclaims them.
}
   256  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   263  
// freedeferfn, like freedeferpanic, is split out of freedefer so the
// throw path may grow the stack.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   268  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run defers registered by the frame that is returning:
	// d.sp was recorded by deferproc and must match our caller's sp.
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	// Copy the saved deferred-call arguments back into the caller's
	// argument area; arg0 is the first word of that area.
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   310  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// The defer was already started by an in-flight panic;
			// abort that panic and discard the defer without rerunning it.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		// The deferred call returned normally; it must still be at the
		// head of the list (it could not have removed itself).
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   352  
   353  // Call all Error and String methods before freezing the world.
   354  // Used when crashing with panicking.
   355  // This must match types handled by printany.
   356  func preprintpanics(p *_panic) {
   357  	for p != nil {
   358  		switch v := p.arg.(type) {
   359  		case error:
   360  			p.arg = v.Error()
   361  		case stringer:
   362  			p.arg = v.String()
   363  		}
   364  		p = p.link
   365  	}
   366  }
   367  
// Print all currently active panics. Used when crashing.
// Recursion prints the oldest panic first; each newer panic's line is
// prefixed with a tab under the one that interrupted it.
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
   381  
// The implementation of the predeclared function panic.
// Runs the goroutine's deferred calls in order; if one of them recovers,
// control resumes in the recovering frame via mcall(recovery). If none
// do, the process crashes with the full panic report.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic onto the head of this goroutine's panic list.
	// p stays on this stack; noescape keeps the &p stored into
	// gp._panic from forcing a heap allocation.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp identifies the frame allowed to recover this panic;
		// gorecover compares its caller's argp against it.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		// Save pc/sp before freeing d; recovery uses them to resume the
		// deferring frame as though its deferproc had returned 1.
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   503  
// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		// Never taken for x == 0; the *0 makes the value 0 if it were.
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   518  
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	// No active panic, already recovered, or called from the wrong
	// frame: recover is a no-op and reports nil.
	return nil
}
   541  
// startpanic switches to the system stack to run startpanic_m, which
// prepares for printing the crash report (see startpanic_m).
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   546  
// dopanic captures the caller's pc/sp for the traceback and runs
// dopanic_m on the system stack. It does not return.
//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	// Force a fault in case dopanic_m somehow returned.
	*(*int)(nil) = 0
}
   557  
// throw reports a fatal runtime error: it prints the message, marks
// the m as throwing (dopanic_m prints the runtime stack for a throwing
// m), and crashes the process. It does not return.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
   569  
// paniclk is held from startpanic_m until dopanic_m has finished
// printing, so only one m at a time produces crash output.
//uint32 runtime·panicking;
var paniclk mutex
   572  
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
// Runs via mcall from gopanic; gp is the panicking goroutine.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	// (gopanic stashed the deferring frame's sp/pc in sigcode0/sigcode1.)
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gcUnwindBarriers(gp, sp)
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}
   597  
// startpanic_m prepares for printing the crash report. m.dying records
// how far into crashing this m already is, so that a failure during the
// crash itself degrades step by step instead of recursing forever.
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		// First crash on this m: announce it, take the panic lock,
		// and stop the other goroutines before printing.
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicing, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print!  Just exit.
		exit(5)
	}
}
   638  
// didothers records that tracebackothers already ran, so concurrent
// crashing ms don't dump every goroutine more than once (see dopanic_m).
var didothers bool

// deadlock is locked twice in dopanic_m to park an m forever while
// another m finishes crashing.
var deadlock mutex
   641  
// dopanic_m prints the crash report for gp — signal details if any,
// then tracebacks according to the GOTRACEBACK level — and exits the
// process. Runs on the system stack (called via dopanic).
func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		// The crash came from a signal; identify it first.
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// Only show the runtime (g0) stack when asked for, or when
			// the crash came from throw.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		// Taking the same mutex twice blocks this m permanently.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}
   689  
// canpanic reports whether it is safe to let gp panic
// rather than crash the whole program.
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	// Reject if the m is in any sensitive runtime state
	// (holding locks, allocating, already crashing, ...).
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	// gp must be running (ignoring a concurrent GC scan bit)
	// and not inside a system call.
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}