github.com/4ad/go@v0.0.0-20161219182952-69a12818b605/src/runtime/panic.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  // Calling panic with one of the errors below will call errorString.Error
    14  // which will call mallocgc to concatenate strings. That will fail if
    15  // malloc is locked, causing a confusing error message. Throw a better
    16  // error message instead.
func panicCheckMalloc(err error) {
	gp := getg()
	// If this M is inside mallocgc, a panic would re-enter malloc while
	// formatting the error; throw with the raw error text instead.
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(string(err.(errorString)))
	}
}
    23  
// indexError is preallocated so that panicking does not itself allocate.
var indexError = error(errorString("index out of range"))

// panicindex panics with the shared "index out of range" error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}
    30  
// sliceError is preallocated so that panicking does not itself allocate.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice panics with the shared "slice bounds out of range" error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}
    37  
// divideError is preallocated so that panicking does not itself allocate.
var divideError = error(errorString("integer divide by zero"))

// panicdivide panics with the shared "integer divide by zero" error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}
    44  
// overflowError is preallocated so that panicking does not itself allocate.
var overflowError = error(errorString("integer overflow"))

// panicoverflow panics with the shared "integer overflow" error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}
    51  
// floatError is preallocated so that panicking does not itself allocate.
var floatError = error(errorString("floating point error"))

// panicfloat panics with the shared "floating point error" error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}
    58  
// memoryError is preallocated so that panicking does not itself allocate.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem panics with the shared nil-dereference error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}
    65  
// throwreturn crashes the process; the compiler emits a call to it
// where a typed function would otherwise fall off the end.
func throwreturn() {
	throw("no return at end of a typed function - compiler is broken")
}
    69  
// throwinit crashes the process on a recursive package initialization.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    73  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments sit in the caller's frame immediately after fn itself.
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	systemstack(func() {
		d := newdefer(siz)
		if d._panic != nil {
			throw("deferproc: d.panic != nil after newdefer")
		}
		d.fn = fn
		d.pc = callerpc
		if sys.GoarchSparc64 == 1 {
			// on SPARC64 the link register contains the address of the
			// 4-byte CALL instruction, which is always followed by a
			// 4-byte NOP.
			d.pc += 8
		}
		d.sp = sp
		// Copy the deferred call's arguments into the slots directly
		// after the _defer header (see deferArgs).
		memmove(add(unsafe.Pointer(d), unsafe.Sizeof(*d)), unsafe.Pointer(argp), uintptr(siz))
	})

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   119  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})         // size of the _defer header; args are stored right after it
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15    // header size rounded up to a multiple of 16
	minDeferArgs    = minDeferAlloc - deferHeaderSize // arg bytes that fit in the minimum allocation for free
)
   129  
   130  // defer size class for arg size sz
   131  //go:nosplit
   132  func deferclass(siz uintptr) uintptr {
   133  	if siz <= minDeferArgs {
   134  		return 0
   135  	}
   136  	return (siz - minDeferArgs + 15) / 16
   137  }
   138  
   139  // total size of memory block for defer with arg size sz
   140  func totaldefersize(siz uintptr) uintptr {
   141  	if siz <= minDeferArgs {
   142  		return minDeferAlloc
   143  	}
   144  	return deferHeaderSize + siz
   145  }
   146  
// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	// m[sc] records the malloc size class seen for defer class sc;
	// -1 means that class has not been seen yet.
	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		// Two arg sizes in the same defer class must round to the same
		// malloc size, or pooled defers could be reused undersized.
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}
   171  
// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	// The argument block begins right past the end of the _defer struct.
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}
   178  
var deferType *_type // type of _defer struct

func init() {
	// Recover the *_type describing _defer by boxing a nil *_defer in an
	// interface and reading the pointee type out of the interface's
	// *ptrtype descriptor. Needed so newdefer can pass it to mallocgc.
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   186  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
// Note: runs on g0 stack
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	mp := acquirem()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool is empty: refill it to half capacity from the
			// central (sched) pool under the defer lock.
			lock(&sched.deferlock)
			for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
				d := sched.deferpool[sc]
				sched.deferpool[sc] = d.link
				d.link = nil
				pp.deferpool[sc] = append(pp.deferpool[sc], d)
			}
			unlock(&sched.deferlock)
		}
		// Pop the most recently freed defer from the local pool, if any.
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		total := roundupsize(totaldefersize(uintptr(siz)))
		d = (*_defer)(mallocgc(total, deferType, true))
	}
	d.siz = siz
	// Link the new defer at the head of the current goroutine's list.
	gp := mp.curg
	d.link = gp._defer
	gp._defer = d
	releasem(mp)
	return d
}
   224  
// Free the given defer.
// The defer cannot be used after this call.
func freedefer(d *_defer) {
	// Callers must clear _panic and fn before freeing; throw via the
	// out-of-line helpers below if the invariant is violated.
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	// Defers too large for the pools are simply dropped here and
	// reclaimed by the garbage collector (they came from mallocgc).
	if sc < uintptr(len(p{}.deferpool)) {
		mp := acquirem()
		pp := mp.p.ptr()
		if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
			// Transfer half of local cache to the central cache.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				// Chain the popped defers into a list for the central pool.
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		}
		// Zero the defer before returning it to the local pool.
		*d = _defer{}
		pp.deferpool[sc] = append(pp.deferpool[sc], d)
		releasem(mp)
	}
}
   263  
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
// Called only from freedefer when its invariant is violated.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   270  
// freedeferfn is the counterpart of freedeferpanic for the fn field;
// kept out of line for the same stack-space reason.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   275  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp(unsafe.Pointer(&arg0))
	// Only run defers registered by the caller's frame; a mismatched sp
	// means the topmost defer belongs to a frame further up the stack.
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	// Do not allow preemption here, because the garbage collector
	// won't know the form of the arguments until the jmpdefer can
	// flip the PC over to fn.
	mp := acquirem()
	memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	// Switch to systemstack merely to save nosplit stack space.
	systemstack(func() {
		freedefer(d)
	})
	releasem(mp)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   317  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an earlier panic or
			// Goexit; abort that panic (if any) and discard the defer.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		// The deferred call may have queued new defers; d must still be
		// the head of the list, or the bookkeeping is corrupt.
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   359  
   360  // Call all Error and String methods before freezing the world.
   361  // Used when crashing with panicking.
   362  // This must match types handled by printany.
   363  func preprintpanics(p *_panic) {
   364  	for p != nil {
   365  		switch v := p.arg.(type) {
   366  		case error:
   367  			p.arg = v.Error()
   368  		case stringer:
   369  			p.arg = v.String()
   370  		}
   371  		p = p.link
   372  	}
   373  }
   374  
// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	// Recurse on the link (older panic) first, so the chain prints
	// oldest-to-newest, each nested panic indented with a tab.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
   388  
// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new _panic onto the head of this goroutine's panic list.
	// p lives in this stack frame; noescape hides the pointer from
	// escape analysis.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp is the argument pointer gorecover compares against to
		// decide whether its caller may recover this panic.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		// Save pc/sp before freeing d; recovery needs them to resume
		// the frame whose deferred call recovered.
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()
	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   510  
// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	// However, we need to make the function complex enough
	// that it won't be inlined. We always pass x = 0, so this code
	// does nothing other than keep the compiler from thinking
	// the function is simple enough to inline.
	if x > 0 {
		// Never taken at run time (x is always 0).
		return getcallersp(unsafe.Pointer(&x)) * 0
	}
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   525  
// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		// Mark the panic recovered; gopanic notices this after the
		// deferred call returns and resumes the recovering frame.
		p.recovered = true
		return p.arg
	}
	return nil
}
   548  
// startpanic runs startpanic_m on the system stack, which announces
// the panic and freezes the world before anything is printed.
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   553  
// dopanic prints panic/crash information via dopanic_m on the system
// stack and exits the process. It never returns; the nil store at the
// end enforces a crash if it somehow does.
//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0
}
   564  
// throw reports a fatal runtime error and crashes the process.
// Unlike panic, it does not run deferred calls and cannot be recovered.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	// Record that this m is crashing (dopanic_m checks m.throwing to
	// decide whether to print the runtime stack).
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
   576  
//uint32 runtime·panicking;
var paniclk mutex // serializes crashing ms (held from startpanic_m until dopanic_m)

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	// (gopanic stored the recovering frame's sp/pc in sigcode0/1 before mcall.)
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gcUnwindBarriers(gp, sp)
	gp.sched.sp = sp
	gp.sched.pc = pc
	if sys.GoarchSparc64 == 1 {
		// Function prolog saves the FP here; sp already has
		// bias applied, so we use it directly. The recovered
		// fp doesn't have the bias applied. Since gogo expects
		// biased input, we add the stack bias to the loaded fp.
		gp.sched.bp = *((*uintptr)(unsafe.Pointer(sp+112))) + sys.StackBias
	}
	gp.sched.lr = 0
	gp.sched.ret = 1 // deferproc's return value: nonzero means a panic was stopped
	gogo(&gp.sched)
}
   611  
// startpanic_m implements startpanic on the system stack. m.dying
// tracks how far the crash has progressed, so a failure while
// panicking degrades to progressively simpler output instead of
// recursing forever.
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	switch _g_.m.dying {
	case 0:
		// First failure on this m: announce the panic and stop the world.
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicing, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print!  Just exit.
		exit(5)
	}
}
   652  
var didothers bool // whether tracebackothers has already run during this crash
var deadlock mutex // locked twice to block losing ms forever (see below)

// dopanic_m prints the crashing goroutine's signal info and stack
// (and, depending on GOTRACEBACK, other goroutines' stacks), then
// exits the process. pc/sp identify the frame where the crash was
// reported. It never returns.
func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		// The crash was triggered by a signal; report its details.
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	if docrash {
		crash()
	}

	exit(2)
}
   703  
// canpanic reports whether it is safe to turn the current fault into
// a Go panic on gp rather than crashing the whole program.
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	// Refuse while the m is in any delicate runtime state
	// (holding locks, mallocing, throwing, preemption off, or dying).
	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}