github.com/filosottile/go@v0.0.0-20170906193555-dbed9972d994/src/runtime/panic.go (about)

     1  // Copyright 2014 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"runtime/internal/atomic"
     9  	"runtime/internal/sys"
    10  	"unsafe"
    11  )
    12  
    13  // Calling panic with one of the errors below will call errorString.Error
    14  // which will call mallocgc to concatenate strings. That will fail if
    15  // malloc is locked, causing a confusing error message. Throw a better
    16  // error message instead.
    17  func panicCheckMalloc(err error) {
    18  	gp := getg()
    19  	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
    20  		throw(string(err.(errorString)))
    21  	}
    22  }
    23  
// indexError is constructed once at package init so raising it later
// does not require allocating.
var indexError = error(errorString("index out of range"))

// panicindex panics with the prepared index-out-of-range error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicindex() {
	panicCheckMalloc(indexError)
	panic(indexError)
}
    30  
// sliceError is constructed once at package init so raising it later
// does not require allocating.
var sliceError = error(errorString("slice bounds out of range"))

// panicslice panics with the prepared slice-bounds error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicslice() {
	panicCheckMalloc(sliceError)
	panic(sliceError)
}
    37  
// divideError is constructed once at package init so raising it later
// does not require allocating.
var divideError = error(errorString("integer divide by zero"))

// panicdivide panics with the prepared divide-by-zero error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}
    44  
// overflowError is constructed once at package init so raising it later
// does not require allocating.
var overflowError = error(errorString("integer overflow"))

// panicoverflow panics with the prepared integer-overflow error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}
    51  
// floatError is constructed once at package init so raising it later
// does not require allocating.
var floatError = error(errorString("floating point error"))

// panicfloat panics with the prepared floating-point error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}
    58  
// memoryError is constructed once at package init so raising it later
// does not require allocating.
var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

// panicmem panics with the prepared invalid-memory error,
// throwing instead if malloc is locked (see panicCheckMalloc).
func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}
    65  
// throwinit crashes the program when a recursive call is detected
// during package initialization.
func throwinit() {
	throw("recursive call during initialization - linker skew")
}
    69  
// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp(unsafe.Pointer(&siz))
	// fn's arguments were pushed immediately after fn itself
	// (see the signature comment above).
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc(unsafe.Pointer(&siz))

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	// Copy the caller's argument bytes into the defer record:
	// a single pointer-sized payload gets a direct store, anything
	// larger goes through memmove.
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}
   114  
// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	// deferHeaderSize is the size of the _defer record itself.
	deferHeaderSize = unsafe.Sizeof(_defer{})
	// minDeferAlloc is the header rounded up to a multiple of 16.
	minDeferAlloc = (deferHeaderSize + 15) &^ 15
	// minDeferArgs is how many argument bytes fit "for free" in the
	// minimum allocation, after the header.
	minDeferArgs = minDeferAlloc - deferHeaderSize
)
   124  
   125  // defer size class for arg size sz
   126  //go:nosplit
   127  func deferclass(siz uintptr) uintptr {
   128  	if siz <= minDeferArgs {
   129  		return 0
   130  	}
   131  	return (siz - minDeferArgs + 15) / 16
   132  }
   133  
   134  // total size of memory block for defer with arg size sz
   135  func totaldefersize(siz uintptr) uintptr {
   136  	if siz <= minDeferArgs {
   137  		return minDeferAlloc
   138  	}
   139  	return deferHeaderSize + siz
   140  }
   141  
   142  // Ensure that defer arg sizes that map to the same defer size class
   143  // also map to the same malloc size class.
   144  func testdefersizes() {
   145  	var m [len(p{}.deferpool)]int32
   146  
   147  	for i := range m {
   148  		m[i] = -1
   149  	}
   150  	for i := uintptr(0); ; i++ {
   151  		defersc := deferclass(i)
   152  		if defersc >= uintptr(len(m)) {
   153  			break
   154  		}
   155  		siz := roundupsize(totaldefersize(i))
   156  		if m[defersc] < 0 {
   157  			m[defersc] = int32(siz)
   158  			continue
   159  		}
   160  		if m[defersc] != int32(siz) {
   161  			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
   162  			throw("bad defer size class")
   163  		}
   164  	}
   165  }
   166  
   167  // The arguments associated with a deferred call are stored
   168  // immediately after the _defer header in memory.
   169  //go:nosplit
   170  func deferArgs(d *_defer) unsafe.Pointer {
   171  	if d.siz == 0 {
   172  		// Avoid pointer past the defer allocation.
   173  		return nil
   174  	}
   175  	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
   176  }
   177  
var deferType *_type // type of _defer struct

// init recovers the _type descriptor for _defer so newdefer can pass
// it to mallocgc.
func init() {
	var x interface{}
	x = (*_defer)(nil)
	// An interface value's first word points at its type descriptor.
	// Reading the first word of x yields the *ptrtype for *_defer;
	// its elem field is the _type for _defer itself.
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}
   185  
// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Local pool empty: refill it to half capacity from the
			// central pool under sched.deferlock.
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		// Pop the most recently freed defer from the local pool.
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Nothing pooled (or the arg size is beyond all pool classes).
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
	}
	d.siz = siz
	// Push onto the front of this goroutine's defer list.
	d.link = gp._defer
	gp._defer = d
	return d
}
   231  
// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	// A defer being freed must already be detached from any panic and
	// have had its fn cleared; the throw helpers below are separate
	// functions so they may split the stack (see their comments).
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		// Too large for any pool; drop it for the collector.
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			// Chain the popped defers together via their link fields,
			// then splice the chain onto the central pool in one step.
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}
	// Zero the record before pooling so it carries no stale pointers.
	*d = _defer{}
	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}
   279  
// freedeferpanic reports a defer freed while still attached to a panic.
// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}
   286  
// freedeferfn reports a defer freed while its fn is still set.
// Like freedeferpanic, kept separate so it may split the stack.
func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}
   291  
// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	// Only run defers registered by the caller's frame: d.sp was
	// recorded by deferproc and must match the caller's sp here.
	sp := getcallersp(unsafe.Pointer(&arg0))
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	// Detach and free the record before jumping; jmpdefer does not
	// return here.
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}
   337  
// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not panic, however, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			// This defer was already started by an in-flight panic;
			// mark that panic aborted and discard the defer.
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		// Mark started before calling so a nested panic finds d on
		// the list (see the matching comment in gopanic).
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}
   379  
   380  // Call all Error and String methods before freezing the world.
   381  // Used when crashing with panicking.
   382  // This must match types handled by printany.
   383  func preprintpanics(p *_panic) {
   384  	defer func() {
   385  		if recover() != nil {
   386  			throw("panic while printing panic value")
   387  		}
   388  	}()
   389  	for p != nil {
   390  		switch v := p.arg.(type) {
   391  		case error:
   392  			p.arg = v.Error()
   393  		case stringer:
   394  			p.arg = v.String()
   395  		}
   396  		p = p.link
   397  	}
   398  }
   399  
// Print all currently active panics. Used when crashing.
func printpanics(p *_panic) {
	// Recurse on the link first so the oldest panic in the chain is
	// printed first; each more recent panic is prefixed with a tab.
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}
   413  
// The implementation of the predeclared function panic.
// Runs the goroutine's deferred calls one at a time; if one of them
// recovers, control resumes after the corresponding deferproc via
// mcall(recovery). Otherwise the process crashes.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	// m.softfloat is set during software floating point.
	// It increments m.locks to avoid preemption.
	// We moved the memory loads out, so there shouldn't be
	// any reason for it to panic anymore.
	if gp.m.softfloat != 0 {
		gp.m.locks--
		gp.m.softfloat = 0
		throw("panic during softfloat")
	}
	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	// Link a new panic record onto the front of this goroutine's
	// panic chain. p lives on this stack; noescape hides the pointer
	// from escape analysis.
	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		// p.argp marks the frame whose recover call may stop this
		// panic; gorecover compares against it.
		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		// Save pc/sp before freeing d: recovery needs them to resume
		// at the deferproc call that registered this defer.
		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)
	startpanic()

	// startpanic set panicking, which will block main from exiting,
	// so now OK to decrement runningPanicDefers.
	atomic.Xadd(&runningPanicDefers, -1)

	printpanics(gp._panic)
	dopanic(0)       // should not return
	*(*int)(nil) = 0 // not reached
}
   544  
// getargp returns the location where the caller
// writes outgoing function call arguments.
// Marked noinline so that &x reliably refers to a slot in the
// caller's argument area rather than an inlined local.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}
   553  
   554  // The implementation of the predeclared function recover.
   555  // Cannot split the stack because it needs to reliably
   556  // find the stack segment of its caller.
   557  //
   558  // TODO(rsc): Once we commit to CopyStackAlways,
   559  // this doesn't need to be nosplit.
   560  //go:nosplit
   561  func gorecover(argp uintptr) interface{} {
   562  	// Must be in a function running as part of a deferred call during the panic.
   563  	// Must be called from the topmost function of the call
   564  	// (the function used in the defer statement).
   565  	// p.argp is the argument pointer of that topmost deferred function call.
   566  	// Compare against argp reported by caller.
   567  	// If they match, the caller is the one who can recover.
   568  	gp := getg()
   569  	p := gp._panic
   570  	if p != nil && !p.recovered && argp == uintptr(p.argp) {
   571  		p.recovered = true
   572  		return p.arg
   573  	}
   574  	return nil
   575  }
   576  
// startpanic begins an unrecoverable crash by running startpanic_m on
// the system stack.
//go:nosplit
func startpanic() {
	systemstack(startpanic_m)
}
   581  
// dopanic prints the crash report on the system stack and terminates
// the process; it never returns. The unused parameter exists only so
// its address can be taken to locate the caller's pc and sp.
//go:nosplit
func dopanic(unused int) {
	pc := getcallerpc(unsafe.Pointer(&unused))
	sp := getcallersp(unsafe.Pointer(&unused))
	gp := getg()
	systemstack(func() {
		dopanic_m(gp, pc, sp) // should never return
	})
	*(*int)(nil) = 0 // force a crash if dopanic_m somehow returns
}
   592  
// sync_throw is linked into package sync as sync.throw; it simply
// forwards to the runtime's throw.
//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}
   597  
// throw crashes the program with a "fatal error" message. Unlike
// panic, a throw is not recoverable: it goes straight to startpanic
// and dopanic and never returns.
//go:nosplit
func throw(s string) {
	print("fatal error: ", s, "\n")
	gp := getg()
	// Record that this M is throwing (unless it already was), so later
	// crash reporting (e.g. dopanic_m) prints the runtime stack.
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	startpanic()
	dopanic(0)
	*(*int)(nil) = 0 // not reached
}
   609  
// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically
// (see gopanic).
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically
// (see startpanic_m and dopanic_m).
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex
   622  
// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
// Called via mcall from gopanic; gp is the recovering goroutine.
func recovery(gp *g) {
	// Info about defer passed in G struct
	// (stashed in sigcode0/sigcode1 by gopanic).
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1 // the non-zero return value deferproc's caller tests for
	gogo(&gp.sched)
}
   646  
// startpanic_m prepares for an unrecoverable crash on the system
// stack: on the first call it freezes the world; on reentry (a
// failure while already panicking) it escalates, assuming less of the
// runtime works at each step.
func startpanic_m() {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
		_g_.m.mallocing = 1 // tell rest of panic not to try to malloc
	} else if _g_.m.mcache == nil { // can happen if called from signal handler or throw
		_g_.m.mcache = allocmcache()
	}

	// m.dying counts how far this M has gotten through the crash
	// sequence.
	switch _g_.m.dying {
	case 0:
		_g_.m.dying = 1
		_g_.writebuf = nil
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return
	case 1:
		// Something failed while panicking, probably the print of the
		// argument to panic().  Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		dopanic(0)
		exit(3)
		fallthrough
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
	}
}
   687  
// didothers records whether tracebackothers has already been run
// during this crash, so concurrent panicking Ms don't repeat it
// (see dopanic_m).
var didothers bool

// deadlock is locked twice by an M that loses the panicking race in
// dopanic_m; the second lock blocks it forever while the winner prints.
var deadlock mutex
   690  
// dopanic_m prints the crash report for gp — signal information (if
// any) and goroutine tracebacks — then terminates the process. pc/sp
// locate the frame where the crash was raised. Runs on the system
// stack and never returns.
func dopanic_m(gp *g, pc, sp uintptr) {
	if gp.sig != 0 {
		// The crash came from a signal; report its details first.
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			// The runtime's own g0 stack is only shown at higher
			// traceback levels or for throws.
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock) // second lock of the same mutex blocks forever
	}

	if docrash {
		crash()
	}

	exit(2)
}
   738  
   739  //go:nosplit
   740  func canpanic(gp *g) bool {
   741  	// Note that g is m->gsignal, different from gp.
   742  	// Note also that g->m can change at preemption, so m can go stale
   743  	// if this function ever makes a function call.
   744  	_g_ := getg()
   745  	_m_ := _g_.m
   746  
   747  	// Is it okay for gp to panic instead of crashing the program?
   748  	// Yes, as long as it is running Go code, not runtime code,
   749  	// and not stuck in a system call.
   750  	if gp == nil || gp != _m_.curg {
   751  		return false
   752  	}
   753  	if _m_.locks-_m_.softfloat != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
   754  		return false
   755  	}
   756  	status := readgstatus(gp)
   757  	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
   758  		return false
   759  	}
   760  	if GOOS == "windows" && _m_.libcallsp != 0 {
   761  		return false
   762  	}
   763  	return true
   764  }