github.com/zxy12/go_duplicate_112_new@v0.0.0-20200807091221-747231827200/src/runtime/panic.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Calling panic with one of the errors below will call errorString.Error
// which will call mallocgc to concatenate strings. That will fail if
// malloc is locked, causing a confusing error message. Throw a better
// error message instead.
func panicCheckMalloc(err error) {
	gp := getg()
	if gp != nil && gp.m != nil && gp.m.mallocing != 0 {
		throw(string(err.(errorString)))
	}
}

var indexError = error(errorString("index out of range"))

// The panicindex, panicslice, and panicdivide functions are called by
// code generated by the compiler for out of bounds index expressions,
// out of bounds slice expressions, and division by zero. The
// panicdivide (again), panicoverflow, panicfloat, and panicmem
// functions are called by the signal handler when a signal occurs
// indicating the respective problem.
//
// Since panicindex and panicslice are never called directly, and
// since the runtime package should never have an out of bounds slice
// or array reference, if we see those functions called from the
// runtime package we turn the panic into a throw. That will dump the
// entire runtime stack for easier debugging.
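//
// As an illustrative sketch (not the compiler's literal output), the
// bounds check for an index expression a[i] lowers to roughly:
//
//	if uint(i) >= uint(len(a)) {
//		panicindex()
//	}
//	... use a[i] ...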

func panicindex() {
	if hasPrefix(funcname(findfunc(getcallerpc())), "runtime.") {
		throw(string(indexError.(errorString)))
	}
	panicCheckMalloc(indexError)
	panic(indexError)
}

var sliceError = error(errorString("slice bounds out of range"))

func panicslice() {
	if hasPrefix(funcname(findfunc(getcallerpc())), "runtime.") {
		throw(string(sliceError.(errorString)))
	}
	panicCheckMalloc(sliceError)
	panic(sliceError)
}

var divideError = error(errorString("integer divide by zero"))

func panicdivide() {
	panicCheckMalloc(divideError)
	panic(divideError)
}

var overflowError = error(errorString("integer overflow"))

func panicoverflow() {
	panicCheckMalloc(overflowError)
	panic(overflowError)
}

var floatError = error(errorString("floating point error"))

func panicfloat() {
	panicCheckMalloc(floatError)
	panic(floatError)
}

var memoryError = error(errorString("invalid memory address or nil pointer dereference"))

func panicmem() {
	panicCheckMalloc(memoryError)
	panic(memoryError)
}

func throwinit() {
	throw("recursive call during initialization - linker skew")
}

// Create a new deferred function fn with siz bytes of arguments.
// The compiler turns a defer statement into a call to this.
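// As a rough sketch of that lowering (illustrative, not literal
// compiler output), a statement like
//
//	defer f(x)
//
// becomes approximately
//
//	if deferproc(siz, f /* x copied just after fn */) != 0 {
//		goto ret // deferproc returns 1 when a deferred call recovered a panic
//	}
//	...
//	ret:
//		deferreturn()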
//go:nosplit
func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
	if getg().m.curg != getg() {
		// go code on the system stack can't defer
		throw("defer on system stack")
	}

	// the arguments of fn are in a perilous state. The stack map
	// for deferproc does not describe them. So we can't let garbage
	// collection or stack copying trigger until we've copied them out
	// to somewhere safe. The memmove below does that.
	// Until the copy completes, we can only call nosplit routines.
	sp := getcallersp()
	argp := uintptr(unsafe.Pointer(&fn)) + unsafe.Sizeof(fn)
	callerpc := getcallerpc()

	d := newdefer(siz)
	if d._panic != nil {
		throw("deferproc: d.panic != nil after newdefer")
	}
	d.fn = fn
	d.pc = callerpc
	d.sp = sp
	switch siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(deferArgs(d)) = *(*uintptr)(unsafe.Pointer(argp))
	default:
		memmove(deferArgs(d), unsafe.Pointer(argp), uintptr(siz))
	}

	// deferproc returns 0 normally.
	// a deferred func that stops a panic
	// makes the deferproc return 1.
	// the code the compiler generates always
	// checks the return value and jumps to the
	// end of the function if deferproc returns != 0.
	return0()
	// No code can go here - the C return register has
	// been set and must not be clobbered.
}

// Small malloc size classes >= 16 are the multiples of 16: 16, 32, 48, 64, 80, 96, 112, 128, 144, ...
// Each P holds a pool for defers with small arg sizes.
// Assign defer allocations to pools by rounding to 16, to match malloc size classes.

const (
	deferHeaderSize = unsafe.Sizeof(_defer{})
	minDeferAlloc   = (deferHeaderSize + 15) &^ 15
	minDeferArgs    = minDeferAlloc - deferHeaderSize
)
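
// As a worked example (assuming, for illustration only, that
// deferHeaderSize is 48 on some platform): minDeferAlloc = (48+15)&^15 = 48
// and minDeferArgs = 0, so deferclass(0) == 0, deferclass(1..16) == 1,
// deferclass(17..32) == 2, and so on, one class per 16 bytes to line up
// with the malloc size classes above.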

// defer size class for arg size sz
//go:nosplit
func deferclass(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return 0
	}
	return (siz - minDeferArgs + 15) / 16
}

// total size of memory block for defer with arg size sz
func totaldefersize(siz uintptr) uintptr {
	if siz <= minDeferArgs {
		return minDeferAlloc
	}
	return deferHeaderSize + siz
}

// Ensure that defer arg sizes that map to the same defer size class
// also map to the same malloc size class.
func testdefersizes() {
	var m [len(p{}.deferpool)]int32

	for i := range m {
		m[i] = -1
	}
	for i := uintptr(0); ; i++ {
		defersc := deferclass(i)
		if defersc >= uintptr(len(m)) {
			break
		}
		siz := roundupsize(totaldefersize(i))
		if m[defersc] < 0 {
			m[defersc] = int32(siz)
			continue
		}
		if m[defersc] != int32(siz) {
			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
			throw("bad defer size class")
		}
	}
}

// The arguments associated with a deferred call are stored
// immediately after the _defer header in memory.
//go:nosplit
func deferArgs(d *_defer) unsafe.Pointer {
	if d.siz == 0 {
		// Avoid pointer past the defer allocation.
		return nil
	}
	return add(unsafe.Pointer(d), unsafe.Sizeof(*d))
}

var deferType *_type // type of _defer struct

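// The conversion in init below relies on the runtime's interface layout:
// an interface value is a (type pointer, data pointer) pair, so casting
// &x to **ptrtype reads the *ptrtype descriptor stored in x, whose elem
// field is the type descriptor for _defer itself.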
func init() {
	var x interface{}
	x = (*_defer)(nil)
	deferType = (*(**ptrtype)(unsafe.Pointer(&x))).elem
}

// Allocate a Defer, usually using per-P pool.
// Each defer must be released with freedefer.
//
// This must not grow the stack because there may be a frame without
// stack map information when this is called.
//
//go:nosplit
func newdefer(siz int32) *_defer {
	var d *_defer
	sc := deferclass(uintptr(siz))
	gp := getg()
	if sc < uintptr(len(p{}.deferpool)) {
		pp := gp.m.p.ptr()
		if len(pp.deferpool[sc]) == 0 && sched.deferpool[sc] != nil {
			// Take the slow path on the system stack so
			// we don't grow newdefer's stack.
			systemstack(func() {
				lock(&sched.deferlock)
				for len(pp.deferpool[sc]) < cap(pp.deferpool[sc])/2 && sched.deferpool[sc] != nil {
					d := sched.deferpool[sc]
					sched.deferpool[sc] = d.link
					d.link = nil
					pp.deferpool[sc] = append(pp.deferpool[sc], d)
				}
				unlock(&sched.deferlock)
			})
		}
		if n := len(pp.deferpool[sc]); n > 0 {
			d = pp.deferpool[sc][n-1]
			pp.deferpool[sc][n-1] = nil
			pp.deferpool[sc] = pp.deferpool[sc][:n-1]
		}
	}
	if d == nil {
		// Allocate new defer+args.
		systemstack(func() {
			total := roundupsize(totaldefersize(uintptr(siz)))
			d = (*_defer)(mallocgc(total, deferType, true))
		})
		if debugCachedWork {
			// Duplicate the tail below so if there's a
			// crash in checkPut we can tell if d was just
			// allocated or came from the pool.
			d.siz = siz
			d.link = gp._defer
			gp._defer = d
			return d
		}
	}
	d.siz = siz
	d.link = gp._defer
	gp._defer = d
	return d
}

// Free the given defer.
// The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) {
	if d._panic != nil {
		freedeferpanic()
	}
	if d.fn != nil {
		freedeferfn()
	}
	sc := deferclass(uintptr(d.siz))
	if sc >= uintptr(len(p{}.deferpool)) {
		return
	}
	pp := getg().m.p.ptr()
	if len(pp.deferpool[sc]) == cap(pp.deferpool[sc]) {
		// Transfer half of local cache to the central cache.
		//
		// Take this slow path on the system stack so
		// we don't grow freedefer's stack.
		systemstack(func() {
			var first, last *_defer
			for len(pp.deferpool[sc]) > cap(pp.deferpool[sc])/2 {
				n := len(pp.deferpool[sc])
				d := pp.deferpool[sc][n-1]
				pp.deferpool[sc][n-1] = nil
				pp.deferpool[sc] = pp.deferpool[sc][:n-1]
				if first == nil {
					first = d
				} else {
					last.link = d
				}
				last = d
			}
			lock(&sched.deferlock)
			last.link = sched.deferpool[sc]
			sched.deferpool[sc] = first
			unlock(&sched.deferlock)
		})
	}

	// These lines used to be simply `*d = _defer{}` but that
	// started causing a nosplit stack overflow via typedmemmove.
	d.siz = 0
	d.started = false
	d.sp = 0
	d.pc = 0
	// d._panic and d.fn must be nil already.
	// If not, we would have called freedeferpanic or freedeferfn above,
	// both of which throw.
	d.link = nil

	pp.deferpool[sc] = append(pp.deferpool[sc], d)
}

// Separate function so that it can split stack.
// Windows otherwise runs out of stack space.
func freedeferpanic() {
	// _panic must be cleared before d is unlinked from gp.
	throw("freedefer with d._panic != nil")
}

func freedeferfn() {
	// fn must be cleared before d is unlinked from gp.
	throw("freedefer with d.fn != nil")
}

// Run a deferred function if there is one.
// The compiler inserts a call to this at the end of any
// function which calls defer.
// If there is a deferred function, this will call runtime·jmpdefer,
// which will jump to the deferred function such that it appears
// to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions.
// Cannot split the stack because we reuse the caller's frame to
// call the deferred function.
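//
// A sketch of the resulting control flow (illustrative, not the literal
// assembly):
//
//	f's epilogue:  CALL deferreturn; RET
//	deferreturn:   copies d's args over arg0, then jmpdefer(fn, sp)
//	jmpdefer:      rewinds SP and PC so that fn's eventual RET lands on
//	               the CALL deferreturn again, running the next defer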

// The single argument isn't actually used - it just has its address
// taken so it can be matched against pending defers.
//go:nosplit
func deferreturn(arg0 uintptr) {
	gp := getg()
	d := gp._defer
	if d == nil {
		return
	}
	sp := getcallersp()
	if d.sp != sp {
		return
	}

	// Moving arguments around.
	//
	// Everything called after this point must be recursively
	// nosplit because the garbage collector won't know the form
	// of the arguments until the jmpdefer can flip the PC over to
	// fn.
	switch d.siz {
	case 0:
		// Do nothing.
	case sys.PtrSize:
		*(*uintptr)(unsafe.Pointer(&arg0)) = *(*uintptr)(deferArgs(d))
	default:
		memmove(unsafe.Pointer(&arg0), deferArgs(d), uintptr(d.siz))
	}
	fn := d.fn
	d.fn = nil
	gp._defer = d.link
	freedefer(d)
	jmpdefer(fn, uintptr(unsafe.Pointer(&arg0)))
}

// Goexit terminates the goroutine that calls it. No other goroutine is affected.
// Goexit runs all deferred calls before terminating the goroutine. Because Goexit
// is not a panic, any recover calls in those deferred functions will return nil.
//
// Calling Goexit from the main goroutine terminates that goroutine
// without func main returning. Since func main has not returned,
// the program continues execution of other goroutines.
// If all other goroutines exit, the program crashes.
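//
// For example (illustrative user code):
//
//	go func() {
//		defer println("runs before the goroutine exits")
//		runtime.Goexit()
//		println("never reached")
//	}()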
func Goexit() {
	// Run all deferred functions for the current goroutine.
	// This code is similar to gopanic, see that implementation
	// for detailed comments.
	gp := getg()
	for {
		d := gp._defer
		if d == nil {
			break
		}
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
				d._panic = nil
			}
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}
		d.started = true
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		if gp._defer != d {
			throw("bad defer entry in Goexit")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link
		freedefer(d)
		// Note: we ignore recovers here because Goexit isn't a panic
	}
	goexit1()
}

// Call all Error and String methods before freezing the world.
// Used when crashing from a panic.
func preprintpanics(p *_panic) {
	defer func() {
		if recover() != nil {
			throw("panic while printing panic value")
		}
	}()
	for p != nil {
		switch v := p.arg.(type) {
		case error:
			p.arg = v.Error()
		case stringer:
			p.arg = v.String()
		}
		p = p.link
	}
}

// Print all currently active panics. Used when crashing.
// Should only be called after preprintpanics.
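//
// For example, when a deferred function recovers a panic and then panics
// again, the crash output looks like (illustrative):
//
//	panic: first [recovered]
//		panic: second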
func printpanics(p *_panic) {
	if p.link != nil {
		printpanics(p.link)
		print("\t")
	}
	print("panic: ")
	printany(p.arg)
	if p.recovered {
		print(" [recovered]")
	}
	print("\n")
}

// The implementation of the predeclared function panic.
func gopanic(e interface{}) {
	gp := getg()
	if gp.m.curg != gp {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic on system stack")
	}

	if gp.m.mallocing != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic during malloc")
	}
	if gp.m.preemptoff != "" {
		print("panic: ")
		printany(e)
		print("\n")
		print("preempt off reason: ")
		print(gp.m.preemptoff)
		print("\n")
		throw("panic during preemptoff")
	}
	if gp.m.locks != 0 {
		print("panic: ")
		printany(e)
		print("\n")
		throw("panic holding locks")
	}

	var p _panic
	p.arg = e
	p.link = gp._panic
	gp._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

	atomic.Xadd(&runningPanicDefers, 1)

	for {
		d := gp._defer
		if d == nil {
			break
		}

		// If defer was started by earlier panic or Goexit (and, since we're back here, that triggered a new panic),
		// take defer off list. The earlier panic or Goexit will not continue running.
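		//
		// Illustration: in
		//
		//	defer func() { recover() }()       // recovers "second"
		//	defer func() { panic("second") }() // runs first, panics again
		//	panic("first")
		//
		// the second panic finds the started defer for "first" here and
		// marks that earlier panic aborted; only "second" can be recovered.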
		if d.started {
			if d._panic != nil {
				d._panic.aborted = true
			}
			d._panic = nil
			d.fn = nil
			gp._defer = d.link
			freedefer(d)
			continue
		}

		// Mark defer as started, but keep on list, so that traceback
		// can find and update the defer's argument frame if stack growth
		// or a garbage collection happens before reflectcall starts executing d.fn.
		d.started = true

		// Record the panic that is running the defer.
		// If there is a new panic during the deferred call, that panic
		// will find d in the list and will mark d._panic (this panic) aborted.
		d._panic = (*_panic)(noescape(unsafe.Pointer(&p)))

		p.argp = unsafe.Pointer(getargp(0))
		reflectcall(nil, unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
		p.argp = nil

		// reflectcall did not panic. Remove d.
		if gp._defer != d {
			throw("bad defer entry in panic")
		}
		d._panic = nil
		d.fn = nil
		gp._defer = d.link

		// trigger shrinkage to test stack copy. See stack_test.go:TestStackPanic
		//GC()

		pc := d.pc
		sp := unsafe.Pointer(d.sp) // must be pointer so it gets adjusted during stack copy
		freedefer(d)
		if p.recovered {
			atomic.Xadd(&runningPanicDefers, -1)

			gp._panic = p.link
			// Aborted panics are marked but remain on the g.panic list.
			// Remove them from the list.
			for gp._panic != nil && gp._panic.aborted {
				gp._panic = gp._panic.link
			}
			if gp._panic == nil { // must be done with signal
				gp.sig = 0
			}
			// Pass information about recovering frame to recovery.
			gp.sigcode0 = uintptr(sp)
			gp.sigcode1 = pc
			mcall(recovery)
			throw("recovery failed") // mcall should not return
		}
	}

	// ran out of deferred calls - old-school panic now
	// Because it is unsafe to call arbitrary user code after freezing
	// the world, we call preprintpanics to invoke all necessary Error
	// and String methods to prepare the panic strings before startpanic.
	preprintpanics(gp._panic)

	fatalpanic(gp._panic) // should not return
	*(*int)(nil) = 0      // not reached
}

// getargp returns the location where the caller
// writes outgoing function call arguments.
//go:nosplit
//go:noinline
func getargp(x int) uintptr {
	// x is an argument mainly so that we can return its address.
	return uintptr(noescape(unsafe.Pointer(&x)))
}

// The implementation of the predeclared function recover.
// Cannot split the stack because it needs to reliably
// find the stack segment of its caller.
//
// TODO(rsc): Once we commit to CopyStackAlways,
// this doesn't need to be nosplit.
//go:nosplit
func gorecover(argp uintptr) interface{} {
	// Must be in a function running as part of a deferred call during the panic.
	// Must be called from the topmost function of the call
	// (the function used in the defer statement).
	// p.argp is the argument pointer of that topmost deferred function call.
	// Compare against argp reported by caller.
	// If they match, the caller is the one who can recover.
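	//
	// Illustration (user-level sketch): in
	//
	//	defer func() { recover() }() // stops the panic: argp matches p.argp
	//
	// recover is called by the topmost deferred function, while in
	//
	//	defer func() { helper() }() // recover inside helper returns nil
	//
	// recover runs one frame too deep, so the argp its caller reports no
	// longer matches p.argp and the panic keeps going.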
	gp := getg()
	p := gp._panic
	if p != nil && !p.recovered && argp == uintptr(p.argp) {
		p.recovered = true
		return p.arg
	}
	return nil
}

//go:linkname sync_throw sync.throw
func sync_throw(s string) {
	throw(s)
}

//go:nosplit
func throw(s string) {
	// Everything throw does should be recursively nosplit so it
	// can be called even when it's unsafe to grow the stack.
	systemstack(func() {
		print("fatal error: ", s, "\n")
	})
	gp := getg()
	if gp.m.throwing == 0 {
		gp.m.throwing = 1
	}
	fatalthrow()
	*(*int)(nil) = 0 // not reached
}

// runningPanicDefers is non-zero while running deferred functions for panic.
// runningPanicDefers is incremented and decremented atomically.
// This is used to try hard to get a panic stack trace out when exiting.
var runningPanicDefers uint32

// panicking is non-zero when crashing the program for an unrecovered panic.
// panicking is incremented and decremented atomically.
var panicking uint32

// paniclk is held while printing the panic information and stack trace,
// so that two concurrent panics don't overlap their output.
var paniclk mutex

// Unwind the stack after a deferred function calls recover
// after a panic. Then arrange to continue running as though
// the caller of the deferred function returned normally.
func recovery(gp *g) {
	// Info about defer passed in G struct.
	sp := gp.sigcode0
	pc := gp.sigcode1

	// d's arguments need to be in the stack.
	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
		throw("bad recovery")
	}

	// Make the deferproc for this d return again,
	// this time returning 1.  The calling function will
	// jump to the standard return epilogue.
	gp.sched.sp = sp
	gp.sched.pc = pc
	gp.sched.lr = 0
	gp.sched.ret = 1
	gogo(&gp.sched)
}

// fatalthrow implements an unrecoverable runtime throw. It freezes the
// system, prints stack traces starting from its caller, and terminates the
// process.
//
//go:nosplit
func fatalthrow() {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		startpanic_m()

		if dopanic_m(gp, pc, sp) {
			// crash uses a decent amount of nosplit stack and we're already
			// low on stack in throw, so crash on the system stack (unlike
			// fatalpanic).
			crash()
		}

		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// fatalpanic implements an unrecoverable panic. It is like fatalthrow, except
// that if msgs != nil, fatalpanic also prints panic messages and decrements
// runningPanicDefers once main is blocked from exiting.
//
//go:nosplit
func fatalpanic(msgs *_panic) {
	pc := getcallerpc()
	sp := getcallersp()
	gp := getg()
	var docrash bool
	// Switch to the system stack to avoid any stack growth, which
	// may make things worse if the runtime is in a bad state.
	systemstack(func() {
		if startpanic_m() && msgs != nil {
			// There were panic messages and startpanic_m
			// says it's okay to try to print them.

			// startpanic_m set panicking, which will
			// block main from exiting, so now OK to
			// decrement runningPanicDefers.
			atomic.Xadd(&runningPanicDefers, -1)

			printpanics(msgs)
		}

		docrash = dopanic_m(gp, pc, sp)
	})

	if docrash {
		// By crashing outside the above systemstack call, debuggers
		// will not be confused when generating a backtrace.
		// Function crash is marked nosplit to avoid stack growth.
		crash()
	}

	systemstack(func() {
		exit(2)
	})

	*(*int)(nil) = 0 // not reached
}

// startpanic_m prepares for an unrecoverable panic.
//
// It returns true if panic messages should be printed, or false if
// the runtime is in bad shape and should just print stacks.
//
// It must not have write barriers even though the write barrier
// explicitly ignores writes once dying > 0. Write barriers still
// assume that g.m.p != nil, and this function may not have P
// in some contexts (e.g. a panic in a signal handler for a signal
// sent to an M with no P).
//
//go:nowritebarrierrec
func startpanic_m() bool {
	_g_ := getg()
	if mheap_.cachealloc.size == 0 { // very early
		print("runtime: panic before malloc heap initialized\n")
	}
	// Disallow malloc during an unrecoverable panic. A panic
	// could happen in a signal handler, or in a throw, or inside
	// malloc itself. We want to catch if an allocation ever does
	// happen (even if we're not in one of these situations).
	_g_.m.mallocing++

	// If we're dying because of a bad lock count, set it to a
	// good lock count so we don't recursively panic below.
	if _g_.m.locks < 0 {
		_g_.m.locks = 1
	}

	switch _g_.m.dying {
	case 0:
		// Setting dying >0 has the side-effect of disabling this G's writebuf.
		_g_.m.dying = 1
		atomic.Xadd(&panicking, 1)
		lock(&paniclk)
		if debug.schedtrace > 0 || debug.scheddetail > 0 {
			schedtrace(true)
		}
		freezetheworld()
		return true
	case 1:
		// Something failed while panicking.
		// Just print a stack trace and exit.
		_g_.m.dying = 2
		print("panic during panic\n")
		return false
	case 2:
		// This is a genuine bug in the runtime, we couldn't even
		// print the stack trace successfully.
		_g_.m.dying = 3
		print("stack trace unavailable\n")
		exit(4)
		fallthrough
	default:
		// Can't even print! Just exit.
		exit(5)
		return false // Need to return something.
	}
}

var didothers bool
var deadlock mutex

func dopanic_m(gp *g, pc, sp uintptr) bool {
	if gp.sig != 0 {
		signame := signame(gp.sig)
		if signame != "" {
			print("[signal ", signame)
		} else {
			print("[signal ", hex(gp.sig))
		}
		print(" code=", hex(gp.sigcode0), " addr=", hex(gp.sigcode1), " pc=", hex(gp.sigpc), "]\n")
	}

	level, all, docrash := gotraceback()
	_g_ := getg()
	if level > 0 {
		if gp != gp.m.curg {
			all = true
		}
		if gp != gp.m.g0 {
			print("\n")
			goroutineheader(gp)
			traceback(pc, sp, 0, gp)
		} else if level >= 2 || _g_.m.throwing > 0 {
			print("\nruntime stack:\n")
			traceback(pc, sp, 0, gp)
		}
		if !didothers && all {
			didothers = true
			tracebackothers(gp)
		}
	}
	unlock(&paniclk)

	if atomic.Xadd(&panicking, -1) != 0 {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		lock(&deadlock)
		lock(&deadlock)
	}

	return docrash
}

// canpanic returns false if a signal should throw instead of
// panicking.
//
//go:nosplit
func canpanic(gp *g) bool {
	// Note that g is m->gsignal, different from gp.
	// Note also that g->m can change at preemption, so m can go stale
	// if this function ever makes a function call.
	_g_ := getg()
	_m_ := _g_.m

	// Is it okay for gp to panic instead of crashing the program?
	// Yes, as long as it is running Go code, not runtime code,
	// and not stuck in a system call.
	if gp == nil || gp != _m_.curg {
		return false
	}
	if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 {
		return false
	}
	status := readgstatus(gp)
	if status&^_Gscan != _Grunning || gp.syscallsp != 0 {
		return false
	}
	if GOOS == "windows" && _m_.libcallsp != 0 {
		return false
	}
	return true
}

// shouldPushSigpanic reports whether pc should be used as sigpanic's
// return PC (pushing a frame for the call). Otherwise, it should be
// left alone so that LR is used as sigpanic's return PC, effectively
// replacing the top-most frame with sigpanic. This is used by
// preparePanic.
func shouldPushSigpanic(gp *g, pc, lr uintptr) bool {
	if pc == 0 {
		// Probably a call to a nil func. The old LR is more
		// useful in the stack trace. Not pushing the frame
		// will make the trace look like a call to sigpanic
		// instead. (Otherwise the trace will end at sigpanic
		// and we won't get to see who faulted.)
		return false
	}
	// If we don't recognize the PC as code, but we do recognize
	// the link register as code, then this assumes the panic was
	// caused by a call to non-code. In this case, we want to
	// ignore this call to make unwinding show the context.
	//
	// If we're running C code, we're not going to recognize pc as a
	// Go function, so just assume it's good. Otherwise, traceback
	// may try to read a stale LR that looks like a Go code
	// pointer and wander into the woods.
	if gp.m.incgo || findfunc(pc).valid() {
		// This wasn't a bad call, so use PC as sigpanic's
		// return PC.
		return true
	}
	if findfunc(lr).valid() {
		// This was a bad call, but the LR is good, so use the
		// LR as sigpanic's return PC.
		return false
	}
	// Neither the PC nor the LR is good. Hopefully pushing a frame
	// will work.
	return true
}

// isAbortPC reports whether pc is the program counter at which
// runtime.abort raises a signal.
//
// It is nosplit because it's part of the isgoexception
// implementation.
//
//go:nosplit
func isAbortPC(pc uintptr) bool {
	return pc == funcPC(abort) || ((GOARCH == "arm" || GOARCH == "arm64") && pc == funcPC(abort)+sys.PCQuantum)
}