github.com/primecitizens/pcz/std@v0.2.1/core/stack/unwind.go

// SPDX-License-Identifier: Apache-2.0
// Copyright 2023 The Prime Citizens
//
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build ignore

package stack

import (
	"unsafe"

	stdgo "github.com/primecitizens/pcz/std/builtin/go"
	stdprint "github.com/primecitizens/pcz/std/builtin/print"
	"github.com/primecitizens/pcz/std/core/abi"
	"github.com/primecitizens/pcz/std/core/arch"
	"github.com/primecitizens/pcz/std/core/assert"
	"github.com/primecitizens/pcz/std/core/num"
	"github.com/primecitizens/pcz/std/core/os"
	"github.com/primecitizens/pcz/std/core/thread"
)

type hex = uint64

type pcvalueCache struct {
	entries [2][8]pcvalueCacheEnt
}

type pcvalueCacheEnt struct {
	// targetpc and off together are the key of this cache entry.
	targetpc uintptr
	off      uint32
	// val is the value of this cached pcvalue entry.
	val int32
}

// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.
// It must be very cheap to calculate.
// For now, align to arch.PtrSize and reduce mod the number of entries.
// In practice, this appears to be fairly randomly and evenly distributed.
func pcvalueCacheKey(targetpc uintptr) uintptr {
	return (targetpc / arch.PtrSize) % uintptr(len(pcvalueCache{}.entries))
}
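// For example, with arch.PtrSize == 8 and the two outer entries above,
// pointer-aligned PCs alternate between the two associative sets
// (the addresses here are illustrative only):
//
//	pcvalueCacheKey(0x4010a0) == (0x4010a0/8)%2 == 0
//	pcvalueCacheKey(0x4010a8) == (0x4010a8/8)%2 == 1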

const debugPcln = false

func funcspdelta(f abi.FuncInfo, targetpc uintptr, cache *pcvalueCache) int32 {
	x, _ := pcvalue(f, f.PCSP, targetpc, cache, true)
	if debugPcln && x&(arch.PtrSize-1) != 0 {
		print("invalid spdelta ", f.Name(), " ", hex(f.Entry()), " ", hex(targetpc), " ", hex(f.PCSP), " ", x, "\n")
		assert.Throw("bad", "spdelta")
	}
	return x
}

// Returns the PCData value, and the PC where this value starts.
// TODO: the start PC is returned only when cache is nil.
func pcvalue(f abi.FuncInfo, off uint32, targetpc uintptr, cache *pcvalueCache, strict bool) (int32, uintptr) {
	if off == 0 {
		return -1, 0
	}

	// Check the cache. This speeds up walks of deep stacks, which
	// tend to have the same recursive functions over and over.
	//
	// This cache is small enough that full associativity is
	// cheaper than doing the hashing for a less associative
	// cache.
	if cache != nil {
		x := pcvalueCacheKey(targetpc)
		for i := range cache.entries[x] {
			// We check off first because we're more
			// likely to have multiple entries with
			// different offsets for the same targetpc
			// than the other way around, so we'll usually
			// fail in the first clause.
			ent := &cache.entries[x][i]
			if ent.off == off && ent.targetpc == targetpc {
				return ent.val, 0
			}
		}
	}

	if !f.Valid() {
		if strict && panicking.Load() == 0 {
			println("runtime:", "no", "module", "data", "for", hex(f.Entry()))
			assert.Throw("no", "module", "data")
		}
		return -1, 0
	}
	datap := f.Datap
	p := datap.PCTab[off:]
	pc := f.Entry()
	prevpc := pc
	val := int32(-1)
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.Entry())
		if !ok {
			break
		}
		if targetpc < pc {
			// Replace a random entry in the cache. Random
			// replacement prevents a performance cliff if
			// a recursive stack's cycle is slightly
			// larger than the cache.
			// Put the new element at the beginning,
			// since it is the most likely to be newly used.
			if cache != nil {
				x := pcvalueCacheKey(targetpc)
				e := &cache.entries[x]
				ci := fastrandn(uint32(len(cache.entries[x])))
				e[ci] = e[0]
				e[0] = pcvalueCacheEnt{
					targetpc: targetpc,
					off:      off,
					val:      val,
				}
			}

			return val, prevpc
		}
		prevpc = pc
	}

	// If there was a table, it should have covered all program counters.
	// If not, something is wrong.
	if panicking.Load() != 0 || !strict {
		return -1, 0
	}

	print(
		"runtime:", " ", "invalid", " ", "pc-encoded", " ", "table",
		" ", "f=", f.Name(),
		" ", "pc=", hex(pc),
		" ", "targetpc=", hex(targetpc),
		" ", "tab=", p, "\n",
	)

	p = datap.PCTab[off:]
	pc = f.Entry()
	val = -1
	for {
		var ok bool
		p, ok = step(p, &pc, &val, pc == f.Entry())
		if !ok {
			break
		}
		print("\tvalue=", val, " until pc=", hex(pc), "\n")
	}

	assert.Throw("invalid runtime symbol table")
	return -1, 0
}

// step advances to the next pc, value pair in the encoded table.
func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {
	// For both uvdelta and pcdelta, the common case (~70%)
	// is that they are a single byte. If so, avoid calling readvarint.
	uvdelta := uint32(p[0])
	if uvdelta == 0 && !first {
		return nil, false
	}
	n := uint32(1)
	if uvdelta&0x80 != 0 {
		n, uvdelta = readvarint(p)
	}
	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))
	p = p[n:]

	pcdelta := uint32(p[0])
	n = 1
	if pcdelta&0x80 != 0 {
		n, pcdelta = readvarint(p)
	}
	p = p[n:]
	*pc += uintptr(pcdelta * arch.PCQuantum)
	return p, true
}
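// Each table entry is thus a (value delta, pc delta) pair of varints, with
// the value delta zigzag-encoded: uvdelta 0, 1, 2, 3, 4, 5 decodes to
// 0, -1, 1, -2, 2, -3. As a worked example (not taken from a real table),
// the byte pair {0x05, 0x02} advances val by -3 and pc by 2*arch.PCQuantum.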

// readvarint reads a varint from p.
func readvarint(p []byte) (read uint32, val uint32) {
	var v, shift, n uint32
	for {
		b := p[n]
		n++
		v |= uint32(b&0x7F) << (shift & 31)
		if b&0x80 == 0 {
			break
		}
		shift += 7
	}
	return n, v
}
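// The encoding is little-endian base-128 (LEB128-style): each byte carries
// seven payload bits, and a set high bit means another byte follows. For
// example, the bytes {0xE5, 0x8E, 0x26} decode to
// 0x65 | 0x0E<<7 | 0x26<<14 = 624485, with read == 3.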

// The code in this file implements stack trace walking for all architectures.
// The most important fact about a given architecture is whether it uses a link register.
// On systems with link registers, the prologue for a non-leaf function stores the
// incoming value of LR at the bottom of the newly allocated stack frame.
// On systems without link registers (x86), the architecture pushes a return PC during
// the call instruction, so the return PC ends up above the stack frame.
// In this file, the return PC is always called LR, no matter how it was found.

const usesLR = arch.MinFrameSize > 0

const (
	// tracebackInnerFrames is the number of innermost frames to print in a
	// stack trace. The total maximum frames is tracebackInnerFrames +
	// tracebackOuterFrames.
	tracebackInnerFrames = 50

	// tracebackOuterFrames is the number of outermost frames to print in a
	// stack trace.
	tracebackOuterFrames = 50
)

// unwindFlags control the behavior of various unwinders.
type unwindFlags uint8

const (
	// unwindPrintErrors indicates that if unwinding encounters an error, it
	// should print a message and stop without throwing. This is used for things
	// like stack printing, where it's better to get incomplete information than
	// to crash. This is also used in situations where everything may not be
	// stopped nicely and the stack walk may not be able to complete, such as
	// during profiling signals or during a crash.
	//
	// If neither unwindPrintErrors nor unwindSilentErrors is set, unwinding
	// performs extra consistency checks and throws on any error.
	//
	// Note that there are a small number of fatal situations that will throw
	// regardless of unwindPrintErrors or unwindSilentErrors.
	unwindPrintErrors unwindFlags = 1 << iota

	// unwindSilentErrors silently ignores errors during unwinding.
	unwindSilentErrors

	// unwindTrap indicates that the initial PC and SP are from a trap, not a
	// return PC from a call.
	//
	// The unwindTrap flag is updated during unwinding. If set, frame.pc is the
	// address of a faulting instruction instead of the return address of a
	// call. It also means the liveness at pc may not be known.
	//
	// TODO: Distinguish frame.continpc, which is really the stack map PC, from
	// the actual continuation PC, which is computed differently depending on
	// this flag and a few other things.
	unwindTrap

	// unwindJumpStack indicates that, if the traceback is on a system stack, it
	// should resume tracing at the user stack when the system stack is
	// exhausted.
	unwindJumpStack
)
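// As an illustration of how these flags are meant to compose (typical
// combinations, not an exhaustive list): best-effort walks such as stack
// printing or profiling pass unwindPrintErrors or unwindSilentErrors so a
// bad frame merely cuts the walk short, while a GC stack scan passes 0 so
// that any error throws loudly.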

// An unwinder iterates the physical stack frames of a Go stack.
//
// Typical use of an unwinder looks like:
//
//	var u unwinder
//	for u.init(gp, 0); u.valid(); u.next() {
//		// ... use frame info in u ...
//	}
//
// Implementation note: This is carefully structured to be pointer-free because
// tracebacks happen in places that disallow write barriers (e.g., signals).
// Even if this is stack-allocated, its pointer-receiver methods don't know that
// their receiver is on the stack, so they still emit write barriers. Here we
// address that by carefully avoiding any pointers in this type. Another
// approach would be to split this into a mutable part that's passed by pointer
// but contains no pointers itself and an immutable part that's passed and
// returned by value and can contain pointers. We could potentially hide that
// we're doing that in trivial methods that are inlined into the caller that has
// the stack allocation, but that's fragile.
type unwinder struct {
	// frame is the current physical stack frame, or all 0s if
	// there is no frame.
	frame Frame

	// g is the G whose stack is being unwound. If the
	// unwindJumpStack flag is set and the unwinder jumps stacks,
	// this will be different from the initial G.
	g stdgo.Guintptr

	// cgoCtxt is the index into g.cgoCtxt of the next frame on the cgo stack.
	// The cgo stack is unwound in tandem with the Go stack as we find marker frames.
	cgoCtxt int

	// calleeFuncID is the function ID of the caller of the current
	// frame.
	calleeFuncID abi.FuncID

	// flags are the flags to this unwind. Some of these are updated as we
	// unwind (see the flags documentation).
	flags unwindFlags

	// cache is used to cache pcvalue lookups.
	cache pcvalueCache
}

// init initializes u to start unwinding gp's stack and positions the
// iterator on gp's innermost frame. gp must not be the current G.
//
// A single unwinder can be reused for multiple unwinds.
func (u *unwinder) init(gp *stdgo.GHead, flags unwindFlags) {
	// Implementation note: This starts the iterator on the first frame and we
	// provide a "valid" method. Alternatively, this could start in a "before
	// the first frame" state and "next" could return whether it was able to
	// move to the next frame, but that's both more awkward to use in a "for"
	// loop and is harder to implement because we have to do things differently
	// for the first frame.
	u.initAt(^uintptr(0), ^uintptr(0), ^uintptr(0), gp, flags)
}

func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *stdgo.GHead, flags unwindFlags) {
	// Don't call this "g"; it's too easy to get "g" and "gp" confused.
	if ourg := thread.G(); ourg == gp && ourg == ourg.M.curg {
		// The starting sp has been passed in as a uintptr, and the caller may
		// have other uintptr-typed stack references as well.
		// If during one of the calls that got us here or during one of the
		// callbacks below the stack must be grown, all these uintptr references
		// to the stack will not be updated, and traceback will continue
		// to inspect the old stack memory, which may no longer be valid.
		// Even if all the variables were updated correctly, it is not clear that
		// we want to expose a traceback that begins on one stack and ends
		// on another stack. That could confuse callers quite a bit.
		// Instead, we require that initAt and any other function that
		// accepts an sp for the current goroutine (typically obtained by
		// calling getcallersp) must not run on that goroutine's stack but
		// instead on the g0 stack.
		assert.Throw("cannot", "trace", "user", "goroutine", "on", "its", "own", "stack")
	}

	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
		if gp.SyscallSP != 0 {
			pc0 = gp.SyscallPC
			sp0 = gp.SyscallSP
			if usesLR {
				lr0 = 0
			}
		} else {
			pc0 = gp.Sched.PC
			sp0 = gp.Sched.SP
			if usesLR {
				lr0 = gp.Sched.LR
			}
		}
	}

	var frame Frame
	frame.pc = pc0
	frame.sp = sp0
	if usesLR {
		frame.lr = lr0
	}

	// If the PC is zero, it's likely a nil function call.
	// Start in the caller's frame.
	if frame.pc == 0 {
		if usesLR {
			frame.pc = *(*uintptr)(unsafe.Pointer(frame.sp))
			frame.lr = 0
		} else {
			frame.pc = uintptr(*(*uintptr)(unsafe.Pointer(frame.sp)))
			frame.sp += arch.PtrSize
		}
	}

	// runtime/internal/atomic functions call into kernel helpers on
	// arm < 7. See runtime/internal/atomic/sys_linux_arm.s.
	//
	// Start in the caller's frame.
	if arch.IsArm&os.IsLinux != 0 && goarm < 7 && frame.pc&0xffff0000 == 0xffff0000 {
		// Note that the calls are simple BL without pushing the return
		// address, so we use LR directly.
		//
		// The kernel helpers are frameless leaf functions, so SP and
		// LR are not touched.
		frame.pc = frame.lr
		frame.lr = 0
	}

	f := abi.FindFunc(frame.pc)
	if !f.Valid() {
		if flags&unwindSilentErrors == 0 {
			print("runtime: g ", gp.ID_, ": unknown pc ", hex(frame.pc), "\n")
			tracebackHexdump(gp.Stack, &frame, 0)
		}
		if flags&(unwindPrintErrors|unwindSilentErrors) == 0 {
			assert.Throw("unknown", "pc")
		}
		*u = unwinder{}
		return
	}
	frame.fn = f

	// Populate the unwinder.
	*u = unwinder{
		frame:        frame,
		g:            gp.Guintptr(),
		cgoCtxt:      len(gp.cgoCtxt) - 1,
		calleeFuncID: abi.FuncIDNormal,
		flags:        flags,
	}

	isSyscall := frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.SyscallPC && sp0 == gp.SyscallSP
	u.resolveInternal(true, isSyscall)
}
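// A minimal sketch of a full walk using the iterator above, collecting one
// symbolization PC per physical frame (pcBuf and its size are hypothetical
// and supplied by the caller):
//
//	var u unwinder
//	n := 0
//	for u.init(gp, unwindSilentErrors); n < len(pcBuf) && u.valid(); u.next() {
//		pcBuf[n] = u.symPC()
//		n++
//	}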

func (u *unwinder) valid() bool {
	return u.frame.pc != 0
}

// resolveInternal fills in u.frame based on u.frame.fn, pc, and sp.
//
// innermost indicates that this is the first resolve on this stack. If
// innermost is set, isSyscall indicates that the PC/SP was retrieved from
// gp.syscall*; this is otherwise ignored.
//
// On entry, u.frame contains:
//   - fn is the running function.
//   - pc is the PC in the running function.
//   - sp is the stack pointer at that program counter.
//   - For the innermost frame on LR machines, lr is the program counter that called fn.
//
// On return, u.frame contains:
//   - fp is the stack pointer of the caller.
//   - lr is the program counter that called fn.
//   - varp, argp, and continpc are populated for the current frame.
//
// If fn is a stack-jumping function, resolveInternal can change the entire
// frame state to follow that stack jump.
//
// This is internal to unwinder.
func (u *unwinder) resolveInternal(innermost, isSyscall bool) {
	frame := &u.frame
	gp := u.g.Ptr()

	f := frame.fn
	if f.PCSP == 0 {
		// No frame information, must be external function, like race support.
		// See golang.org/issue/13568.
		u.finishInternal()
		return
	}

	// Compute function info flags.
	flag := f.Flag
	if f.FuncID == abi.FuncID_cgocallback {
		// cgocallback does write SP to switch from the g0 to the curg stack,
		// but it carefully arranges that during the transition BOTH stacks
		// have cgocallback frame valid for unwinding through.
		// So we don't need to exclude it with the other SP-writing functions.
		flag &^= abi.FuncFlagSPWrite
	}
	if isSyscall {
		// Some Syscall functions write to SP, but they do so only after
		// saving the entry PC/SP using entersyscall.
		// Since we are using the entry PC/SP, the later SP write doesn't matter.
		flag &^= abi.FuncFlagSPWrite
	}

	// Found an actual function.
	// Derive frame pointer.
	if frame.fp == 0 {
		// Jump over system stack transitions. If we're on g0 and there's a user
		// goroutine, try to jump. Otherwise this is a regular call.
		// We also defensively check that this won't switch M's on us,
		// which could happen at critical points in the scheduler.
		// This ensures gp.m doesn't change from a stack jump.
		if u.flags&unwindJumpStack != 0 && gp == gp.M.g0 && gp.M.curg != nil && gp.M.curg.M == gp.M {
			switch f.FuncID {
			case abi.FuncID_morestack:
				// morestack does not return normally -- newstack()
				// gogo's to curg.sched. Match that.
				// This keeps morestack() from showing up in the backtrace,
				// but that makes some sense since it'll never be returned
				// to.
				gp = gp.M.curg
				u.g.Set(gp)
				frame.pc = gp.Sched.PC
				frame.fn = abi.FindFunc(frame.pc)
				f = frame.fn
				flag = f.Flag
				frame.lr = gp.Sched.LR
				frame.sp = gp.Sched.SP
				u.cgoCtxt = len(gp.cgoCtxt) - 1
			case abi.FuncID_systemstack:
				// systemstack returns normally, so just follow the
				// stack transition.
				if usesLR && funcspdelta(f, frame.pc, &u.cache) == 0 {
					// We're at the function prologue and the stack
					// switch hasn't happened, or epilogue where we're
					// about to return. Just unwind normally.
					// Do this only on LR machines because on x86
					// systemstack doesn't have an SP delta (the CALL
					// instruction opens the frame), therefore no way
					// to check.
					flag &^= abi.FuncFlagSPWrite
					break
				}
				gp = gp.M.curg
				u.g.Set(gp)
				frame.sp = gp.Sched.SP
				u.cgoCtxt = len(gp.cgoCtxt) - 1
				flag &^= abi.FuncFlagSPWrite
			}
		}
		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc, &u.cache))
		if !usesLR {
			// On x86, call instruction pushes return PC before entering new function.
			frame.fp += arch.PtrSize
		}
	}

	// Derive link register.
	if flag&abi.FuncFlagTopFrame != 0 {
		// This function marks the top of the stack. Stop the traceback.
		frame.lr = 0
	} else if flag&abi.FuncFlagSPWrite != 0 {
		// The function we are in does a write to SP that we don't know
		// how to encode in the spdelta table. Examples include context
		// switch routines like runtime.gogo but also any code that switches
		// to the g0 stack to run host C code.
		if u.flags&(unwindPrintErrors|unwindSilentErrors) != 0 {
			// We can't reliably unwind the SP (we might
			// not even be on the stack we think we are),
			// so stop the traceback here.
			frame.lr = 0
		} else {
			// For a GC stack traversal, we should only see
			// an SPWRITE function when it has voluntarily preempted itself on entry
			// during the stack growth check. In that case, the function has
			// not yet had a chance to do any writes to SP and is safe to unwind.
			// isAsyncSafePoint does not allow assembly functions to be async preempted,
			// and preemptPark double-checks that SPWRITE functions are not async preempted.
			// So for GC stack traversal, we can safely ignore SPWRITE for the innermost frame,
			// but farther up the stack we'd better not find any.
			if !innermost {
				println("traceback:", "unexpected", "SPWRITE", "function", f.Name())
				assert.Throw("traceback")
			}
		}
	} else {
		var lrPtr uintptr
		if usesLR {
			if innermost && frame.sp < frame.fp || frame.lr == 0 {
				lrPtr = frame.sp
				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
			}
		} else {
			if frame.lr == 0 {
				lrPtr = frame.fp - arch.PtrSize
				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr))
			}
		}
	}

	frame.varp = frame.fp
	if !usesLR {
		// On x86, call instruction pushes return PC before entering new function.
		frame.varp -= arch.PtrSize
	}

	// For architectures with frame pointers, if there's
	// a frame, then there's a saved frame pointer here.
	//
	// NOTE: This code is not as general as it looks.
	// On x86, the ABI is to save the frame pointer word at the
	// top of the stack frame, so we have to back down over it.
	// On arm64, the frame pointer should be at the bottom of
	// the stack (with R29 (aka FP) = RSP), in which case we would
	// not want to do the subtraction here. But we started out without
	// any frame pointer, and when we wanted to add it, we didn't
	// want to break all the assembly doing direct writes to 8(RSP)
	// to set the first parameter to a called function.
	// So we decided to write the FP link *below* the stack pointer
	// (with R29 = RSP - 8 in Go functions).
	// This is technically ABI-compatible but not standard.
	// And it happens to end up mimicking the x86 layout.
	// Other architectures may make different decisions.
	if frame.varp > frame.sp && arch.FramePointerEnabled {
		frame.varp -= arch.PtrSize
	}

	frame.argp = frame.fp + arch.MinFrameSize

	// Determine frame's 'continuation PC', where it can continue.
	// Normally this is the return address on the stack, but if sigpanic
	// is immediately below this function on the stack, then the frame
	// stopped executing due to a trap, and frame.pc is probably not
	// a safe point for looking up liveness information. In this panicking case,
	// the function either doesn't return at all (if it has no defers or if the
	// defers do not recover) or it returns from one of the calls to
	// deferproc a second time (if the corresponding deferred func recovers).
	// In the latter case, use a deferreturn call site as the continuation pc.
	frame.continpc = frame.pc
	if u.calleeFuncID == abi.FuncID_sigpanic {
		if frame.fn.DeferReturn != 0 {
			frame.continpc = frame.fn.Entry() + uintptr(frame.fn.DeferReturn) + 1
			// Note: this may perhaps keep return variables alive longer than
			// strictly necessary, as we are using "function has a defer statement"
			// as a proxy for "function actually deferred something". It seems
			// to be a minor drawback. (We used to actually look through the
			// gp._defer for a defer corresponding to this function, but that
			// is hard to do with defer records on the stack during a stack copy.)
			// Note: the +1 is to offset the -1 that
			// stack.go:getStackMap does to back up a return
			// address to make sure the pc is in the CALL instruction.
		} else {
			frame.continpc = 0
		}
	}
}

func (u *unwinder) next() {
	frame := &u.frame
	f := frame.fn
	gp := u.g.Ptr()

	// Do not unwind past the bottom of the stack.
	if frame.lr == 0 {
		u.finishInternal()
		return
	}
	flr := abi.FindFunc(frame.lr)
	if !flr.Valid() {
		// This happens if you get a profiling interrupt at just the wrong time.
		// In that context it is okay to stop early.
		// But if no error flags are set, we're doing a garbage collection and must
		// get everything, so crash loudly.
		fail := u.flags&(unwindPrintErrors|unwindSilentErrors) == 0
		doPrint := u.flags&unwindSilentErrors == 0
		if doPrint && gp.M.incgo && f.FuncID == abi.FuncID_sigpanic {
			// We can inject sigpanic
			// calls directly into C code,
			// in which case we'll see a C
			// return PC. Don't complain.
			doPrint = false
		}
		if fail || doPrint {
			print("runtime: g ", gp.ID_, ": unexpected return pc for ", f.Name(), " called from ", hex(frame.lr), "\n")
			tracebackHexdump(gp.Stack, frame, 0)
		}
		if fail {
			assert.Throw("unknown", "caller", "pc")
		}
		frame.lr = 0
		u.finishInternal()
		return
	}

	if frame.pc == frame.lr && frame.sp == frame.fp {
		// If the next frame is identical to the current frame, we cannot make progress.
		print("runtime: traceback stuck. pc=", hex(frame.pc), " sp=", hex(frame.sp), "\n")
		tracebackHexdump(gp.Stack, frame, frame.sp)
		assert.Throw("traceback", "stuck")
	}

	injectedCall := f.FuncID == abi.FuncID_sigpanic || f.FuncID == abi.FuncID_asyncPreempt || f.FuncID == abi.FuncID_debugCallV2
	if injectedCall {
		u.flags |= unwindTrap
	} else {
		u.flags &^= unwindTrap
	}

	// Unwind to next frame.
	u.calleeFuncID = f.FuncID
	frame.fn = flr
	frame.pc = frame.lr
	frame.lr = 0
	frame.sp = frame.fp
	frame.fp = 0

	// On link register architectures, sighandler saves the LR on stack
	// before faking a call.
	if usesLR && injectedCall {
		x := *(*uintptr)(unsafe.Pointer(frame.sp))
		frame.sp += num.AlignUp[uintptr](arch.MinFrameSize, arch.StackAlign)
		f = abi.FindFunc(frame.pc)
		frame.fn = f
		if !f.Valid() {
			frame.pc = x
		} else if funcspdelta(f, frame.pc, &u.cache) == 0 {
			frame.lr = x
		}
	}

	u.resolveInternal(false, false)
}

// tracebackHexdump hexdumps part of stk around frame.sp and frame.fp
// for debugging purposes. If the address bad is included in the
// hexdumped range, it will be marked as well.
func tracebackHexdump(stk stdgo.Stack, frame *Frame, bad uintptr) {
	const expand = 32 * arch.PtrSize
	const maxExpand = 256 * arch.PtrSize
	// Start around frame.sp.
	lo, hi := frame.sp, frame.sp
	// Expand to include frame.fp.
	if frame.fp != 0 && frame.fp < lo {
		lo = frame.fp
	}
	if frame.fp != 0 && frame.fp > hi {
		hi = frame.fp
	}
	// Expand a bit more.
	lo, hi = lo-expand, hi+expand
	// But don't go too far from frame.sp.
	if lo < frame.sp-maxExpand {
		lo = frame.sp - maxExpand
	}
	if hi > frame.sp+maxExpand {
		hi = frame.sp + maxExpand
	}
	// And don't go outside the stack bounds.
	if lo < stk.Lo {
		lo = stk.Lo
	}
	if hi > stk.Hi {
		hi = stk.Hi
	}

	// Print the hex dump.
	print("stack: frame={sp:", hex(frame.sp), ", fp:", hex(frame.fp), "} stack=[", hex(stk.Lo), ",", hex(stk.Hi), ")\n")
	hexdumpWords(lo, hi, func(p uintptr) byte {
		switch p {
		case frame.fp:
			return '>'
		case frame.sp:
			return '<'
		case bad:
			return '!'
		}
		return 0
	})
}

// hexdumpWords prints a word-oriented hex dump of [p, end).
//
// If mark != nil, it will be called with each printed word's address
// and should return a character mark to appear just before that
// word's value. It can return 0 to indicate no mark.
func hexdumpWords(p, end uintptr, mark func(uintptr) byte) {
	stdprint.PrintLock()
	var markbuf [1]byte
	markbuf[0] = ' '

	for i := uintptr(0); p+i < end; i += arch.PtrSize {
		if i%16 == 0 {
			if i != 0 {
				println()
			}
			print(hex(p+i), ": ")
		}

		if mark != nil {
			markbuf[0] = mark(p + i)
			if markbuf[0] == 0 {
				markbuf[0] = ' '
			}
		}
		gwrite(markbuf[:])
		val := *(*uintptr)(unsafe.Pointer(p + i))
		print(hex(val))
		print(" ")

		// Can we symbolize val?
		fn := abi.FindFunc(val)
		if fn.Valid() {
			print("<", fn.Name(), "+", hex(val-fn.Entry()), "> ")
		}
	}

	println()
	stdprint.PrintUnlock()
}

// finishInternal is an unwinder-internal helper called after the stack has been
// exhausted. It sets the unwinder to an invalid state and checks that it
// successfully unwound the entire stack.
func (u *unwinder) finishInternal() {
	u.frame.pc = 0

	// Note that panic != nil is okay here: there can be leftover panics,
	// because the defers on the panic stack do not nest in frame order as
	// they do on the defer stack. If you have:
	//
	//	frame 1 defers d1
	//	frame 2 defers d2
	//	frame 3 defers d3
	//	frame 4 panics
	//	frame 4's panic starts running defers
	//	frame 5, running d3, defers d4
	//	frame 5 panics
	//	frame 5's panic starts running defers
	//	frame 6, running d4, garbage collects
	//	frame 6, running d2, garbage collects
	//
	// During the execution of d4, the panic stack is d4 -> d3, which
	// is nested properly, and we'll treat frame 3 as resumable, because we
	// can find d3. (And in fact frame 3 is resumable. If d4 recovers
	// and frame 5 continues running d3, d3 can recover and we'll
	// resume execution in (returning from) frame 3.)
	//
	// During the execution of d2, however, the panic stack is d2 -> d3,
	// which is inverted. The scan will match d2 to frame 2 but having
	// d2 on the stack until then means it will not match d3 to frame 3.
	// This is okay: if we're running d2, then all the defers after d2 have
	// completed and their corresponding frames are dead. Not finding d3
	// for frame 3 means we'll set frame 3's continpc == 0, which is correct
	// (frame 3 is dead). At the end of the walk the panic stack can thus
	// contain defers (d3 in this case) for dead frames. The inversion here
	// always indicates a dead frame, and the effect of the inversion on the
	// scan is to hide those dead frames, so the scan is still okay:
	// what's left on the panic stack are exactly (and only) the dead frames.
	//
	// We require callback != nil here because only when callback != nil
	// do we know that gentraceback is being called in a "must be correct"
	// context as opposed to a "best effort" context. The tracebacks with
	// callbacks only happen when everything is stopped nicely.
	// At other times, such as when gathering a stack for a profiling signal
	// or when printing a traceback during a crash, everything may not be
	// stopped nicely, and the stack walk may not be able to complete.
	gp := u.g.Ptr()
	if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.StacktopSP {
		print("runtime: g", gp.ID_, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.StacktopSP), "\n")
		print("\tstack=[", hex(gp.Stack.Lo), "-", hex(gp.Stack.Hi), "]\n")
		assert.Throw("traceback", "did", "not", "unwind", "completely")
	}
}

// symPC returns the PC that should be used for symbolizing the current frame.
// Specifically, this is the PC of the last instruction executed in this frame.
//
// If this frame did a normal call, then frame.pc is a return PC, so this will
// return frame.pc-1, which points into the CALL instruction. If the frame was
// interrupted by a signal (e.g., profiler, segv, etc) then frame.pc is for the
// trapped instruction, so this returns frame.pc. See issue #34123. Finally,
// frame.pc can be at function entry when the frame is initialized without
// actually running code, like in runtime.mstart, in which case this returns
// frame.pc because that's the best we can do.
func (u *unwinder) symPC() uintptr {
	if u.flags&unwindTrap == 0 && u.frame.pc > u.frame.fn.Entry() {
		// Regular call.
		return u.frame.pc - 1
	}
	// Trapping instruction or we're at the function entry point.
	return u.frame.pc
}
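// For example (addresses hypothetical): for a frame created by a normal
// CALL whose return address is 0x4021b5, symPC returns 0x4021b4, which
// points back into the CALL instruction and so symbolizes to the call site
// rather than to the instruction after it.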

// cgoCallers populates pcBuf with the cgo callers of the current frame using
// the registered cgo unwinder. It returns the number of PCs written to pcBuf.
// If the current frame is not a cgo frame or if there's no registered cgo
// unwinder, it returns 0.
func (u *unwinder) cgoCallers(pcBuf []uintptr) int {
	if cgoTraceback == nil || u.frame.fn.FuncID != abi.FuncID_cgocallback || u.cgoCtxt < 0 {
		// We don't have a cgo unwinder (typical case), or we do but we're not
		// in a cgo frame or we're out of cgo context.
		return 0
	}

	ctxt := u.g.Ptr().cgoCtxt[u.cgoCtxt]
	u.cgoCtxt--
	cgoContextPCs(ctxt, pcBuf)
	for i, pc := range pcBuf {
		if pc == 0 {
			return i
		}
	}
	return len(pcBuf)
}
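// A hedged sketch of how a walk might interleave cgo frames (cgoBuf's size
// is arbitrary here; what "record" means is left to the caller):
//
//	var cgoBuf [32]uintptr
//	for u.init(gp, 0); u.valid(); u.next() {
//		// ... record u.symPC() for the Go frame ...
//		for _, pc := range cgoBuf[:u.cgoCallers(cgoBuf[:])] {
//			// ... record pc for the C frame ...
//		}
//	}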