github.com/twelsh-aw/go/src@v0.0.0-20230516233729-a56fe86a7c81/runtime/stack.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import (
     8  	"internal/abi"
     9  	"internal/cpu"
    10  	"internal/goarch"
    11  	"internal/goos"
    12  	"runtime/internal/atomic"
    13  	"runtime/internal/sys"
    14  	"unsafe"
    15  )
    16  
    17  /*
    18  Stack layout parameters.
     19  Mirrored in cmd/internal/objabi/stack.go; the two must be kept in sync.
    20  
    21  The per-goroutine g->stackguard is set to point StackGuard bytes
    22  above the bottom of the stack.  Each function compares its stack
    23  pointer against g->stackguard to check for overflow.  To cut one
    24  instruction from the check sequence for functions with tiny frames,
    25  the stack is allowed to protrude StackSmall bytes below the stack
    26  guard.  Functions with large frames don't bother with the check and
    27  always call morestack.  The sequences are (for amd64, others are
    28  similar):
    29  
    30  	guard = g->stackguard
    31  	frame = function's stack frame size
    32  	argsize = size of function arguments (call + return)
    33  
    34  	stack frame size <= StackSmall:
    35  		CMPQ guard, SP
    36  		JHI 3(PC)
    37  		MOVQ m->morearg, $(argsize << 32)
    38  		CALL morestack(SB)
    39  
     40  	stack frame size > StackSmall but < StackBig:
    41  		LEAQ (frame-StackSmall)(SP), R0
    42  		CMPQ guard, R0
    43  		JHI 3(PC)
    44  		MOVQ m->morearg, $(argsize << 32)
    45  		CALL morestack(SB)
    46  
    47  	stack frame size >= StackBig:
    48  		MOVQ m->morearg, $((argsize << 32) | frame)
    49  		CALL morestack(SB)
    50  
    51  The bottom StackGuard - StackSmall bytes are important: there has
    52  to be enough room to execute functions that refuse to check for
    53  stack overflow, either because they need to be adjacent to the
    54  actual caller's frame (deferproc) or because they handle the imminent
    55  stack overflow (morestack).
    56  
    57  For example, deferproc might call malloc, which does one of the
    58  above checks (without allocating a full frame), which might trigger
    59  a call to morestack.  This sequence needs to fit in the bottom
    60  section of the stack.  On amd64, morestack's frame is 40 bytes, and
    61  deferproc's frame is 56 bytes.  That fits well within the
    62  StackGuard - StackSmall bytes at the bottom.
    63  The linkers explore all possible call traces involving non-splitting
    64  functions to make sure that this limit cannot be violated.
    65  */
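
         // A rough Go-like sketch of the small-frame check described above (not the
         // literal generated code): the prologue compares SP against the guard and
         // calls morestack when the frame would not fit. stackguard0 is the copy of
         // the guard that the prologue actually checks (see the sentinels below).
         //
         //	if sp <= g.stackguard0 {
         //		morestack() // grow the stack, then restart the function
         //	}
         //
         // Larger frames fold (framesize - StackSmall) into the comparison, and
         // frames of at least StackBig skip the check and always call morestack.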
    66  
    67  const (
    68  	// stackSystem is a number of additional bytes to add
    69  	// to each stack below the usual guard area for OS-specific
    70  	// purposes like signal handling. Used on Windows, Plan 9,
    71  	// and iOS because they do not use a separate stack.
    72  	stackSystem = goos.IsWindows*512*goarch.PtrSize + goos.IsPlan9*512 + goos.IsIos*goarch.IsArm64*1024
    73  
     74  	// The minimum size of stack used by Go code.
    75  	stackMin = 2048
    76  
    77  	// The minimum stack size to allocate.
    78  	// The hackery here rounds fixedStack0 up to a power of 2.
    79  	fixedStack0 = stackMin + stackSystem
    80  	fixedStack1 = fixedStack0 - 1
    81  	fixedStack2 = fixedStack1 | (fixedStack1 >> 1)
    82  	fixedStack3 = fixedStack2 | (fixedStack2 >> 2)
    83  	fixedStack4 = fixedStack3 | (fixedStack3 >> 4)
    84  	fixedStack5 = fixedStack4 | (fixedStack4 >> 8)
    85  	fixedStack6 = fixedStack5 | (fixedStack5 >> 16)
    86  	fixedStack  = fixedStack6 + 1
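         	// The six lines above are the usual bit-smearing trick for rounding up to a
         	// power of two. Worked through with the constants above: on ios/arm64,
         	// stackSystem is 1024, so fixedStack0 = 3072; 3071 is 0b1011_1111_1111, the
         	// shifted ORs smear it to 0b1111_1111_1111 = 4095, and adding 1 gives
         	// fixedStack = 4096. Where stackSystem is 0, fixedStack0 = 2048 is already
         	// a power of two and comes out unchanged.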
    87  
    88  	// stackNosplit is the maximum number of bytes that a chain of NOSPLIT
    89  	// functions can use.
    90  	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackNosplit.
    91  	stackNosplit = abi.StackNosplitBase * sys.StackGuardMultiplier
    92  
    93  	// The stack guard is a pointer this many bytes above the
    94  	// bottom of the stack.
    95  	//
    96  	// The guard leaves enough room for a stackNosplit chain of NOSPLIT calls
    97  	// plus one stackSmall frame plus stackSystem bytes for the OS.
    98  	// This arithmetic must match that in cmd/internal/objabi/stack.go:StackLimit.
    99  	stackGuard = stackNosplit + stackSystem + abi.StackSmall
   100  )
   101  
   102  const (
   103  	// stackDebug == 0: no logging
   104  	//            == 1: logging of per-stack operations
   105  	//            == 2: logging of per-frame operations
   106  	//            == 3: logging of per-word updates
   107  	//            == 4: logging of per-word reads
   108  	stackDebug       = 0
   109  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
   110  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
   111  	stackNoCache     = 0 // disable per-P small stack caches
   112  
   113  	// check the BP links during traceback.
   114  	debugCheckBP = false
   115  )
   116  
   117  var (
   118  	stackPoisonCopy = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
   119  )
   120  
   121  const (
   122  	uintptrMask = 1<<(8*goarch.PtrSize) - 1
   123  
   124  	// The values below can be stored to g.stackguard0 to force
   125  	// the next stack check to fail.
   126  	// These are all larger than any real SP.
   127  
   128  	// Goroutine preemption request.
   129  	// 0xfffffade in hex.
   130  	stackPreempt = uintptrMask & -1314
   131  
   132  	// Thread is forking. Causes a split stack check failure.
   133  	// 0xfffffb2e in hex.
   134  	stackFork = uintptrMask & -1234
   135  
   136  	// Force a stack movement. Used for debugging.
   137  	// 0xfffffeed in hex.
   138  	stackForceMove = uintptrMask & -275
   139  
   140  	// stackPoisonMin is the lowest allowed stack poison value.
   141  	stackPoisonMin = uintptrMask & -4096
   142  )
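
         // For illustration (derived from the definitions above): stackPreempt is
         // uintptrMask & -1314, i.e. 0xfffffade on 32-bit and 0xfffffffffffffade on
         // 64-bit. Because these sentinels are larger than any real stack pointer,
         // storing one in g.stackguard0 makes the next prologue check fail and routes
         // the goroutine through morestack.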
   143  
   144  // Global pool of spans that have free stacks.
   145  // Stacks are assigned an order according to size.
   146  //
    147  //	order = log_2(size/fixedStack)
   148  //
   149  // There is a free list for each order.
   150  var stackpool [_NumStackOrders]struct {
   151  	item stackpoolItem
   152  	_    [(cpu.CacheLinePadSize - unsafe.Sizeof(stackpoolItem{})%cpu.CacheLinePadSize) % cpu.CacheLinePadSize]byte
   153  }
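
         // As a concrete reading of the order formula above (assuming fixedStack is
         // 2048 and four stack orders, as on Linux; see _NumStackOrders in malloc.go):
         // the pools hold 2 KiB, 4 KiB, 8 KiB, and 16 KiB stacks, and an 8 KiB stack
         // belongs to the order-2 pool since log_2(8192/2048) = 2.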
   154  
   155  type stackpoolItem struct {
   156  	_    sys.NotInHeap
   157  	mu   mutex
   158  	span mSpanList
   159  }
   160  
   161  // Global pool of large stack spans.
   162  var stackLarge struct {
   163  	lock mutex
   164  	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
   165  }
   166  
   167  func stackinit() {
   168  	if _StackCacheSize&_PageMask != 0 {
   169  		throw("cache size must be a multiple of page size")
   170  	}
   171  	for i := range stackpool {
   172  		stackpool[i].item.span.init()
   173  		lockInit(&stackpool[i].item.mu, lockRankStackpool)
   174  	}
   175  	for i := range stackLarge.free {
   176  		stackLarge.free[i].init()
   177  		lockInit(&stackLarge.lock, lockRankStackLarge)
   178  	}
   179  }
   180  
   181  // stacklog2 returns ⌊log_2(n)⌋.
   182  func stacklog2(n uintptr) int {
   183  	log2 := 0
   184  	for n > 1 {
   185  		n >>= 1
   186  		log2++
   187  	}
   188  	return log2
   189  }
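
         // For example, stacklog2(1) == 0, stacklog2(4) == 2, and stacklog2(5) == 2:
         // the loop shifts n right until it reaches 1, so the result is rounded down.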
   190  
   191  // Allocates a stack from the free pool. Must be called with
   192  // stackpool[order].item.mu held.
   193  func stackpoolalloc(order uint8) gclinkptr {
   194  	list := &stackpool[order].item.span
   195  	s := list.first
   196  	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   197  	if s == nil {
   198  		// no free stacks. Allocate another span worth.
   199  		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
   200  		if s == nil {
   201  			throw("out of memory")
   202  		}
   203  		if s.allocCount != 0 {
   204  			throw("bad allocCount")
   205  		}
   206  		if s.manualFreeList.ptr() != nil {
   207  			throw("bad manualFreeList")
   208  		}
   209  		osStackAlloc(s)
   210  		s.elemsize = fixedStack << order
   211  		for i := uintptr(0); i < _StackCacheSize; i += s.elemsize {
   212  			x := gclinkptr(s.base() + i)
   213  			x.ptr().next = s.manualFreeList
   214  			s.manualFreeList = x
   215  		}
   216  		list.insert(s)
   217  	}
   218  	x := s.manualFreeList
   219  	if x.ptr() == nil {
   220  		throw("span has no free stacks")
   221  	}
   222  	s.manualFreeList = x.ptr().next
   223  	s.allocCount++
   224  	if s.manualFreeList.ptr() == nil {
   225  		// all stacks in s are allocated.
   226  		list.remove(s)
   227  	}
   228  	return x
   229  }
   230  
   231  // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held.
   232  func stackpoolfree(x gclinkptr, order uint8) {
   233  	s := spanOfUnchecked(uintptr(x))
   234  	if s.state.get() != mSpanManual {
   235  		throw("freeing stack not in a stack span")
   236  	}
   237  	if s.manualFreeList.ptr() == nil {
   238  		// s will now have a free stack
   239  		stackpool[order].item.span.insert(s)
   240  	}
   241  	x.ptr().next = s.manualFreeList
   242  	s.manualFreeList = x
   243  	s.allocCount--
   244  	if gcphase == _GCoff && s.allocCount == 0 {
   245  		// Span is completely free. Return it to the heap
   246  		// immediately if we're sweeping.
   247  		//
   248  		// If GC is active, we delay the free until the end of
   249  		// GC to avoid the following type of situation:
   250  		//
   251  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   252  		// 2) The stack that pointer points to is copied
   253  		// 3) The old stack is freed
   254  		// 4) The containing span is marked free
   255  		// 5) GC attempts to mark the SudoG.elem pointer. The
   256  		//    marking fails because the pointer looks like a
   257  		//    pointer into a free span.
   258  		//
   259  		// By not freeing, we prevent step #4 until GC is done.
   260  		stackpool[order].item.span.remove(s)
   261  		s.manualFreeList = 0
   262  		osStackFree(s)
   263  		mheap_.freeManual(s, spanAllocStack)
   264  	}
   265  }
   266  
   267  // stackcacherefill/stackcacherelease implement a global pool of stack segments.
   268  // The pool is required to prevent unlimited growth of per-thread caches.
   269  //
   270  //go:systemstack
   271  func stackcacherefill(c *mcache, order uint8) {
   272  	if stackDebug >= 1 {
   273  		print("stackcacherefill order=", order, "\n")
   274  	}
   275  
   276  	// Grab some stacks from the global cache.
   277  	// Grab half of the allowed capacity (to prevent thrashing).
   278  	var list gclinkptr
   279  	var size uintptr
   280  	lock(&stackpool[order].item.mu)
   281  	for size < _StackCacheSize/2 {
   282  		x := stackpoolalloc(order)
   283  		x.ptr().next = list
   284  		list = x
   285  		size += fixedStack << order
   286  	}
   287  	unlock(&stackpool[order].item.mu)
   288  	c.stackcache[order].list = list
   289  	c.stackcache[order].size = size
   290  }
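
         // For a sense of scale (assuming fixedStack is 2048 and _StackCacheSize is
         // 32 KiB, its value elsewhere in the runtime): a refill of order 0 pulls
         // 2 KiB stacks from the global pool until the per-P cache holds at least
         // 16 KiB, i.e. eight stacks.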
   291  
   292  //go:systemstack
   293  func stackcacherelease(c *mcache, order uint8) {
   294  	if stackDebug >= 1 {
   295  		print("stackcacherelease order=", order, "\n")
   296  	}
   297  	x := c.stackcache[order].list
   298  	size := c.stackcache[order].size
   299  	lock(&stackpool[order].item.mu)
   300  	for size > _StackCacheSize/2 {
   301  		y := x.ptr().next
   302  		stackpoolfree(x, order)
   303  		x = y
   304  		size -= fixedStack << order
   305  	}
   306  	unlock(&stackpool[order].item.mu)
   307  	c.stackcache[order].list = x
   308  	c.stackcache[order].size = size
   309  }
   310  
   311  //go:systemstack
   312  func stackcache_clear(c *mcache) {
   313  	if stackDebug >= 1 {
   314  		print("stackcache clear\n")
   315  	}
   316  	for order := uint8(0); order < _NumStackOrders; order++ {
   317  		lock(&stackpool[order].item.mu)
   318  		x := c.stackcache[order].list
   319  		for x.ptr() != nil {
   320  			y := x.ptr().next
   321  			stackpoolfree(x, order)
   322  			x = y
   323  		}
   324  		c.stackcache[order].list = 0
   325  		c.stackcache[order].size = 0
   326  		unlock(&stackpool[order].item.mu)
   327  	}
   328  }
   329  
   330  // stackalloc allocates an n byte stack.
   331  //
   332  // stackalloc must run on the system stack because it uses per-P
   333  // resources and must not split the stack.
   334  //
   335  //go:systemstack
   336  func stackalloc(n uint32) stack {
   337  	// Stackalloc must be called on scheduler stack, so that we
   338  	// never try to grow the stack during the code that stackalloc runs.
   339  	// Doing so would cause a deadlock (issue 1547).
   340  	thisg := getg()
   341  	if thisg != thisg.m.g0 {
   342  		throw("stackalloc not on scheduler stack")
   343  	}
   344  	if n&(n-1) != 0 {
   345  		throw("stack size not a power of 2")
   346  	}
   347  	if stackDebug >= 1 {
   348  		print("stackalloc ", n, "\n")
   349  	}
   350  
   351  	if debug.efence != 0 || stackFromSystem != 0 {
   352  		n = uint32(alignUp(uintptr(n), physPageSize))
   353  		v := sysAlloc(uintptr(n), &memstats.stacks_sys)
   354  		if v == nil {
   355  			throw("out of memory (stackalloc)")
   356  		}
   357  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   358  	}
   359  
   360  	// Small stacks are allocated with a fixed-size free-list allocator.
   361  	// If we need a stack of a bigger size, we fall back on allocating
   362  	// a dedicated span.
   363  	var v unsafe.Pointer
   364  	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
   365  		order := uint8(0)
   366  		n2 := n
   367  		for n2 > fixedStack {
   368  			order++
   369  			n2 >>= 1
   370  		}
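         		// For example (a sketch of the arithmetic above): with fixedStack = 2048,
         		// a request of n = 8192 halves n2 twice, giving order = 2, so the stack is
         		// carved from the pool of 2048<<2 = 8192-byte elements.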
   371  		var x gclinkptr
   372  		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" {
   373  			// thisg.m.p == 0 can happen in the guts of exitsyscall
   374  			// or procresize. Just get a stack from the global pool.
   375  			// Also don't touch stackcache during gc
   376  			// as it's flushed concurrently.
   377  			lock(&stackpool[order].item.mu)
   378  			x = stackpoolalloc(order)
   379  			unlock(&stackpool[order].item.mu)
   380  		} else {
   381  			c := thisg.m.p.ptr().mcache
   382  			x = c.stackcache[order].list
   383  			if x.ptr() == nil {
   384  				stackcacherefill(c, order)
   385  				x = c.stackcache[order].list
   386  			}
   387  			c.stackcache[order].list = x.ptr().next
   388  			c.stackcache[order].size -= uintptr(n)
   389  		}
   390  		v = unsafe.Pointer(x)
   391  	} else {
   392  		var s *mspan
   393  		npage := uintptr(n) >> _PageShift
   394  		log2npage := stacklog2(npage)
   395  
   396  		// Try to get a stack from the large stack cache.
   397  		lock(&stackLarge.lock)
   398  		if !stackLarge.free[log2npage].isEmpty() {
   399  			s = stackLarge.free[log2npage].first
   400  			stackLarge.free[log2npage].remove(s)
   401  		}
   402  		unlock(&stackLarge.lock)
   403  
   404  		lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
   405  
   406  		if s == nil {
   407  			// Allocate a new stack from the heap.
   408  			s = mheap_.allocManual(npage, spanAllocStack)
   409  			if s == nil {
   410  				throw("out of memory")
   411  			}
   412  			osStackAlloc(s)
   413  			s.elemsize = uintptr(n)
   414  		}
   415  		v = unsafe.Pointer(s.base())
   416  	}
   417  
   418  	if raceenabled {
   419  		racemalloc(v, uintptr(n))
   420  	}
   421  	if msanenabled {
   422  		msanmalloc(v, uintptr(n))
   423  	}
   424  	if asanenabled {
   425  		asanunpoison(v, uintptr(n))
   426  	}
   427  	if stackDebug >= 1 {
   428  		print("  allocated ", v, "\n")
   429  	}
   430  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   431  }
   432  
   433  // stackfree frees an n byte stack allocation at stk.
   434  //
   435  // stackfree must run on the system stack because it uses per-P
   436  // resources and must not split the stack.
   437  //
   438  //go:systemstack
   439  func stackfree(stk stack) {
   440  	gp := getg()
   441  	v := unsafe.Pointer(stk.lo)
   442  	n := stk.hi - stk.lo
   443  	if n&(n-1) != 0 {
   444  		throw("stack not a power of 2")
   445  	}
   446  	if stk.lo+n < stk.hi {
   447  		throw("bad stack size")
   448  	}
   449  	if stackDebug >= 1 {
   450  		println("stackfree", v, n)
   451  		memclrNoHeapPointers(v, n) // for testing, clobber stack data
   452  	}
   453  	if debug.efence != 0 || stackFromSystem != 0 {
   454  		if debug.efence != 0 || stackFaultOnFree != 0 {
   455  			sysFault(v, n)
   456  		} else {
   457  			sysFree(v, n, &memstats.stacks_sys)
   458  		}
   459  		return
   460  	}
   461  	if msanenabled {
   462  		msanfree(v, n)
   463  	}
   464  	if asanenabled {
   465  		asanpoison(v, n)
   466  	}
   467  	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize {
   468  		order := uint8(0)
   469  		n2 := n
   470  		for n2 > fixedStack {
   471  			order++
   472  			n2 >>= 1
   473  		}
   474  		x := gclinkptr(v)
   475  		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" {
   476  			lock(&stackpool[order].item.mu)
   477  			stackpoolfree(x, order)
   478  			unlock(&stackpool[order].item.mu)
   479  		} else {
   480  			c := gp.m.p.ptr().mcache
   481  			if c.stackcache[order].size >= _StackCacheSize {
   482  				stackcacherelease(c, order)
   483  			}
   484  			x.ptr().next = c.stackcache[order].list
   485  			c.stackcache[order].list = x
   486  			c.stackcache[order].size += n
   487  		}
   488  	} else {
   489  		s := spanOfUnchecked(uintptr(v))
   490  		if s.state.get() != mSpanManual {
   491  			println(hex(s.base()), v)
   492  			throw("bad span state")
   493  		}
   494  		if gcphase == _GCoff {
   495  			// Free the stack immediately if we're
   496  			// sweeping.
   497  			osStackFree(s)
   498  			mheap_.freeManual(s, spanAllocStack)
   499  		} else {
   500  			// If the GC is running, we can't return a
   501  			// stack span to the heap because it could be
   502  			// reused as a heap span, and this state
   503  			// change would race with GC. Add it to the
   504  			// large stack cache instead.
   505  			log2npage := stacklog2(s.npages)
   506  			lock(&stackLarge.lock)
   507  			stackLarge.free[log2npage].insert(s)
   508  			unlock(&stackLarge.lock)
   509  		}
   510  	}
   511  }
   512  
   513  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   514  
   515  var maxstackceiling = maxstacksize
   516  
   517  var ptrnames = []string{
   518  	0: "scalar",
   519  	1: "ptr",
   520  }
   521  
   522  // Stack frame layout
   523  //
   524  // (x86)
   525  // +------------------+
   526  // | args from caller |
   527  // +------------------+ <- frame->argp
   528  // |  return address  |
   529  // +------------------+
   530  // |  caller's BP (*) | (*) if framepointer_enabled && varp > sp
   531  // +------------------+ <- frame->varp
   532  // |     locals       |
   533  // +------------------+
   534  // |  args to callee  |
   535  // +------------------+ <- frame->sp
   536  //
   537  // (arm)
   538  // +------------------+
   539  // | args from caller |
   540  // +------------------+ <- frame->argp
   541  // | caller's retaddr |
   542  // +------------------+
   543  // |  caller's FP (*) | (*) on ARM64, if framepointer_enabled && varp > sp
   544  // +------------------+ <- frame->varp
   545  // |     locals       |
   546  // +------------------+
   547  // |  args to callee  |
   548  // +------------------+
   549  // |  return address  |
   550  // +------------------+ <- frame->sp
   551  //
   552  // varp > sp means that the function has a frame;
    553  // varp == sp means the function is frameless.
   554  
   555  type adjustinfo struct {
   556  	old   stack
   557  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   558  	cache pcvalueCache
   559  
   560  	// sghi is the highest sudog.elem on the stack.
   561  	sghi uintptr
   562  }
   563  
   564  // adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   565  // If so, it rewrites *vpp to point into the new stack.
   566  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   567  	pp := (*uintptr)(vpp)
   568  	p := *pp
   569  	if stackDebug >= 4 {
   570  		print("        ", pp, ":", hex(p), "\n")
   571  	}
   572  	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
   573  		*pp = p + adjinfo.delta
   574  		if stackDebug >= 3 {
   575  			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
   576  		}
   577  	}
   578  }
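
         // A worked example with hypothetical addresses: if the old stack is
         // [0x1000, 0x3000) and the new one is [0x8000, 0xc000), copystack sets
         // delta = 0xc000 - 0x3000 = 0x9000, so a saved pointer 0x2f00 (0x100 below
         // old.hi) is rewritten to 0xbf00 (0x100 below new.hi). Pointers outside the
         // old stack are left untouched.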
   579  
   580  // Information from the compiler about the layout of stack frames.
   581  // Note: this type must agree with reflect.bitVector.
   582  type bitvector struct {
   583  	n        int32 // # of bits
   584  	bytedata *uint8
   585  }
   586  
   587  // ptrbit returns the i'th bit in bv.
   588  // ptrbit is less efficient than iterating directly over bitvector bits,
   589  // and should only be used in non-performance-critical code.
   590  // See adjustpointers for an example of a high-efficiency walk of a bitvector.
   591  func (bv *bitvector) ptrbit(i uintptr) uint8 {
   592  	b := *(addb(bv.bytedata, i/8))
   593  	return (b >> (i % 8)) & 1
   594  }
   595  
   596  // bv describes the memory starting at address scanp.
   597  // Adjust any pointers contained therein.
   598  func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) {
   599  	minp := adjinfo.old.lo
   600  	maxp := adjinfo.old.hi
   601  	delta := adjinfo.delta
   602  	num := uintptr(bv.n)
   603  	// If this frame might contain channel receive slots, use CAS
   604  	// to adjust pointers. If the slot hasn't been received into
   605  	// yet, it may contain stack pointers and a concurrent send
   606  	// could race with adjusting those pointers. (The sent value
   607  	// itself can never contain stack pointers.)
   608  	useCAS := uintptr(scanp) < adjinfo.sghi
   609  	for i := uintptr(0); i < num; i += 8 {
   610  		if stackDebug >= 4 {
   611  			for j := uintptr(0); j < 8; j++ {
   612  				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n")
   613  			}
   614  		}
   615  		b := *(addb(bv.bytedata, i/8))
   616  		for b != 0 {
   617  			j := uintptr(sys.TrailingZeros8(b))
   618  			b &= b - 1
   619  			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize))
   620  		retry:
   621  			p := *pp
   622  			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 {
   623  				// Looks like a junk value in a pointer slot.
   624  				// Live analysis wrong?
   625  				getg().m.traceback = 2
   626  				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
   627  				throw("invalid pointer found on stack")
   628  			}
   629  			if minp <= p && p < maxp {
   630  				if stackDebug >= 3 {
   631  					print("adjust ptr ", hex(p), " ", funcname(f), "\n")
   632  				}
   633  				if useCAS {
   634  					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
   635  					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
   636  						goto retry
   637  					}
   638  				} else {
   639  					*pp = p + delta
   640  				}
   641  			}
   642  		}
   643  	}
   644  }
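
         // To illustrate the bitmap walk above: if a byte of bv.bytedata is
         // 0b00010100, TrailingZeros8 yields j = 2, b &= b-1 clears that bit leaving
         // 0b00010000, the next pass yields j = 4, and the loop ends; only the two
         // slots whose bits are set are treated as pointers.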
   645  
   646  // Note: the argument/return area is adjusted by the callee.
   647  func adjustframe(frame *stkframe, adjinfo *adjustinfo) {
   648  	if frame.continpc == 0 {
   649  		// Frame is dead.
   650  		return
   651  	}
   652  	f := frame.fn
   653  	if stackDebug >= 2 {
   654  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   655  	}
   656  
   657  	// Adjust saved frame pointer if there is one.
   658  	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize {
   659  		if stackDebug >= 3 {
   660  			print("      saved bp\n")
   661  		}
   662  		if debugCheckBP {
   663  			// Frame pointers should always point to the next higher frame on
   664  			// the Go stack (or be nil, for the top frame on the stack).
   665  			bp := *(*uintptr)(unsafe.Pointer(frame.varp))
   666  			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   667  				println("runtime: found invalid frame pointer")
   668  				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   669  				throw("bad frame pointer")
   670  			}
   671  		}
   672  		// On AMD64, this is the caller's frame pointer saved in the current
   673  		// frame.
   674  		// On ARM64, this is the frame pointer of the caller's caller saved
   675  		// by the caller in its frame (one word below its SP).
   676  		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
   677  	}
   678  
   679  	locals, args, objs := frame.getStackMap(&adjinfo.cache, true)
   680  
   681  	// Adjust local variables if stack frame has been allocated.
   682  	if locals.n > 0 {
   683  		size := uintptr(locals.n) * goarch.PtrSize
   684  		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f)
   685  	}
   686  
   687  	// Adjust arguments.
   688  	if args.n > 0 {
   689  		if stackDebug >= 3 {
   690  			print("      args\n")
   691  		}
   692  		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{})
   693  	}
   694  
   695  	// Adjust pointers in all stack objects (whether they are live or not).
   696  	// See comments in mgcmark.go:scanframeworker.
   697  	if frame.varp != 0 {
   698  		for i := range objs {
   699  			obj := &objs[i]
   700  			off := obj.off
   701  			base := frame.varp // locals base pointer
   702  			if off >= 0 {
   703  				base = frame.argp // arguments and return values base pointer
   704  			}
   705  			p := base + uintptr(off)
   706  			if p < frame.sp {
   707  				// Object hasn't been allocated in the frame yet.
   708  				// (Happens when the stack bounds check fails and
   709  				// we call into morestack.)
   710  				continue
   711  			}
   712  			ptrdata := obj.ptrdata()
   713  			gcdata := obj.gcdata()
   714  			var s *mspan
   715  			if obj.useGCProg() {
   716  				// See comments in mgcmark.go:scanstack
   717  				s = materializeGCProg(ptrdata, gcdata)
   718  				gcdata = (*byte)(unsafe.Pointer(s.startAddr))
   719  			}
   720  			for i := uintptr(0); i < ptrdata; i += goarch.PtrSize {
   721  				if *addb(gcdata, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 {
   722  					adjustpointer(adjinfo, unsafe.Pointer(p+i))
   723  				}
   724  			}
   725  			if s != nil {
   726  				dematerializeGCProg(s)
   727  			}
   728  		}
   729  	}
   730  }
   731  
   732  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   733  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
   734  	if !framepointer_enabled {
   735  		return
   736  	}
   737  	if debugCheckBP {
   738  		bp := gp.sched.bp
   739  		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) {
   740  			println("runtime: found invalid top frame pointer")
   741  			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n")
   742  			throw("bad top frame pointer")
   743  		}
   744  	}
   745  	oldfp := gp.sched.bp
   746  	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))
   747  	if GOARCH == "arm64" {
   748  		// On ARM64, the frame pointer is saved one word *below* the SP,
   749  		// which is not copied or adjusted in any frame. Do it explicitly
   750  		// here.
   751  		if oldfp == gp.sched.sp-goarch.PtrSize {
   752  			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize)
   753  			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))
   754  		}
   755  	}
   756  }
   757  
   758  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   759  	// Adjust pointers in the Defer structs.
   760  	// We need to do this first because we need to adjust the
   761  	// defer.link fields so we always work on the new stack.
   762  	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer))
   763  	for d := gp._defer; d != nil; d = d.link {
   764  		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
   765  		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
   766  		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
   767  		adjustpointer(adjinfo, unsafe.Pointer(&d.link))
   768  		adjustpointer(adjinfo, unsafe.Pointer(&d.varp))
   769  		adjustpointer(adjinfo, unsafe.Pointer(&d.fd))
   770  	}
   771  }
   772  
   773  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   774  	// Panics are on stack and already adjusted.
   775  	// Update pointer to head of list in G.
   776  	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
   777  }
   778  
   779  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   780  	// the data elements pointed to by a SudoG structure
   781  	// might be in the stack.
   782  	for s := gp.waiting; s != nil; s = s.waitlink {
   783  		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
   784  	}
   785  }
   786  
   787  func fillstack(stk stack, b byte) {
   788  	for p := stk.lo; p < stk.hi; p++ {
   789  		*(*byte)(unsafe.Pointer(p)) = b
   790  	}
   791  }
   792  
   793  func findsghi(gp *g, stk stack) uintptr {
   794  	var sghi uintptr
   795  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   796  		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
   797  		if stk.lo <= p && p < stk.hi && p > sghi {
   798  			sghi = p
   799  		}
   800  	}
   801  	return sghi
   802  }
   803  
   804  // syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
   805  // stack they refer to while synchronizing with concurrent channel
   806  // operations. It returns the number of bytes of stack copied.
   807  func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
   808  	if gp.waiting == nil {
   809  		return 0
   810  	}
   811  
   812  	// Lock channels to prevent concurrent send/receive.
   813  	var lastc *hchan
   814  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   815  		if sg.c != lastc {
   816  			// There is a ranking cycle here between gscan bit and
   817  			// hchan locks. Normally, we only allow acquiring hchan
   818  			// locks and then getting a gscan bit. In this case, we
   819  			// already have the gscan bit. We allow acquiring hchan
   820  			// locks here as a special case, since a deadlock can't
   821  			// happen because the G involved must already be
   822  			// suspended. So, we get a special hchan lock rank here
   823  			// that is lower than gscan, but doesn't allow acquiring
    824  			// any locks other than hchan.
   825  			lockWithRank(&sg.c.lock, lockRankHchanLeaf)
   826  		}
   827  		lastc = sg.c
   828  	}
   829  
   830  	// Adjust sudogs.
   831  	adjustsudogs(gp, adjinfo)
   832  
    833  	// Copy the part of the stack the sudogs point into
    834  	// while holding the channel locks to prevent races on
    835  	// send/receive slots.
   836  	var sgsize uintptr
   837  	if adjinfo.sghi != 0 {
   838  		oldBot := adjinfo.old.hi - used
   839  		newBot := oldBot + adjinfo.delta
   840  		sgsize = adjinfo.sghi - oldBot
   841  		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
   842  	}
   843  
   844  	// Unlock channels.
   845  	lastc = nil
   846  	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
   847  		if sg.c != lastc {
   848  			unlock(&sg.c.lock)
   849  		}
   850  		lastc = sg.c
   851  	}
   852  
   853  	return sgsize
   854  }
   855  
   856  // Copies gp's stack to a new stack of a different size.
   857  // Caller must have changed gp status to Gcopystack.
   858  func copystack(gp *g, newsize uintptr) {
   859  	if gp.syscallsp != 0 {
   860  		throw("stack growth not allowed in system call")
   861  	}
   862  	old := gp.stack
   863  	if old.lo == 0 {
   864  		throw("nil stackbase")
   865  	}
   866  	used := old.hi - gp.sched.sp
   867  	// Add just the difference to gcController.addScannableStack.
   868  	// g0 stacks never move, so this will never account for them.
    869  	// It's also fine if we have no P; addScannableStack can deal with
    870  	// that case.
   871  	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))
   872  
   873  	// allocate new stack
   874  	new := stackalloc(uint32(newsize))
   875  	if stackPoisonCopy != 0 {
   876  		fillstack(new, 0xfd)
   877  	}
   878  	if stackDebug >= 1 {
   879  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   880  	}
   881  
   882  	// Compute adjustment.
   883  	var adjinfo adjustinfo
   884  	adjinfo.old = old
   885  	adjinfo.delta = new.hi - old.hi
   886  
   887  	// Adjust sudogs, synchronizing with channel ops if necessary.
   888  	ncopy := used
   889  	if !gp.activeStackChans {
   890  		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() {
   891  			// It's not safe for someone to shrink this stack while we're actively
   892  			// parking on a channel, but it is safe to grow since we do that
   893  			// ourselves and explicitly don't want to synchronize with channels
   894  			// since we could self-deadlock.
   895  			throw("racy sudog adjustment due to parking on channel")
   896  		}
   897  		adjustsudogs(gp, &adjinfo)
   898  	} else {
    899  		// sudogs may be pointing into the stack and gp has
   900  		// released channel locks, so other goroutines could
   901  		// be writing to gp's stack. Find the highest such
   902  		// pointer so we can handle everything there and below
   903  		// carefully. (This shouldn't be far from the bottom
   904  		// of the stack, so there's little cost in handling
   905  		// everything below it carefully.)
   906  		adjinfo.sghi = findsghi(gp, old)
   907  
   908  		// Synchronize with channel ops and copy the part of
   909  		// the stack they may interact with.
   910  		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
   911  	}
   912  
   913  	// Copy the stack (or the rest of it) to the new location
   914  	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)
   915  
   916  	// Adjust remaining structures that have pointers into stacks.
   917  	// We have to do most of these before we traceback the new
   918  	// stack because gentraceback uses them.
   919  	adjustctxt(gp, &adjinfo)
   920  	adjustdefers(gp, &adjinfo)
   921  	adjustpanics(gp, &adjinfo)
   922  	if adjinfo.sghi != 0 {
   923  		adjinfo.sghi += adjinfo.delta
   924  	}
   925  
   926  	// Swap out old stack for new one
   927  	gp.stack = new
   928  	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request
   929  	gp.sched.sp = new.hi - used
   930  	gp.stktopsp += adjinfo.delta
   931  
   932  	// Adjust pointers in the new stack.
   933  	var u unwinder
   934  	for u.init(gp, 0); u.valid(); u.next() {
   935  		adjustframe(&u.frame, &adjinfo)
   936  	}
   937  
   938  	// free old stack
   939  	if stackPoisonCopy != 0 {
   940  		fillstack(old, 0xfc)
   941  	}
   942  	stackfree(old)
   943  }
   944  
   945  // round x up to a power of 2.
   946  func round2(x int32) int32 {
   947  	s := uint(0)
   948  	for 1<<s < x {
   949  		s++
   950  	}
   951  	return 1 << s
   952  }
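
         // For example, round2(1) == 1, round2(4) == 4, and round2(5) == 8.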
   953  
   954  // Called from runtime·morestack when more stack is needed.
   955  // Allocate larger stack and relocate to new stack.
   956  // Stack growth is multiplicative, for constant amortized cost.
   957  //
   958  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   959  // If the scheduler is trying to stop this g, then it will set preemptStop.
   960  //
   961  // This must be nowritebarrierrec because it can be called as part of
   962  // stack growth from other nowritebarrierrec functions, but the
   963  // compiler doesn't check this.
   964  //
   965  //go:nowritebarrierrec
   966  func newstack() {
   967  	thisg := getg()
    968  	// TODO: double check all uses of gp; they shouldn't be getg().
   969  	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
   970  		throw("stack growth after fork")
   971  	}
   972  	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
   973  		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   974  		morebuf := thisg.m.morebuf
   975  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
   976  		throw("runtime: wrong goroutine in newstack")
   977  	}
   978  
   979  	gp := thisg.m.curg
   980  
   981  	if thisg.m.curg.throwsplit {
   982  		// Update syscallsp, syscallpc in case traceback uses them.
   983  		morebuf := thisg.m.morebuf
   984  		gp.syscallsp = morebuf.sp
   985  		gp.syscallpc = morebuf.pc
   986  		pcname, pcoff := "(unknown)", uintptr(0)
   987  		f := findfunc(gp.sched.pc)
   988  		if f.valid() {
   989  			pcname = funcname(f)
   990  			pcoff = gp.sched.pc - f.entry()
   991  		}
   992  		print("runtime: newstack at ", pcname, "+", hex(pcoff),
   993  			" sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   994  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   995  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   996  
   997  		thisg.m.traceback = 2 // Include runtime frames
   998  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
   999  		throw("runtime: stack split at bad time")
  1000  	}
  1001  
  1002  	morebuf := thisg.m.morebuf
  1003  	thisg.m.morebuf.pc = 0
  1004  	thisg.m.morebuf.lr = 0
  1005  	thisg.m.morebuf.sp = 0
  1006  	thisg.m.morebuf.g = 0
  1007  
  1008  	// NOTE: stackguard0 may change underfoot, if another thread
  1009  	// is about to try to preempt gp. Read it just once and use that same
  1010  	// value now and below.
  1011  	stackguard0 := atomic.Loaduintptr(&gp.stackguard0)
  1012  
  1013  	// Be conservative about where we preempt.
  1014  	// We are interested in preempting user Go code, not runtime code.
  1015  	// If we're holding locks, mallocing, or preemption is disabled, don't
  1016  	// preempt.
  1017  	// This check is very early in newstack so that even the status change
  1018  	// from Grunning to Gwaiting and back doesn't happen in this case.
  1019  	// That status change by itself can be viewed as a small preemption,
  1020  	// because the GC might change Gwaiting to Gscanwaiting, and then
  1021  	// this goroutine has to wait for the GC to finish before continuing.
  1022  	// If the GC is in some way dependent on this goroutine (for example,
  1023  	// it needs a lock held by the goroutine), that small preemption turns
  1024  	// into a real deadlock.
  1025  	preempt := stackguard0 == stackPreempt
  1026  	if preempt {
  1027  		if !canPreemptM(thisg.m) {
  1028  			// Let the goroutine keep running for now.
  1029  			// gp->preempt is set, so it will be preempted next time.
  1030  			gp.stackguard0 = gp.stack.lo + stackGuard
  1031  			gogo(&gp.sched) // never return
  1032  		}
  1033  	}
  1034  
  1035  	if gp.stack.lo == 0 {
  1036  		throw("missing stack in newstack")
  1037  	}
  1038  	sp := gp.sched.sp
  1039  	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM {
  1040  		// The call to morestack cost a word.
  1041  		sp -= goarch.PtrSize
  1042  	}
  1043  	if stackDebug >= 1 || sp < gp.stack.lo {
  1044  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
  1045  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
  1046  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
  1047  	}
  1048  	if sp < gp.stack.lo {
  1049  		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ")
  1050  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
  1051  		throw("runtime: split stack overflow")
  1052  	}
  1053  
  1054  	if preempt {
  1055  		if gp == thisg.m.g0 {
  1056  			throw("runtime: preempt g0")
  1057  		}
  1058  		if thisg.m.p == 0 && thisg.m.locks == 0 {
  1059  			throw("runtime: g is running but p is not")
  1060  		}
  1061  
  1062  		if gp.preemptShrink {
  1063  			// We're at a synchronous safe point now, so
  1064  			// do the pending stack shrink.
  1065  			gp.preemptShrink = false
  1066  			shrinkstack(gp)
  1067  		}
  1068  
  1069  		if gp.preemptStop {
  1070  			preemptPark(gp) // never returns
  1071  		}
  1072  
  1073  		// Act like goroutine called runtime.Gosched.
  1074  		gopreempt_m(gp) // never return
  1075  	}
  1076  
  1077  	// Allocate a bigger segment and move the stack.
  1078  	oldsize := gp.stack.hi - gp.stack.lo
  1079  	newsize := oldsize * 2
  1080  
  1081  	// Make sure we grow at least as much as needed to fit the new frame.
  1082  	// (This is just an optimization - the caller of morestack will
  1083  	// recheck the bounds on return.)
  1084  	if f := findfunc(gp.sched.pc); f.valid() {
  1085  		max := uintptr(funcMaxSPDelta(f))
  1086  		needed := max + stackGuard
  1087  		used := gp.stack.hi - gp.sched.sp
  1088  		for newsize-used < needed {
  1089  			newsize *= 2
  1090  		}
  1091  	}
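         	// For example (hypothetical numbers): an 8 KiB stack first doubles to
         	// 16 KiB; if the restarted function needs 20000 bytes beyond the 4096
         	// already in use, 16384-4096 is still too small, so newsize doubles again
         	// to 32 KiB before the copy.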
  1092  
  1093  	if stackguard0 == stackForceMove {
  1094  		// Forced stack movement used for debugging.
  1095  		// Don't double the stack (or we may quickly run out
  1096  		// if this is done repeatedly).
  1097  		newsize = oldsize
  1098  	}
  1099  
  1100  	if newsize > maxstacksize || newsize > maxstackceiling {
  1101  		if maxstacksize < maxstackceiling {
  1102  			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
  1103  		} else {
  1104  			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n")
  1105  		}
  1106  		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
  1107  		throw("stack overflow")
  1108  	}
  1109  
  1110  	// The goroutine must be executing in order to call newstack,
  1111  	// so it must be Grunning (or Gscanrunning).
  1112  	casgstatus(gp, _Grunning, _Gcopystack)
  1113  
  1114  	// The concurrent GC will not scan the stack while we are doing the copy since
  1115  	// the gp is in a Gcopystack status.
  1116  	copystack(gp, newsize)
  1117  	if stackDebug >= 1 {
  1118  		print("stack grow done\n")
  1119  	}
  1120  	casgstatus(gp, _Gcopystack, _Grunning)
  1121  	gogo(&gp.sched)
  1122  }
  1123  
  1124  //go:nosplit
  1125  func nilfunc() {
  1126  	*(*uint8)(nil) = 0
  1127  }
  1128  
  1129  // adjust Gobuf as if it executed a call to fn
  1130  // and then stopped before the first instruction in fn.
  1131  func gostartcallfn(gobuf *gobuf, fv *funcval) {
  1132  	var fn unsafe.Pointer
  1133  	if fv != nil {
  1134  		fn = unsafe.Pointer(fv.fn)
  1135  	} else {
  1136  		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc))
  1137  	}
  1138  	gostartcall(gobuf, fn, unsafe.Pointer(fv))
  1139  }
  1140  
  1141  // isShrinkStackSafe returns whether it's safe to attempt to shrink
  1142  // gp's stack. Shrinking the stack is only safe when we have precise
  1143  // pointer maps for all frames on the stack.
  1144  func isShrinkStackSafe(gp *g) bool {
  1145  	// We can't copy the stack if we're in a syscall.
  1146  	// The syscall might have pointers into the stack and
  1147  	// often we don't have precise pointer maps for the innermost
  1148  	// frames.
  1149  	//
  1150  	// We also can't copy the stack if we're at an asynchronous
  1151  	// safe-point because we don't have precise pointer maps for
  1152  	// all frames.
  1153  	//
  1154  	// We also can't *shrink* the stack in the window between the
  1155  	// goroutine calling gopark to park on a channel and
  1156  	// gp.activeStackChans being set.
  1157  	return gp.syscallsp == 0 && !gp.asyncSafePoint && !gp.parkingOnChan.Load()
  1158  }
  1159  
  1160  // Maybe shrink the stack being used by gp.
  1161  //
  1162  // gp must be stopped and we must own its stack. It may be in
  1163  // _Grunning, but only if this is our own user G.
  1164  func shrinkstack(gp *g) {
  1165  	if gp.stack.lo == 0 {
  1166  		throw("missing stack in shrinkstack")
  1167  	}
  1168  	if s := readgstatus(gp); s&_Gscan == 0 {
  1169  		// We don't own the stack via _Gscan. We could still
  1170  		// own it if this is our own user G and we're on the
  1171  		// system stack.
  1172  		if !(gp == getg().m.curg && getg() != getg().m.curg && s == _Grunning) {
  1173  			// We don't own the stack.
  1174  			throw("bad status in shrinkstack")
  1175  		}
  1176  	}
  1177  	if !isShrinkStackSafe(gp) {
  1178  		throw("shrinkstack at bad time")
  1179  	}
  1180  	// Check for self-shrinks while in a libcall. These may have
  1181  	// pointers into the stack disguised as uintptrs, but these
  1182  	// code paths should all be nosplit.
  1183  	if gp == getg().m.curg && gp.m.libcallsp != 0 {
  1184  		throw("shrinking stack in libcall")
  1185  	}
  1186  
  1187  	if debug.gcshrinkstackoff > 0 {
  1188  		return
  1189  	}
  1190  	f := findfunc(gp.startpc)
  1191  	if f.valid() && f.funcID == abi.FuncID_gcBgMarkWorker {
  1192  		// We're not allowed to shrink the gcBgMarkWorker
  1193  		// stack (see gcBgMarkWorker for explanation).
  1194  		return
  1195  	}
  1196  
  1197  	oldsize := gp.stack.hi - gp.stack.lo
  1198  	newsize := oldsize / 2
  1199  	// Don't shrink the allocation below the minimum-sized stack
  1200  	// allocation.
  1201  	if newsize < fixedStack {
  1202  		return
  1203  	}
  1204  	// Compute how much of the stack is currently in use and only
  1205  	// shrink the stack if gp is using less than a quarter of its
  1206  	// current stack. The currently used stack includes everything
  1207  	// down to the SP plus the stack guard space that ensures
  1208  	// there's room for nosplit functions.
  1209  	avail := gp.stack.hi - gp.stack.lo
  1210  	if used := gp.stack.hi - gp.sched.sp + stackNosplit; used >= avail/4 {
  1211  		return
  1212  	}
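         	// For example (hypothetical numbers): a goroutine on a 32 KiB stack is only
         	// shrunk to 16 KiB if it is using less than 8 KiB, counting the stackNosplit
         	// reserve as part of that usage.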
  1213  
  1214  	if stackDebug > 0 {
  1215  		print("shrinking stack ", oldsize, "->", newsize, "\n")
  1216  	}
  1217  
  1218  	copystack(gp, newsize)
  1219  }
  1220  
  1221  // freeStackSpans frees unused stack spans at the end of GC.
  1222  func freeStackSpans() {
  1223  	// Scan stack pools for empty stack spans.
  1224  	for order := range stackpool {
  1225  		lock(&stackpool[order].item.mu)
  1226  		list := &stackpool[order].item.span
  1227  		for s := list.first; s != nil; {
  1228  			next := s.next
  1229  			if s.allocCount == 0 {
  1230  				list.remove(s)
  1231  				s.manualFreeList = 0
  1232  				osStackFree(s)
  1233  				mheap_.freeManual(s, spanAllocStack)
  1234  			}
  1235  			s = next
  1236  		}
  1237  		unlock(&stackpool[order].item.mu)
  1238  	}
  1239  
  1240  	// Free large stack spans.
  1241  	lock(&stackLarge.lock)
  1242  	for i := range stackLarge.free {
  1243  		for s := stackLarge.free[i].first; s != nil; {
  1244  			next := s.next
  1245  			stackLarge.free[i].remove(s)
  1246  			osStackFree(s)
  1247  			mheap_.freeManual(s, spanAllocStack)
  1248  			s = next
  1249  		}
  1250  	}
  1251  	unlock(&stackLarge.lock)
  1252  }
  1253  
  1254  // A stackObjectRecord is generated by the compiler for each stack object in a stack frame.
  1255  // This record must match the generator code in cmd/compile/internal/liveness/plive.go:emitStackObjects.
  1256  type stackObjectRecord struct {
  1257  	// offset in frame
  1258  	// if negative, offset from varp
  1259  	// if non-negative, offset from argp
  1260  	off       int32
  1261  	size      int32
   1262  	_ptrdata  int32  // ptrdata, or -ptrdata if a GC program is used
  1263  	gcdataoff uint32 // offset to gcdata from moduledata.rodata
  1264  }
  1265  
  1266  func (r *stackObjectRecord) useGCProg() bool {
  1267  	return r._ptrdata < 0
  1268  }
  1269  
  1270  func (r *stackObjectRecord) ptrdata() uintptr {
  1271  	x := r._ptrdata
  1272  	if x < 0 {
  1273  		return uintptr(-x)
  1274  	}
  1275  	return uintptr(x)
  1276  }
  1277  
   1278  // gcdata returns the pointer map or GC program of the type.
  1279  func (r *stackObjectRecord) gcdata() *byte {
  1280  	ptr := uintptr(unsafe.Pointer(r))
  1281  	var mod *moduledata
  1282  	for datap := &firstmoduledata; datap != nil; datap = datap.next {
  1283  		if datap.gofunc <= ptr && ptr < datap.end {
  1284  			mod = datap
  1285  			break
  1286  		}
  1287  	}
  1288  	// If you get a panic here due to a nil mod,
  1289  	// you may have made a copy of a stackObjectRecord.
  1290  	// You must use the original pointer.
  1291  	res := mod.rodata + uintptr(r.gcdataoff)
  1292  	return (*byte)(unsafe.Pointer(res))
  1293  }
  1294  
  1295  // This is exported as ABI0 via linkname so obj can call it.
  1296  //
  1297  //go:nosplit
  1298  //go:linkname morestackc
  1299  func morestackc() {
  1300  	throw("attempt to execute system stack code on user stack")
  1301  }
  1302  
  1303  // startingStackSize is the amount of stack that new goroutines start with.
   1304  // It is a power of 2, and between fixedStack and maxstacksize, inclusive.
  1305  // startingStackSize is updated every GC by tracking the average size of
  1306  // stacks scanned during the GC.
  1307  var startingStackSize uint32 = fixedStack
  1308  
  1309  func gcComputeStartingStackSize() {
  1310  	if debug.adaptivestackstart == 0 {
  1311  		return
  1312  	}
  1313  	// For details, see the design doc at
  1314  	// https://docs.google.com/document/d/1YDlGIdVTPnmUiTAavlZxBI1d9pwGQgZT7IKFKlIXohQ/edit?usp=sharing
  1315  	// The basic algorithm is to track the average size of stacks
  1316  	// and start goroutines with stack equal to that average size.
  1317  	// Starting at the average size uses at most 2x the space that
  1318  	// an ideal algorithm would have used.
  1319  	// This is just a heuristic to avoid excessive stack growth work
  1320  	// early in a goroutine's lifetime. See issue 18138. Stacks that
  1321  	// are allocated too small can still grow, and stacks allocated
  1322  	// too large can still shrink.
  1323  	var scannedStackSize uint64
  1324  	var scannedStacks uint64
  1325  	for _, p := range allp {
  1326  		scannedStackSize += p.scannedStackSize
  1327  		scannedStacks += p.scannedStacks
  1328  		// Reset for next time
  1329  		p.scannedStackSize = 0
  1330  		p.scannedStacks = 0
  1331  	}
  1332  	if scannedStacks == 0 {
  1333  		startingStackSize = fixedStack
  1334  		return
  1335  	}
  1336  	avg := scannedStackSize/scannedStacks + stackGuard
  1337  	// Note: we add stackGuard to ensure that a goroutine that
  1338  	// uses the average space will not trigger a growth.
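         	// For example (hypothetical numbers): scanning 1 MiB of stack across 256
         	// goroutines averages 4096 bytes; with stackGuard = 928 (its linux/amd64
         	// value under the default guard multiplier) avg becomes 5024, which the
         	// round2 call below rounds up to a starting size of 8192 bytes.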
  1339  	if avg > uint64(maxstacksize) {
  1340  		avg = uint64(maxstacksize)
  1341  	}
  1342  	if avg < fixedStack {
  1343  		avg = fixedStack
  1344  	}
  1345  	// Note: maxstacksize fits in 30 bits, so avg also does.
  1346  	startingStackSize = uint32(round2(int32(avg)))
  1347  }