github.com/mdempsky/go@v0.0.0-20151201204031-5dd372bd1e70/src/runtime/stack.go

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included by both the runtime (compiled via 6c) and the linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack.  Each function compares its stack
pointer against g->stackguard to check for overflow.  To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard.  Functions with large frames don't bother with the check and
always call morestack.  The sequences are (for amd64; others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig:
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack.  This sequence needs to fit in the bottom
section of the stack.  On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes.  That fits well within the
StackGuard - StackSmall bytes at the bottom.
The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
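
// Illustrative pseudocode (not part of the original source): ignoring the
// argsize bookkeeping, the three check sequences above roughly correspond
// to the following Go-style sketches, where sp, guard, and frame name the
// values described in the comment:
//
//	// frame <= StackSmall: the frame may protrude below the guard.
//	if sp <= guard {
//		morestack()
//	}
//
//	// StackSmall < frame < StackBig: account for the frame explicitly.
//	if sp-(frame-StackSmall) <= guard {
//		morestack()
//	}
//
//	// frame >= StackBig: sp-frame could wrap below zero, so skip the
//	// comparison and unconditionally enter morestack, which checks
//	// the requested size safely.
//	morestack()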

const (
	// StackSystem is the number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack
	// for signal handling.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard.  This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)
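
// Illustrative sketch (not part of the original source): the _FixedStack
// constants above are the classic bit-smearing trick for rounding up to a
// power of two, unrolled into constant expressions. As a standalone
// function (hypothetical, assuming x >= 1):
//
//	func roundUpPow2(x uint32) uint32 {
//		x--          // _FixedStack1
//		x |= x >> 1  // _FixedStack2
//		x |= x >> 2  // _FixedStack3
//		x |= x >> 4  // _FixedStack4
//		x |= x >> 8  // _FixedStack5
//		x |= x >> 16 // _FixedStack6
//		return x + 1 // _FixedStack
//	}
//
// For example, on Plan 9 (_StackSystem == 512) this rounds 2048+512 == 2560
// up to 4096; where _StackSystem == 0, 2048 is already a power of two and
// is unchanged.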

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug       = 0
	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1
	poisonStack = uintptrMask & 0x6868686868686868

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
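
// Note (illustrative, not part of the original source): the "0xfffffade"
// comments above give the 32-bit value. uintptrMask & -1314 evaluates to
// 0xfffffade on 32-bit systems and 0xfffffffffffffade on 64-bit systems.
// Either way the sentinel is far above any valid stack pointer, so the
// prologue check in every non-nosplit function fails and control diverts
// into morestack, which recognizes the preemption request.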

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex
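
// Worked example (illustrative, not part of the original source): with
// _FixedStack == 2048 and _NumStackOrders == 4 (typical for Linux/amd64 in
// this tree), the pool holds four lists, for spans carved into stacks of
// 2 KB, 4 KB, 8 KB, and 16 KB, at orders 0 through 3 respectively.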

// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mSpanList

// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	stackFreeQueue.init()
}

// Allocates a stack from the free pool.  Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// No free stacks.  Allocate another span's worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.ref != 0 {
			throw("bad ref")
		}
		if s.freelist.ptr() != nil {
			throw("bad freelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
			x.ptr().next = s.freelist
			s.freelist = x
		}
		list.insert(s)
	}
	x := s.freelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.freelist = x.ptr().next
	s.ref++
	if s.freelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool.  Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.freelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.freelist
	s.freelist = x
	s.ref--
	if gcphase == _GCoff && s.ref == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.freelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}
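
// Illustrative note (not part of the original source): refill and release
// both aim at the same watermark, _StackCacheSize/2 (16 KB with the 32 KB
// _StackCacheSize in this tree). An empty per-P cache is refilled to about
// half capacity, and an overfull one is drained back down to the same
// level, so a thread that alternately allocates and frees a single stack
// does not ping-pong stacks through the global pool on every call.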

func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

func stackalloc(n uint32) (stack, []stkbar) {
	// stackalloc must be called on the scheduler stack, so that we
	// never try to grow the stack while stackalloc itself is running.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		s := mheap_.allocStack(round(uintptr(n), _PageSize) >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		v = unsafe.Pointer(s.start << _PageShift)
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
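
// Worked example (illustrative, not part of the original source): with
// _FixedStack == 2048 and _NumStackOrders == 4, a request for n == 8192
// passes n < _FixedStack<<_NumStackOrders (32768), so the loop halves 8192
// to 2048 in two steps and the stack comes from the order-2 cache or pool.
// A request for n == 65536 fails the test and is served by a dedicated
// span from mheap_.allocStack instead.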

func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.start<<_PageShift), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// Otherwise, add it to a list of stack spans
			// to be freed at the end of GC.
			//
			// TODO(austin): Make it possible to re-use
			// these spans as stacks, like we do for small
			// stack spans. (See issue #11466.)
			lock(&stackpoolmu)
			stackFreeQueue.insert(s)
			unlock(&stackpoolmu)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// |  return address  |
// +------------------+
// |  caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// |     locals       |
// +------------------+
// |  args to callee  |
// +------------------+
// |  return address  |
// +------------------+ <- frame->sp

type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*unsafe.Pointer)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", p, "\n")
	}
	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
		*pp = add(p, adjinfo.delta)
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
		}
	}
}
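
// Worked example (illustrative, not part of the original source): if the
// old stack spans [0x1000, 0x2000) and the new one spans [0x3000, 0x4000),
// copystack below sets delta = new.hi - old.hi = 0x2000 (unsigned, so a
// move to a lower address simply wraps and the later add wraps back). A
// slot holding 0x1800 falls inside the old range and is rewritten to
// 0x3800; a slot holding a heap address is left alone.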

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
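
// For example (illustrative, not part of the original source): a frame
// whose first and third pointer-sized words hold pointers has
// bytedata[0] == 0x5 (binary 101), so ptrbit returns 1 for i == 0 and
// i == 2, and adjustpointers below inspects those two slots while
// skipping the scalar between them.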

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := uintptr(bv.n)
	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 || p == poisonStack {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				*pp = p + delta
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of the stack of a goroutine
		// that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.TheChar {
	case '7':
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.TheChar == '6' && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// The data elements pointed to by a SudoG structure
	// might be on the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
func copystack(gp *g, newsize uintptr) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// adjust pointers in the to-be-copied frames
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	// adjust other miscellaneous things that have pointers into stacks.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjustsudogs(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)

	// copy the stack to the new location
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfb)
	}
	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
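
// For example (illustrative, not part of the original source): round2(1)
// returns 1, round2(3) returns 4, and round2(4096) returns 4096. Unlike
// the constant-folded _FixedStack trick above, round2 computes the result
// with a short loop at run time.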

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")

		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gwaiting)
	gp.waitreason = "stack growth"

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.TheChar == '6' || sys.TheChar == '8' {
		// The call to morestack costs a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				scanstack(gp)
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	casgstatus(gp, _Gwaiting, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize))
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
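
// Illustrative note (not part of the original source): because newstack
// doubles stackAlloc on every growth, a goroutine that eventually needs
// N bytes of stack passes through 2 KB, 4 KB, ..., N, and the copies sum
// to less than 2N bytes in total. That geometric schedule is the
// "constant amortized cost" promised in the comment above.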

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
func shrinkstack(gp *g) {
	if readgstatus(gp) == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	oldstatus := casgcopystack(gp)
	copystack(gp, newsize)
	casgstatus(gp, _Gcopystack, oldstatus)
}
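
// Worked example (illustrative, not part of the original source): for an
// 8 KB stack, newsize is 4 KB and the shrink happens only if
// used < 8192/4 == 2048, where used counts everything from stack.hi down
// to SP plus _StackLimit bytes of nosplit headroom. A goroutine must
// therefore be well under a quarter of its stack, not merely under half,
// before the runtime pays for a copy.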

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.ref == 0 {
				list.remove(s)
				s.freelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	// Free queued stack spans.
	for !stackFreeQueue.isEmpty() {
		s := stackFreeQueue.first
		stackFreeQueue.remove(s)
		mheap_.freeStack(s)
	}

	unlock(&stackpoolmu)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}