github.com/tcnksm/go@v0.0.0-20141208075154-439b32936367/src/runtime/stack1.go

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package runtime
     6  
     7  import "unsafe"
     8  
     9  const (
    10  	// StackDebug == 0: no logging
    11  	//            == 1: logging of per-stack operations
    12  	//            == 2: logging of per-frame operations
    13  	//            == 3: logging of per-word updates
    14  	//            == 4: logging of per-word reads
    15  	stackDebug       = 0
    16  	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
    17  	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
    18  	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy
    19  
    20  	stackCache = 1
    21  )
    22  
    23  const (
    24  	uintptrMask = 1<<(8*ptrSize) - 1
    25  	poisonGC    = uintptrMask & 0xf969696969696969
    26  	poisonStack = uintptrMask & 0x6868686868686868
    27  
    28  	// Goroutine preemption request.
    29  	// Stored into g->stackguard0 to cause split stack check failure.
    30  	// Must be greater than any real sp.
    31  	// 0xfffffade in hex.
    32  	stackPreempt = uintptrMask & -1314
    33  
    34  	// Thread is forking.
    35  	// Stored into g->stackguard0 to cause split stack check failure.
    36  	// Must be greater than any real sp.
    37  	stackFork = uintptrMask & -1234
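        	// Both sentinels sit far above any real stack pointer.  Masked to
        	// pointer width, the untyped constant -1314 becomes 0xfffffade on
        	// 32-bit (0xfffffffffffffade on 64-bit) and -1234 becomes
        	// 0xfffffb2e (0xfffffffffffffb2e).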
    38  )
    39  
    40  // Global pool of spans that have free stacks.
    41  // Stacks are assigned an order according to size.
    42  //     order = log_2(size/FixedStack)
    43  // There is a free list for each order.
    44  // TODO: one lock per order?
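        // As an illustration, with the common configuration of _FixedStack = 2 KB
        // and four cached orders (the exact values are platform-dependent and set
        // elsewhere in the runtime), the lists cover 2 KB (order 0), 4 KB (order 1),
        // 8 KB (order 2) and 16 KB (order 3) stacks; larger stacks bypass the pool
        // and get a dedicated span (see stackalloc).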
    45  var stackpool [_NumStackOrders]mspan
    46  var stackpoolmu mutex
    47  
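        // stackfreequeue holds stacks whose freeing had to be deferred because the
        // shrink happened during GC.  The list is threaded through the first word
        // of each queued stack, is protected by stackpoolmu, and is drained by
        // shrinkfinish; see the comment in copystack for why the delay is needed.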
    48  var stackfreequeue stack
    49  
    50  func stackinit() {
    51  	if _StackCacheSize&_PageMask != 0 {
    52  		gothrow("cache size must be a multiple of page size")
    53  	}
    54  	for i := range stackpool {
    55  		mSpanList_Init(&stackpool[i])
    56  	}
    57  }
    58  
    59  // Allocates a stack from the free pool.  Must be called with
    60  // stackpoolmu held.
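        // When the list for an order is empty, a span of _StackCacheSize bytes is
        // carved into _StackCacheSize/(_FixedStack<<order) equal stacks that are
        // threaded onto the span's freelist.  With the illustrative values
        // _StackCacheSize = 32 KB and _FixedStack = 2 KB, that is 16 stacks at
        // order 0 and 2 stacks at order 3.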
    61  func stackpoolalloc(order uint8) gclinkptr {
    62  	list := &stackpool[order]
    63  	s := list.next
    64  	if s == list {
    65  		// no free stacks.  Allocate another span worth.
    66  		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
    67  		if s == nil {
    68  			gothrow("out of memory")
    69  		}
    70  		if s.ref != 0 {
    71  			gothrow("bad ref")
    72  		}
    73  		if s.freelist.ptr() != nil {
    74  			gothrow("bad freelist")
    75  		}
    76  		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
    77  			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
    78  			x.ptr().next = s.freelist
    79  			s.freelist = x
    80  		}
    81  		mSpanList_Insert(list, s)
    82  	}
    83  	x := s.freelist
    84  	if x.ptr() == nil {
    85  		gothrow("span has no free stacks")
    86  	}
    87  	s.freelist = x.ptr().next
    88  	s.ref++
    89  	if s.freelist.ptr() == nil {
    90  		// all stacks in s are allocated.
    91  		mSpanList_Remove(s)
    92  	}
    93  	return x
    94  }
    95  
    96  // Adds stack x to the free pool.  Must be called with stackpoolmu held.
    97  func stackpoolfree(x gclinkptr, order uint8) {
    98  	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
    99  	if s.state != _MSpanStack {
   100  		gothrow("freeing stack not in a stack span")
   101  	}
   102  	if s.freelist.ptr() == nil {
   103  		// s will now have a free stack
   104  		mSpanList_Insert(&stackpool[order], s)
   105  	}
   106  	x.ptr().next = s.freelist
   107  	s.freelist = x
   108  	s.ref--
   109  	if s.ref == 0 {
   110  		// span is completely free - return to heap
   111  		mSpanList_Remove(s)
   112  		s.freelist = 0
   113  		mHeap_FreeStack(&mheap_, s)
   114  	}
   115  }
   116  
   117  // stackcacherefill/stackcacherelease implement a global pool of stack segments.
   118  // The pool is required to prevent unlimited growth of per-thread caches.
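        // stackcacherefill runs when a per-P list is empty and fills it to half of
        // _StackCacheSize; stackcacherelease runs when stackfree has grown the
        // cache to _StackCacheSize and drains it back to half.  Moving half the
        // capacity at a time means the global stackpoolmu lock is taken once per
        // batch rather than on every allocation and free.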
   119  func stackcacherefill(c *mcache, order uint8) {
   120  	if stackDebug >= 1 {
   121  		print("stackcacherefill order=", order, "\n")
   122  	}
   123  
   124  	// Grab some stacks from the global cache.
   125  	// Grab half of the allowed capacity (to prevent thrashing).
   126  	var list gclinkptr
   127  	var size uintptr
   128  	lock(&stackpoolmu)
   129  	for size < _StackCacheSize/2 {
   130  		x := stackpoolalloc(order)
   131  		x.ptr().next = list
   132  		list = x
   133  		size += _FixedStack << order
   134  	}
   135  	unlock(&stackpoolmu)
   136  	c.stackcache[order].list = list
   137  	c.stackcache[order].size = size
   138  }
   139  
   140  func stackcacherelease(c *mcache, order uint8) {
   141  	if stackDebug >= 1 {
   142  		print("stackcacherelease order=", order, "\n")
   143  	}
   144  	x := c.stackcache[order].list
   145  	size := c.stackcache[order].size
   146  	lock(&stackpoolmu)
   147  	for size > _StackCacheSize/2 {
   148  		y := x.ptr().next
   149  		stackpoolfree(x, order)
   150  		x = y
   151  		size -= _FixedStack << order
   152  	}
   153  	unlock(&stackpoolmu)
   154  	c.stackcache[order].list = x
   155  	c.stackcache[order].size = size
   156  }
   157  
   158  func stackcache_clear(c *mcache) {
   159  	if stackDebug >= 1 {
   160  		print("stackcache clear\n")
   161  	}
   162  	lock(&stackpoolmu)
   163  	for order := uint8(0); order < _NumStackOrders; order++ {
   164  		x := c.stackcache[order].list
   165  		for x.ptr() != nil {
   166  			y := x.ptr().next
   167  			stackpoolfree(x, order)
   168  			x = y
   169  		}
   170  		c.stackcache[order].list = 0
   171  		c.stackcache[order].size = 0
   172  	}
   173  	unlock(&stackpoolmu)
   174  }
   175  
   176  func stackalloc(n uint32) stack {
   177  	// Stackalloc must be called on scheduler stack, so that we
   178  	// never try to grow the stack during the code that stackalloc runs.
   179  	// Doing so would cause a deadlock (issue 1547).
   180  	thisg := getg()
   181  	if thisg != thisg.m.g0 {
   182  		gothrow("stackalloc not on scheduler stack")
   183  	}
   184  	if n&(n-1) != 0 {
   185  		gothrow("stack size not a power of 2")
   186  	}
   187  	if stackDebug >= 1 {
   188  		print("stackalloc ", n, "\n")
   189  	}
   190  
   191  	if debug.efence != 0 || stackFromSystem != 0 {
   192  		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
   193  		if v == nil {
   194  			gothrow("out of memory (stackalloc)")
   195  		}
   196  		return stack{uintptr(v), uintptr(v) + uintptr(n)}
   197  	}
   198  
   199  	// Small stacks are allocated with a fixed-size free-list allocator.
   200  	// If we need a stack of a bigger size, we fall back on allocating
   201  	// a dedicated span.
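        	// For example, with the illustrative constants _FixedStack = 2 KB,
        	// _NumStackOrders = 4 and _StackCacheSize = 32 KB, requests of 2 KB,
        	// 4 KB, 8 KB and 16 KB map to orders 0 through 3 and are served from
        	// the per-P cache, while a 32 KB or larger request falls through to
        	// mHeap_AllocStack below.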
   202  	var v unsafe.Pointer
   203  	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   204  		order := uint8(0)
   205  		n2 := n
   206  		for n2 > _FixedStack {
   207  			order++
   208  			n2 >>= 1
   209  		}
   210  		var x gclinkptr
   211  		c := thisg.m.mcache
   212  		if c == nil || thisg.m.gcing != 0 || thisg.m.helpgc != 0 {
   213  			// c == nil can happen in the guts of exitsyscall or
   214  			// procresize. Just get a stack from the global pool.
   215  			// Also don't touch stackcache during gc
   216  			// as it's flushed concurrently.
   217  			lock(&stackpoolmu)
   218  			x = stackpoolalloc(order)
   219  			unlock(&stackpoolmu)
   220  		} else {
   221  			x = c.stackcache[order].list
   222  			if x.ptr() == nil {
   223  				stackcacherefill(c, order)
   224  				x = c.stackcache[order].list
   225  			}
   226  			c.stackcache[order].list = x.ptr().next
   227  			c.stackcache[order].size -= uintptr(n)
   228  		}
   229  		v = (unsafe.Pointer)(x)
   230  	} else {
   231  		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
   232  		if s == nil {
   233  			gothrow("out of memory")
   234  		}
   235  		v = (unsafe.Pointer)(s.start << _PageShift)
   236  	}
   237  
   238  	if raceenabled {
   239  		racemalloc(v, uintptr(n))
   240  	}
   241  	if stackDebug >= 1 {
   242  		print("  allocated ", v, "\n")
   243  	}
   244  	return stack{uintptr(v), uintptr(v) + uintptr(n)}
   245  }
   246  
   247  func stackfree(stk stack) {
   248  	gp := getg()
   249  	n := stk.hi - stk.lo
   250  	v := (unsafe.Pointer)(stk.lo)
   251  	if n&(n-1) != 0 {
   252  		gothrow("stack not a power of 2")
   253  	}
   254  	if stackDebug >= 1 {
   255  		println("stackfree", v, n)
   256  		memclr(v, n) // for testing, clobber stack data
   257  	}
   258  	if debug.efence != 0 || stackFromSystem != 0 {
   259  		if debug.efence != 0 || stackFaultOnFree != 0 {
   260  			sysFault(v, n)
   261  		} else {
   262  			sysFree(v, n, &memstats.stacks_sys)
   263  		}
   264  		return
   265  	}
   266  	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
   267  		order := uint8(0)
   268  		n2 := n
   269  		for n2 > _FixedStack {
   270  			order++
   271  			n2 >>= 1
   272  		}
   273  		x := gclinkptr(v)
   274  		c := gp.m.mcache
   275  		if c == nil || gp.m.gcing != 0 || gp.m.helpgc != 0 {
   276  			lock(&stackpoolmu)
   277  			stackpoolfree(x, order)
   278  			unlock(&stackpoolmu)
   279  		} else {
   280  			if c.stackcache[order].size >= _StackCacheSize {
   281  				stackcacherelease(c, order)
   282  			}
   283  			x.ptr().next = c.stackcache[order].list
   284  			c.stackcache[order].list = x
   285  			c.stackcache[order].size += n
   286  		}
   287  	} else {
   288  		s := mHeap_Lookup(&mheap_, v)
   289  		if s.state != _MSpanStack {
   290  			println(hex(s.start<<_PageShift), v)
   291  			gothrow("bad span state")
   292  		}
   293  		mHeap_FreeStack(&mheap_, s)
   294  	}
   295  }
   296  
   297  var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real
   298  
   299  var mapnames = []string{
   300  	_BitsDead:    "---",
   301  	_BitsScalar:  "scalar",
   302  	_BitsPointer: "ptr",
   303  }
   304  
   305  // Stack frame layout
   306  //
   307  // (x86)
   308  // +------------------+
   309  // | args from caller |
   310  // +------------------+ <- frame->argp
   311  // |  return address  |
   312  // +------------------+ <- frame->varp
   313  // |     locals       |
   314  // +------------------+
   315  // |  args to callee  |
   316  // +------------------+ <- frame->sp
   317  //
   318  // (arm)
   319  // +------------------+
   320  // | args from caller |
   321  // +------------------+ <- frame->argp
   322  // | caller's retaddr |
   323  // +------------------+ <- frame->varp
   324  // |     locals       |
   325  // +------------------+
   326  // |  args to callee  |
   327  // +------------------+
   328  // |  return address  |
   329  // +------------------+ <- frame->sp
   330  
   331  type adjustinfo struct {
   332  	old   stack
   333  	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
   334  }
   335  
   336  // Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
   337  // If so, it rewrites *vpp to point into the new stack.
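        // For example, with illustrative addresses: if the old stack is
        // [0x1000, 0x2000) and the same-sized new stack is [0x5000, 0x6000),
        // then delta = 0x6000 - 0x2000 = 0x4000 and a saved pointer 0x1f00 is
        // rewritten to 0x5f00, preserving its offset from the top of the stack.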
   338  func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
   339  	pp := (*unsafe.Pointer)(vpp)
   340  	p := *pp
   341  	if stackDebug >= 4 {
   342  		print("        ", pp, ":", p, "\n")
   343  	}
   344  	if adjinfo.old.lo <= uintptr(p) && uintptr(p) < adjinfo.old.hi {
   345  		*pp = add(p, adjinfo.delta)
   346  		if stackDebug >= 3 {
   347  			print("        adjust ptr ", pp, ":", p, " -> ", *pp, "\n")
   348  		}
   349  	}
   350  }
   351  
   352  type gobitvector struct {
   353  	n        uintptr
   354  	bytedata []uint8
   355  }
   356  
   357  func gobv(bv bitvector) gobitvector {
   358  	return gobitvector{
   359  		uintptr(bv.n),
   360  		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
   361  	}
   362  }
   363  
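        // ptrbits returns the two-bit pointer-map entry for word i.  Entries are
        // packed four to a byte, so entry i lives in byte i/4 at bit offset
        // (i&3)*2; for example, entry 5 occupies bits 2 and 3 of bytedata[1].
        // The possible values are the ones named in mapnames above.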
   364  func ptrbits(bv *gobitvector, i uintptr) uint8 {
   365  	return (bv.bytedata[i/4] >> ((i & 3) * 2)) & 3
   366  }
   367  
   368  // bv describes the memory starting at address scanp.
   369  // Adjust any pointers contained therein.
   370  func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
   371  	bv := gobv(*cbv)
   372  	minp := adjinfo.old.lo
   373  	maxp := adjinfo.old.hi
   374  	delta := adjinfo.delta
   375  	num := uintptr(bv.n / _BitsPerPointer)
   376  	for i := uintptr(0); i < num; i++ {
   377  		if stackDebug >= 4 {
   378  			print("        ", add(scanp, i*ptrSize), ":", mapnames[ptrbits(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*ptrSize))), " # ", i, " ", bv.bytedata[i/4], "\n")
   379  		}
   380  		switch ptrbits(&bv, i) {
   381  		default:
   382  			gothrow("unexpected pointer bits")
   383  		case _BitsDead:
   384  			if debug.gcdead != 0 {
   385  				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
   386  			}
   387  		case _BitsScalar:
   388  			// ok
   389  		case _BitsPointer:
   390  			p := *(*unsafe.Pointer)(add(scanp, i*ptrSize))
   391  			up := uintptr(p)
   392  			if f != nil && 0 < up && up < _PageSize && invalidptr != 0 || up == poisonGC || up == poisonStack {
   393  				// Looks like a junk value in a pointer slot.
   394  				// Live analysis wrong?
   395  				getg().m.traceback = 2
   396  				print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
   397  				gothrow("invalid stack pointer")
   398  			}
   399  			if minp <= up && up < maxp {
   400  				if stackDebug >= 3 {
   401  					print("adjust ptr ", p, " ", gofuncname(f), "\n")
   402  				}
   403  				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(up + delta)
   404  			}
   405  		}
   406  	}
   407  }
   408  
   409  // Note: the argument/return area is adjusted by the callee.
   410  func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
   411  	adjinfo := (*adjustinfo)(arg)
   412  	targetpc := frame.continpc
   413  	if targetpc == 0 {
   414  		// Frame is dead.
   415  		return true
   416  	}
   417  	f := frame.fn
   418  	if stackDebug >= 2 {
   419  		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
   420  	}
   421  	if f.entry == systemstack_switchPC {
   422  		// A special routine at the bottom of the stack of a goroutine that does a systemstack call.
   423  		// We will allow it to be copied even though we don't
   424  		// have full GC info for it (because it is written in asm).
   425  		return true
   426  	}
   427  	if targetpc != f.entry {
   428  		targetpc--
   429  	}
   430  	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc)
   431  	if pcdata == -1 {
   432  		pcdata = 0 // in prologue
   433  	}
   434  
   435  	// Adjust local variables if stack frame has been allocated.
   436  	size := frame.varp - frame.sp
   437  	var minsize uintptr
   438  	if thechar != '6' && thechar != '8' {
   439  		minsize = ptrSize
   440  	} else {
   441  		minsize = 0
   442  	}
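        	// thechar '6' is amd64 and '8' is 386.  On those, the return address
        	// lives in the caller's frame, so any nonzero size means there are
        	// locals.  On link-register machines such as arm ('5'), the word at
        	// the bottom of the frame holds the saved LR (see the frame diagram
        	// above), so a frame of exactly ptrSize bytes has no locals to adjust.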
   443  	if size > minsize {
   444  		var bv bitvector
   445  		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
   446  		if stackmap == nil || stackmap.n <= 0 {
   447  			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
   448  			gothrow("missing stackmap")
   449  		}
   450  		// Locals bitmap information, scan just the pointers in locals.
   451  		if pcdata < 0 || pcdata >= stackmap.n {
   452  			// don't know where we are
   453  			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   454  			gothrow("bad symbol table")
   455  		}
   456  		bv = stackmapdata(stackmap, pcdata)
   457  		size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
   458  		if stackDebug >= 3 {
   459  			print("      locals ", pcdata, "/", stackmap.n, " ", size/ptrSize, " words ", bv.bytedata, "\n")
   460  		}
   461  		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
   462  	}
   463  
   464  	// Adjust arguments.
   465  	if frame.arglen > 0 {
   466  		var bv bitvector
   467  		if frame.argmap != nil {
   468  			bv = *frame.argmap
   469  		} else {
   470  			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
   471  			if stackmap == nil || stackmap.n <= 0 {
   472  				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
   473  				gothrow("missing stackmap")
   474  			}
   475  			if pcdata < 0 || pcdata >= stackmap.n {
   476  				// don't know where we are
   477  				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
   478  				gothrow("bad symbol table")
   479  			}
   480  			bv = stackmapdata(stackmap, pcdata)
   481  		}
   482  		if stackDebug >= 3 {
   483  			print("      args\n")
   484  		}
   485  		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
   486  	}
   487  	return true
   488  }
   489  
   490  func adjustctxt(gp *g, adjinfo *adjustinfo) {
   491  	adjustpointer(adjinfo, (unsafe.Pointer)(&gp.sched.ctxt))
   492  }
   493  
   494  func adjustdefers(gp *g, adjinfo *adjustinfo) {
   495  	// Adjust defer argument blocks the same way we adjust active stack frames.
   496  	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))
   497  
   498  	// Adjust pointers in the Defer structs.
   499  	// Defer structs themselves are never on the stack.
   500  	for d := gp._defer; d != nil; d = d.link {
   501  		adjustpointer(adjinfo, (unsafe.Pointer)(&d.fn))
   502  		adjustpointer(adjinfo, (unsafe.Pointer)(&d.argp))
   503  		adjustpointer(adjinfo, (unsafe.Pointer)(&d._panic))
   504  	}
   505  }
   506  
   507  func adjustpanics(gp *g, adjinfo *adjustinfo) {
   508  	// Panics are on stack and already adjusted.
   509  	// Update pointer to head of list in G.
   510  	adjustpointer(adjinfo, (unsafe.Pointer)(&gp._panic))
   511  }
   512  
   513  func adjustsudogs(gp *g, adjinfo *adjustinfo) {
   514  	// the data elements pointed to by a SudoG structure
   515  	// might be in the stack.
   516  	for s := gp.waiting; s != nil; s = s.waitlink {
   517  		adjustpointer(adjinfo, (unsafe.Pointer)(&s.elem))
   518  		adjustpointer(adjinfo, (unsafe.Pointer)(&s.selectdone))
   519  	}
   520  }
   521  
   522  func fillstack(stk stack, b byte) {
   523  	for p := stk.lo; p < stk.hi; p++ {
   524  		*(*byte)(unsafe.Pointer(p)) = b
   525  	}
   526  }
   527  
   528  // Copies gp's stack to a new stack of a different size.
   529  // Caller must have changed gp status to Gcopystack.
   530  func copystack(gp *g, newsize uintptr) {
   531  	if gp.syscallsp != 0 {
   532  		gothrow("stack growth not allowed in system call")
   533  	}
   534  	old := gp.stack
   535  	if old.lo == 0 {
   536  		gothrow("nil stackbase")
   537  	}
   538  	used := old.hi - gp.sched.sp
   539  
   540  	// allocate new stack
   541  	new := stackalloc(uint32(newsize))
   542  	if stackPoisonCopy != 0 {
   543  		fillstack(new, 0xfd)
   544  	}
   545  	if stackDebug >= 1 {
   546  		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", old.hi-old.lo, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
   547  	}
   548  
   549  	// adjust pointers in the to-be-copied frames
   550  	var adjinfo adjustinfo
   551  	adjinfo.old = old
   552  	adjinfo.delta = new.hi - old.hi
   553  	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)
   554  
   555  	// adjust other miscellaneous things that have pointers into stacks.
   556  	adjustctxt(gp, &adjinfo)
   557  	adjustdefers(gp, &adjinfo)
   558  	adjustpanics(gp, &adjinfo)
   559  	adjustsudogs(gp, &adjinfo)
   560  
   561  	// copy the stack to the new location
   562  	if stackPoisonCopy != 0 {
   563  		fillstack(new, 0xfb)
   564  	}
   565  	memmove(unsafe.Pointer(new.hi-used), unsafe.Pointer(old.hi-used), used)
   566  
   567  	// Swap out old stack for new one
   568  	gp.stack = new
   569  	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
   570  	gp.sched.sp = new.hi - used
   571  
   572  	// free old stack
   573  	if stackPoisonCopy != 0 {
   574  		fillstack(old, 0xfc)
   575  	}
   576  	if newsize > old.hi-old.lo {
   577  		// growing, free stack immediately
   578  		stackfree(old)
   579  	} else {
   580  		// shrinking, queue up free operation.  We can't actually free the stack
   581  		// just yet because we might run into the following situation:
   582  		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
   583  		// 2) The stack that pointer points to is shrunk
   584  		// 3) The old stack is freed
   585  		// 4) The containing span is marked free
   586  		// 5) GC attempts to mark the SudoG.elem pointer.  The marking fails because
   587  		//    the pointer looks like a pointer into a free span.
   588  		// By not freeing, we prevent step #4 until GC is done.
   589  		lock(&stackpoolmu)
   590  		*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
   591  		stackfreequeue = old
   592  		unlock(&stackpoolmu)
   593  	}
   594  }
   595  
   596  // round x up to a power of 2.
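        // For example: round2(1) == 1, round2(3) == 4, round2(4) == 4,
        // round2(5) == 8.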
   597  func round2(x int32) int32 {
   598  	s := uint(0)
   599  	for 1<<s < x {
   600  		s++
   601  	}
   602  	return 1 << s
   603  }
   604  
   605  // Called from runtime·morestack when more stack is needed.
   606  // Allocate larger stack and relocate to new stack.
   607  // Stack growth is multiplicative, for constant amortized cost.
   608  //
   609  // g->atomicstatus will be Grunning or Gscanrunning upon entry.
   610  // If the GC is trying to stop this g then it will set preemptscan to true.
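        // Because each grow doubles the stack (newsize = oldsize*2 below), a
        // goroutine whose stack eventually reaches N bytes copies at most
        // N/2 + N/4 + ... < N bytes over all of its grows, which is what keeps
        // the amortized cost constant.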
   611  func newstack() {
   612  	thisg := getg()
   613  	// TODO: double check all gp. shouldn't be getg().
   614  	if thisg.m.morebuf.g.stackguard0 == stackFork {
   615  		gothrow("stack growth after fork")
   616  	}
   617  	if thisg.m.morebuf.g != thisg.m.curg {
   618  		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
   619  		morebuf := thisg.m.morebuf
   620  		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
   621  		gothrow("runtime: wrong goroutine in newstack")
   622  	}
   623  	if thisg.m.curg.throwsplit {
   624  		gp := thisg.m.curg
   625  		// Update syscallsp, syscallpc in case traceback uses them.
   626  		morebuf := thisg.m.morebuf
   627  		gp.syscallsp = morebuf.sp
   628  		gp.syscallpc = morebuf.pc
   629  		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   630  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   631  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   632  		gothrow("runtime: stack split at bad time")
   633  	}
   634  
   635  	// The goroutine must be executing in order to call newstack,
   636  	// so it must be Grunning or Gscanrunning.
   637  
   638  	gp := thisg.m.curg
   639  	morebuf := thisg.m.morebuf
   640  	thisg.m.morebuf.pc = 0
   641  	thisg.m.morebuf.lr = 0
   642  	thisg.m.morebuf.sp = 0
   643  	thisg.m.morebuf.g = nil
   644  
   645  	casgstatus(gp, _Grunning, _Gwaiting)
   646  	gp.waitreason = "stack growth"
   647  
   648  	rewindmorestack(&gp.sched)
   649  
   650  	if gp.stack.lo == 0 {
   651  		gothrow("missing stack in newstack")
   652  	}
   653  	sp := gp.sched.sp
   654  	if thechar == '6' || thechar == '8' {
   655  		// The call to morestack cost a word.
   656  		sp -= ptrSize
   657  	}
   658  	if stackDebug >= 1 || sp < gp.stack.lo {
   659  		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
   660  			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
   661  			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
   662  	}
   663  	if sp < gp.stack.lo {
   664  		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
   665  		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
   666  		gothrow("runtime: split stack overflow")
   667  	}
   668  
   669  	if gp.sched.ctxt != nil {
   670  		// morestack wrote sched.ctxt on its way in here,
   671  		// without a write barrier. Run the write barrier now.
   672  		// It is not possible to be preempted between then
   673  		// and now, so it's okay.
   674  		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
   675  	}
   676  
   677  	if gp.stackguard0 == stackPreempt {
   678  		if gp == thisg.m.g0 {
   679  			gothrow("runtime: preempt g0")
   680  		}
   681  		if thisg.m.p == nil && thisg.m.locks == 0 {
   682  			gothrow("runtime: g is running but p is not")
   683  		}
   684  		if gp.preemptscan {
   685  			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
   686  				// Likely to be racing with the GC as it sees a _Gwaiting and does the stack scan.
   687  				// If so this stack will be scanned twice which does not change correctness.
   688  			}
   689  			gcphasework(gp)
   690  			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
   691  			casgstatus(gp, _Gwaiting, _Grunning)
   692  			gp.stackguard0 = gp.stack.lo + _StackGuard
   693  			gp.preempt = false
   694  			gp.preemptscan = false // Tells the GC that preemption was successful.
   695  			gogo(&gp.sched)        // never return
   696  		}
   697  
   698  		// Be conservative about where we preempt.
   699  		// We are interested in preempting user Go code, not runtime code.
   700  		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.gcing != 0 || thisg.m.p.status != _Prunning {
   701  			// Let the goroutine keep running for now.
   702  			// gp->preempt is set, so it will be preempted next time.
   703  			gp.stackguard0 = gp.stack.lo + _StackGuard
   704  			casgstatus(gp, _Gwaiting, _Grunning)
   705  			gogo(&gp.sched) // never return
   706  		}
   707  
   708  		// Act like goroutine called runtime.Gosched.
   709  		casgstatus(gp, _Gwaiting, _Grunning)
   710  		gosched_m(gp) // never return
   711  	}
   712  
   713  	// Allocate a bigger segment and move the stack.
   714  	oldsize := int(gp.stack.hi - gp.stack.lo)
   715  	newsize := oldsize * 2
   716  	if uintptr(newsize) > maxstacksize {
   717  		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
   718  		gothrow("stack overflow")
   719  	}
   720  
   721  	casgstatus(gp, _Gwaiting, _Gcopystack)
   722  
   723  	// The concurrent GC will not scan the stack while we are doing the copy since
   724  	// the gp is in a Gcopystack status.
   725  	copystack(gp, uintptr(newsize))
   726  	if stackDebug >= 1 {
   727  		print("stack grow done\n")
   728  	}
   729  	casgstatus(gp, _Gcopystack, _Grunning)
   730  	gogo(&gp.sched)
   731  }
   732  
   733  //go:nosplit
   734  func nilfunc() {
   735  	*(*uint8)(nil) = 0
   736  }
   737  
   738  // adjust Gobuf as if it executed a call to fn
   739  // and then did an immediate gosave.
   740  func gostartcallfn(gobuf *gobuf, fv *funcval) {
   741  	var fn unsafe.Pointer
   742  	if fv != nil {
   743  		fn = (unsafe.Pointer)(fv.fn)
   744  	} else {
   745  		fn = unsafe.Pointer(funcPC(nilfunc))
   746  	}
   747  	gostartcall(gobuf, fn, (unsafe.Pointer)(fv))
   748  }
   749  
   750  // Maybe shrink the stack being used by gp.
   751  // Called at garbage collection time.
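        // The stack is halved only if less than a quarter of it is in use and the
        // result would not drop below _FixedStack.  With illustrative sizes, a
        // 32 KB stack with 6 KB in use shrinks to 16 KB, while the same stack
        // with 10 KB in use is left alone.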
   752  func shrinkstack(gp *g) {
   753  	if readgstatus(gp) == _Gdead {
   754  		if gp.stack.lo != 0 {
   755  			// Free whole stack - it will get reallocated
   756  			// if G is used again.
   757  			stackfree(gp.stack)
   758  			gp.stack.lo = 0
   759  			gp.stack.hi = 0
   760  		}
   761  		return
   762  	}
   763  	if gp.stack.lo == 0 {
   764  		gothrow("missing stack in shrinkstack")
   765  	}
   766  
   767  	oldsize := gp.stack.hi - gp.stack.lo
   768  	newsize := oldsize / 2
   769  	if newsize < _FixedStack {
   770  		return // don't shrink below the minimum-sized stack
   771  	}
   772  	used := gp.stack.hi - gp.sched.sp
   773  	if used >= oldsize/4 {
   774  		return // still using at least 1/4 of the segment.
   775  	}
   776  
   777  	// We can't copy the stack if we're in a syscall.
   778  	// The syscall might have pointers into the stack.
   779  	if gp.syscallsp != 0 {
   780  		return
   781  	}
   782  	if goos_windows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
   783  		return
   784  	}
   785  
   786  	if stackDebug > 0 {
   787  		print("shrinking stack ", oldsize, "->", newsize, "\n")
   788  	}
   789  
   790  	oldstatus := casgcopystack(gp)
   791  	copystack(gp, newsize)
   792  	casgstatus(gp, _Gcopystack, oldstatus)
   793  }
   794  
   795  // Do any delayed stack freeing that was queued up during GC.
   796  func shrinkfinish() {
   797  	lock(&stackpoolmu)
   798  	s := stackfreequeue
   799  	stackfreequeue = stack{}
   800  	unlock(&stackpoolmu)
   801  	for s.lo != 0 {
   802  		t := *(*stack)(unsafe.Pointer(s.lo))
   803  		stackfree(s)
   804  		s = t
   805  	}
   806  }
   807  
   808  //go:nosplit
   809  func morestackc() {
   810  	systemstack(func() {
   811  		gothrow("attempt to execute C code on Go stack")
   812  	})
   813  }