github.com/xushiwei/go@v0.0.0-20130601165731-2b9d83f45bc9/src/pkg/runtime/stack.c (about)

     1  // Copyright 2013 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  #include "runtime.h"
     6  #include "arch_GOARCH.h"
     7  #include "malloc.h"
     8  #include "stack.h"
     9  
typedef struct StackCacheNode StackCacheNode;
// One batch of cached fixed-size stack segments on the global free list.
// The node header itself occupies the first segment of the batch (see
// stackcacherefill); batch[] holds pointers to the remaining
// StackCacheBatch-1 segments.
struct StackCacheNode
{
	StackCacheNode *next;
	void*	batch[StackCacheBatch-1];
};

// Global free list of stack-segment batches, protected by stackcachemu.
static StackCacheNode *stackcache;
static Lock stackcachemu;
    19  
// stackcacherefill/stackcacherelease implement a global cache of stack segments.
// The cache is required to prevent unlimited growth of per-thread caches.
//
// stackcacherefill moves one batch of StackCacheBatch segments from the
// global cache (allocating a fresh batch from the OS if the global cache
// is empty) into this M's local ring buffer.
static void
stackcacherefill(void)
{
	StackCacheNode *n;
	int32 i, pos;

	// Try to take an existing batch off the global list first.
	runtime·lock(&stackcachemu);
	n = stackcache;
	if(n)
		stackcache = n->next;
	runtime·unlock(&stackcachemu);
	if(n == nil) {
		// Global cache empty: carve StackCacheBatch segments of
		// FixedStack bytes each out of one SysAlloc region.  The
		// region's first segment doubles as the StackCacheNode
		// header; batch[] records the other StackCacheBatch-1.
		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch);
		if(n == nil)
			runtime·throw("out of memory (stackcacherefill)");
		runtime·xadd64(&mstats.stacks_sys, FixedStack*StackCacheBatch);
		for(i = 0; i < StackCacheBatch-1; i++)
			n->batch[i] = (byte*)n + (i+1)*FixedStack;
	}
	// Push all StackCacheBatch segments (batch entries plus the node
	// itself, pushed last) onto the local ring at stackcachepos.
	pos = m->stackcachepos;
	for(i = 0; i < StackCacheBatch-1; i++) {
		m->stackcache[pos] = n->batch[i];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcache[pos] = n;
	pos = (pos + 1) % StackCacheSize;
	m->stackcachepos = pos;
	m->stackcachecnt += StackCacheBatch;
}
    51  
// stackcacherelease moves one batch of StackCacheBatch segments from this
// M's local ring buffer back to the global cache.  Called when the local
// cache is full (see runtime·stackfree).
static void
stackcacherelease(void)
{
	StackCacheNode *n;
	uint32 i, pos;

	// Start at the oldest cached segment; it becomes the StackCacheNode
	// header and the next StackCacheBatch-1 segments fill its batch[].
	// NOTE(review): the unsigned (pos - cnt) % StackCacheSize wraparound
	// is only correct if StackCacheSize is a power of two — confirm in
	// stack.h.
	pos = (m->stackcachepos - m->stackcachecnt) % StackCacheSize;
	n = (StackCacheNode*)m->stackcache[pos];
	pos = (pos + 1) % StackCacheSize;
	for(i = 0; i < StackCacheBatch-1; i++) {
		n->batch[i] = m->stackcache[pos];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcachecnt -= StackCacheBatch;
	// Prepend the assembled batch onto the global free list.
	runtime·lock(&stackcachemu);
	n->next = stackcache;
	stackcache = n;
	runtime·unlock(&stackcachemu);
}
    71  
// runtime·stackalloc returns a new stack segment of n bytes.
// Must run on the scheduler stack (g0); ordinary requests come from the
// garbage-collected heap, while requests made during malloc/GC are served
// from the per-M fixed-size segment cache to avoid deadlock.
void*
runtime·stackalloc(uint32 n)
{
	uint32 pos;
	void *v;

	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	if(g != m->g0)
		runtime·throw("stackalloc not on scheduler stack");

	// Stack allocator uses malloc/free most of the time,
	// but if we're in the middle of malloc and need stack,
	// we have to do something else to avoid deadlock.
	// In that case, we fall back on a fixed-size free-list
	// allocator, assuming that inside malloc all the stack
	// frames are small, so that all the stack allocations
	// will be a single size, the minimum (right now, 5k).
	if(n == FixedStack || m->mallocing || m->gcing) {
		// Inside malloc/GC only FixedStack-sized requests are legal.
		if(n != FixedStack) {
			runtime·printf("stackalloc: in malloc, size=%d want %d\n", FixedStack, n);
			runtime·throw("stackalloc");
		}
		if(m->stackcachecnt == 0)
			stackcacherefill();
		// Pop the most recently pushed segment (LIFO) off the ring.
		// NOTE(review): the unsigned (pos - 1) % StackCacheSize wrap at
		// pos==0 is only correct if StackCacheSize is a power of two —
		// confirm in stack.h.
		pos = m->stackcachepos;
		pos = (pos - 1) % StackCacheSize;
		v = m->stackcache[pos];
		m->stackcachepos = pos;
		m->stackcachecnt--;
		m->stackinuse++;
		return v;
	}
	// Ordinary case: heap allocation, excluded from profiling and from
	// garbage collection of the block itself.
	return runtime·mallocgc(n, FlagNoProfiling|FlagNoGC, 0, 0);
}
   108  
   109  void
   110  runtime·stackfree(void *v, uintptr n)
   111  {
   112  	uint32 pos;
   113  
   114  	if(n == FixedStack || m->mallocing || m->gcing) {
   115  		if(m->stackcachecnt == StackCacheSize)
   116  			stackcacherelease();
   117  		pos = m->stackcachepos;
   118  		m->stackcache[pos] = v;
   119  		m->stackcachepos = (pos + 1) % StackCacheSize;
   120  		m->stackcachecnt++;
   121  		m->stackinuse--;
   122  		return;
   123  	}
   124  	runtime·free(v);
   125  }
   126  
// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment.  The function's return value is in
// m->cret.  Copies the returned argument block back into the caller's
// frame on the old segment, frees the segment being left, and resumes
// execution at the caller's saved gobuf.
void
runtime·oldstack(void)
{
	Stktop *top;
	Gobuf label;
	uint32 argsize;
	uintptr cret;
	byte *sp, *old;
	uintptr *src, *dst, *dstend;
	G *gp;
	int64 goid;

//printf("oldstack m->cret=%p\n", m->cret);

	gp = m->curg;
	top = (Stktop*)gp->stackbase;
	// old = base of the segment we are about to discard.
	old = (byte*)gp->stackguard - StackGuard;
	sp = (byte*)top;
	argsize = top->argsize;
	if(argsize > 0) {
		// Arguments live just below the Stktop (see runtime·newstack);
		// copy them back to the caller's frame at top->argp.
		sp -= argsize;
		dst = (uintptr*)top->argp;
		dstend = dst + argsize/sizeof(*dst);
		src = (uintptr*)sp;
		while(dst < dstend)
			*dst++ = *src++;
	}
	goid = top->gobuf.g->goid;	// fault if g is bad, before gogo
	USED(goid);

	// Snapshot the resume gobuf out of the segment BEFORE freeing it,
	// then restore the old segment's extent in the G.
	label = top->gobuf;
	gp->stackbase = (uintptr)top->stackbase;
	gp->stackguard = (uintptr)top->stackguard;
	// top->free == 0 means the Stktop was placed in-place on the old
	// segment (reflect·call fast path) and there is nothing to free.
	if(top->free != 0)
		runtime·stackfree(old, top->free);

	cret = m->cret;
	m->cret = 0;  // drop reference
	runtime·gogo(&label, cret);
}
   170  
// Called from reflect·call or from runtime·morestack when a new
// stack segment is needed.  Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
// Never returns: control leaves via gogocall/gogocallfn.
void
runtime·newstack(void)
{
	int32 framesize, minalloc, argsize;
	Stktop *top;
	byte *stk, *sp;
	uintptr *src, *dst, *dstend;
	G *gp;
	Gobuf label;
	bool reflectcall;
	uintptr free;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	gp = m->curg;

	// Sanity checks: the faulting SP must lie within the current
	// segment, and the argument block must be pointer-aligned.
	if(m->morebuf.sp < gp->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", m->morebuf.sp, gp->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}
	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

	// framesize==1 is a sentinel meaning "called from reflect·call",
	// i.e. the real frame size is unknown.
	minalloc = 0;
	reflectcall = framesize==1;
	if(reflectcall) {
		framesize = 0;
		// moreframesize_minalloc is only set in runtime·gc(),
		// that calls newstack via reflect·call().
		minalloc = m->moreframesize_minalloc;
		m->moreframesize_minalloc = 0;
		if(framesize < minalloc)
			framesize = minalloc;
	}

	if(reflectcall && minalloc == 0 && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > gp->stackguard) {
		// special case: called from reflect.call (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)gp->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		stk = runtime·stackalloc(framesize);
		// Stktop sits at the very top (highest address) of the segment.
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

	// Disabled debugging printout.
	if(0) {
		runtime·printf("newstack framesize=%d argsize=%d morepc=%p moreargp=%p gobuf=%p, %p top=%p old=%p\n",
			framesize, argsize, m->morepc, m->moreargp, m->morebuf.pc, m->morebuf.sp, top, gp->stackbase);
	}

	// Record the old segment's extent and the caller's resume point in
	// the new Stktop so runtime·lessstack/oldstack can unwind later;
	// then clear the m->more* handoff fields.
	top->stackbase = (byte*)gp->stackbase;
	top->stackguard = (byte*)gp->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;	// bytes oldstack must stackfree (0 = in-place Stktop)
	m->moreargp = nil;
	m->morebuf.pc = nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = gp->ispanic;
	gp->ispanic = false;

	// Switch the G onto the new segment.
	gp->stackbase = (uintptr)top;
	gp->stackguard = (uintptr)stk + StackGuard;

	// Copy the argument block to just below the new Stktop so the callee
	// finds its arguments where the calling convention expects them.
	sp = (byte*)top;
	if(argsize > 0) {
		sp -= argsize;
		dst = (uintptr*)sp;
		dstend = dst + argsize/sizeof(*dst);
		src = (uintptr*)top->argp;
		while(dst < dstend)
			*dst++ = *src++;
	}
	if(thechar == '5') {
		// thechar=='5' is ARM: caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	label.sp = (uintptr)sp;
	label.pc = (byte*)runtime·lessstack;
	label.g = m->curg;
	if(reflectcall)
		runtime·gogocallfn(&label, (FuncVal*)m->morepc);
	else
		runtime·gogocall(&label, m->morepc, m->cret);

	// Deliberate fault: gogocall must not return.
	*(int32*)345 = 123;	// never return
}