github.com/rohankumardubey/syslog-redirector-golang@v0.0.0-20140320174030-4859f03d829a/src/pkg/runtime/stack.c

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "stack.h"

enum
{
	StackDebug = 0,
};

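// A StackCacheNode heads one batch of StackCacheBatch fixed-size stacks
// in the global cache.  The node header is stored inside one of the
// cached stacks themselves (see stackcacherefill/stackcacherelease), so
// batch[] only needs room for the other StackCacheBatch-1 entries and
// the cache needs no separate bookkeeping allocations.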
typedef struct StackCacheNode StackCacheNode;
struct StackCacheNode
{
	StackCacheNode *next;
	void*	batch[StackCacheBatch-1];
};

static StackCacheNode *stackcache;
static Lock stackcachemu;

// stackcacherefill/stackcacherelease implement a global cache of stack segments.
// The cache is required to prevent unlimited growth of per-thread caches.
static void
stackcacherefill(void)
{
	StackCacheNode *n;
	int32 i, pos;

	runtime·lock(&stackcachemu);
	n = stackcache;
	if(n)
		stackcache = n->next;
	runtime·unlock(&stackcachemu);
	if(n == nil) {
		n = (StackCacheNode*)runtime·SysAlloc(FixedStack*StackCacheBatch, &mstats.stacks_sys);
		if(n == nil)
			runtime·throw("out of memory (stackcacherefill)");
		for(i = 0; i < StackCacheBatch-1; i++)
			n->batch[i] = (byte*)n + (i+1)*FixedStack;
	}
	pos = m->stackcachepos;
	for(i = 0; i < StackCacheBatch-1; i++) {
		m->stackcache[pos] = n->batch[i];
		pos = (pos + 1) % StackCacheSize;
	}
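	// The node's own memory is handed out as the final stack of the
	// batch, so all StackCacheBatch stacks end up in this M's ring buffer.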
	m->stackcache[pos] = n;
	pos = (pos + 1) % StackCacheSize;
	m->stackcachepos = pos;
	m->stackcachecnt += StackCacheBatch;
}

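// stackcacherelease returns the oldest StackCacheBatch stacks from this
// M's ring buffer to the global cache, reusing the first stack handed
// back as the memory for the batch's StackCacheNode header.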
static void
stackcacherelease(void)
{
	StackCacheNode *n;
	uint32 i, pos;

	pos = (m->stackcachepos - m->stackcachecnt) % StackCacheSize;
	n = (StackCacheNode*)m->stackcache[pos];
	pos = (pos + 1) % StackCacheSize;
	for(i = 0; i < StackCacheBatch-1; i++) {
		n->batch[i] = m->stackcache[pos];
		pos = (pos + 1) % StackCacheSize;
	}
	m->stackcachecnt -= StackCacheBatch;
	runtime·lock(&stackcachemu);
	n->next = stackcache;
	stackcache = n;
	runtime·unlock(&stackcachemu);
}

void*
runtime·stackalloc(uint32 n)
{
	uint32 pos;
	void *v;

	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	if(g != m->g0)
		runtime·throw("stackalloc not on scheduler stack");

	// Stacks are usually allocated with a fixed-size free-list allocator,
	// but if we need a stack of non-standard size, we fall back on malloc
	// (assuming that inside malloc and GC all the stack frames are small,
	// so that we do not deadlock).
	if(n == FixedStack || m->mallocing || m->gcing) {
		if(n != FixedStack) {
			runtime·printf("stackalloc: in malloc, size=%d want %d\n", FixedStack, n);
			runtime·throw("stackalloc");
		}
		if(m->stackcachecnt == 0)
			stackcacherefill();
		pos = m->stackcachepos;
		pos = (pos - 1) % StackCacheSize;
		v = m->stackcache[pos];
		m->stackcachepos = pos;
		m->stackcachecnt--;
		m->stackinuse++;
		return v;
	}
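	// Non-standard sizes come from the general allocator.  Roughly, the
	// flags ask for memory that is not profiled (FlagNoProfiling), never
	// freed by the garbage collector (FlagNoGC), not zeroed on allocation
	// (FlagNoZero), and whose allocation cannot itself trigger a
	// collection (FlagNoInvokeGC); stacks are managed entirely by hand.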
	return runtime·mallocgc(n, 0, FlagNoProfiling|FlagNoGC|FlagNoZero|FlagNoInvokeGC);
}

void
runtime·stackfree(void *v, uintptr n)
{
	uint32 pos;

	if(n == FixedStack || m->mallocing || m->gcing) {
		if(m->stackcachecnt == StackCacheSize)
			stackcacherelease();
		pos = m->stackcachepos;
		m->stackcache[pos] = v;
		m->stackcachepos = (pos + 1) % StackCacheSize;
		m->stackcachecnt++;
		m->stackinuse--;
		return;
	}
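	// Non-standard sizes were allocated with runtime·mallocgc in
	// runtime·stackalloc, so return them to the general allocator.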
	runtime·free(v);
}

// Called from runtime·lessstack when returning from a function which
// allocated a new stack segment.  The function's return value is in
// m->cret.
void
runtime·oldstack(void)
{
	Stktop *top;
	uint32 argsize;
	byte *sp, *old;
	uintptr *src, *dst, *dstend;
	G *gp;
	int64 goid;
	int32 oldstatus;

	gp = m->curg;
	top = (Stktop*)gp->stackbase;
	old = (byte*)gp->stackguard - StackGuard;
	sp = (byte*)top;
	argsize = top->argsize;

	if(StackDebug) {
		runtime·printf("runtime: oldstack gobuf={pc:%p sp:%p lr:%p} cret=%p argsize=%p\n",
			top->gobuf.pc, top->gobuf.sp, top->gobuf.lr, m->cret, (uintptr)argsize);
	}

	// gp->status is usually Grunning, but it could be Gsyscall if a stack split
	// happens during a function call inside entersyscall.
	oldstatus = gp->status;

	gp->sched = top->gobuf;
	gp->sched.ret = m->cret;
	m->cret = 0; // drop reference
	gp->status = Gwaiting;
	gp->waitreason = "stack unsplit";

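	// Copy the return arguments from just below the Stktop of the
	// segment being unwound back into the caller's argument area
	// (top->argp) on the old segment.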
	if(argsize > 0) {
		sp -= argsize;
		dst = (uintptr*)top->argp;
		dstend = dst + argsize/sizeof(*dst);
		src = (uintptr*)sp;
		while(dst < dstend)
			*dst++ = *src++;
	}
	goid = top->gobuf.g->goid;	// fault if g is bad, before gogo
	USED(goid);

	gp->stackbase = top->stackbase;
	gp->stackguard = top->stackguard;
	gp->stackguard0 = gp->stackguard;
	gp->panicwrap = top->panicwrap;

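	// Free the segment we are leaving.  top->free is 0 when the Stktop
	// was placed on the existing segment (the newstackcall special case
	// in runtime·newstack), in which case there is nothing to release.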
	if(top->free != 0) {
		gp->stacksize -= top->free;
		runtime·stackfree(old, top->free);
	}

	gp->status = oldstatus;
	runtime·gogo(&gp->sched);
}

uintptr runtime·maxstacksize = 1<<20; // enough until runtime.main sets it for real

// Called from runtime·newstackcall or from runtime·morestack when a new
// stack segment is needed.  Allocate a new stack big enough for
// m->moreframesize bytes, copy m->moreargsize bytes to the new frame,
// and then act as though runtime·lessstack called the function at
// m->morepc.
void
runtime·newstack(void)
{
	int32 framesize, argsize, oldstatus;
	Stktop *top, *oldtop;
	byte *stk;
	uintptr sp;
	uintptr *src, *dst, *dstend;
	G *gp;
	Gobuf label;
	bool newstackcall;
	uintptr free;

	if(m->morebuf.g != m->curg) {
		runtime·printf("runtime: newstack called from g=%p\n"
			"\tm=%p m->curg=%p m->g0=%p m->gsignal=%p\n",
			m->morebuf.g, m, m->curg, m->g0, m->gsignal);
		runtime·throw("runtime: wrong goroutine in newstack");
	}

	// gp->status is usually Grunning, but it could be Gsyscall if a stack split
	// happens during a function call inside entersyscall.
	gp = m->curg;
	oldstatus = gp->status;

	framesize = m->moreframesize;
	argsize = m->moreargsize;
	gp->status = Gwaiting;
	gp->waitreason = "stack split";
	newstackcall = framesize==1;
	if(newstackcall)
		framesize = 0;

	// For newstackcall the context already points to the beginning of runtime·newstackcall.
	if(!newstackcall)
		runtime·rewindmorestack(&gp->sched);

	sp = gp->sched.sp;
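	// thechar identifies the architecture: '5' is arm, '6' is amd64, '8' is 386.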
	if(thechar == '6' || thechar == '8') {
		// The call to morestack cost a word.
		sp -= sizeof(uintptr);
	}
	if(StackDebug || sp < gp->stackguard - StackGuard) {
		runtime·printf("runtime: newstack framesize=%p argsize=%p sp=%p stack=[%p, %p]\n"
			"\tmorebuf={pc:%p sp:%p lr:%p}\n"
			"\tsched={pc:%p sp:%p lr:%p ctxt:%p}\n",
			(uintptr)framesize, (uintptr)argsize, sp, gp->stackguard - StackGuard, gp->stackbase,
			m->morebuf.pc, m->morebuf.sp, m->morebuf.lr,
			gp->sched.pc, gp->sched.sp, gp->sched.lr, gp->sched.ctxt);
	}
	if(sp < gp->stackguard - StackGuard) {
		runtime·printf("runtime: split stack overflow: %p < %p\n", sp, gp->stackguard - StackGuard);
		runtime·throw("runtime: split stack overflow");
	}

	if(argsize % sizeof(uintptr) != 0) {
		runtime·printf("runtime: stack split with misaligned argsize %d\n", argsize);
		runtime·throw("runtime: stack split argsize");
	}

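	// The scheduler requests a preemption by setting stackguard0 to the
	// StackPreempt sentinel, which makes the next function prologue take
	// the morestack path even though the stack has room.  Handle that
	// case here before committing to a real split.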
	if(gp->stackguard0 == (uintptr)StackPreempt) {
		if(gp == m->g0)
			runtime·throw("runtime: preempt g0");
		if(oldstatus == Grunning && m->p == nil && m->locks == 0)
			runtime·throw("runtime: g is running but p is not");
		if(oldstatus == Gsyscall && m->locks == 0)
			runtime·throw("runtime: stack split during syscall");
		// Be conservative about where we preempt.
		// We are interested in preempting user Go code, not runtime code.
		if(oldstatus != Grunning || m->locks || m->mallocing || m->gcing || m->p->status != Prunning) {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp->stackguard0 = gp->stackguard;
			gp->status = oldstatus;
			runtime·gogo(&gp->sched);	// never return
		}
		// Act like goroutine called runtime.Gosched.
		gp->status = oldstatus;
		runtime·gosched0(gp);	// never return
	}

	if(newstackcall && m->morebuf.sp - sizeof(Stktop) - argsize - 32 > gp->stackguard) {
		// special case: called from runtime.newstackcall (framesize==1)
		// to call code with an arbitrary argument size,
		// and we have enough space on the current stack.
		// the new Stktop* is necessary to unwind, but
		// we don't need to create a new segment.
		top = (Stktop*)(m->morebuf.sp - sizeof(*top));
		stk = (byte*)gp->stackguard - StackGuard;
		free = 0;
	} else {
		// allocate new segment.
		framesize += argsize;
		framesize += StackExtra;	// room for more functions, Stktop.
		if(framesize < StackMin)
			framesize = StackMin;
		framesize += StackSystem;
		gp->stacksize += framesize;
		if(gp->stacksize > runtime·maxstacksize) {
			runtime·printf("runtime: goroutine stack exceeds %D-byte limit\n", (uint64)runtime·maxstacksize);
			runtime·throw("stack overflow");
		}
		stk = runtime·stackalloc(framesize);
		top = (Stktop*)(stk+framesize-sizeof(*top));
		free = framesize;
	}

	if(StackDebug) {
		runtime·printf("\t-> new stack [%p, %p]\n", stk, top);
	}

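	// Record the old segment's bounds and the caller's resume state in
	// the Stktop at the top of the new segment; runtime·lessstack and
	// runtime·oldstack use this record to unwind later.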
	top->stackbase = gp->stackbase;
	top->stackguard = gp->stackguard;
	top->gobuf = m->morebuf;
	top->argp = m->moreargp;
	top->argsize = argsize;
	top->free = free;
	m->moreargp = nil;
	m->morebuf.pc = (uintptr)nil;
	m->morebuf.lr = (uintptr)nil;
	m->morebuf.sp = (uintptr)nil;

	// copy flag from panic
	top->panic = gp->ispanic;
	gp->ispanic = false;

	// if this isn't a panic, maybe we're splitting the stack for a panic.
	// if we're splitting in the top frame, propagate the panic flag
	// forward so that recover will know we're in a panic.
	oldtop = (Stktop*)top->stackbase;
	if(oldtop != nil && oldtop->panic && top->argp == (byte*)oldtop - oldtop->argsize - gp->panicwrap)
		top->panic = true;

	top->panicwrap = gp->panicwrap;
	gp->panicwrap = 0;

	gp->stackbase = (uintptr)top;
	gp->stackguard = (uintptr)stk + StackGuard;
	gp->stackguard0 = gp->stackguard;

	sp = (uintptr)top;
	if(argsize > 0) {
		sp -= argsize;
		dst = (uintptr*)sp;
		dstend = dst + argsize/sizeof(*dst);
		src = (uintptr*)top->argp;
		while(dst < dstend)
			*dst++ = *src++;
	}
	if(thechar == '5') {
		// caller would have saved its LR below args.
		sp -= sizeof(void*);
		*(void**)sp = nil;
	}

	// Continue as if lessstack had just called m->morepc
	// (the PC that decided to grow the stack).
	runtime·memclr((byte*)&label, sizeof label);
	label.sp = sp;
	label.pc = (uintptr)runtime·lessstack;
	label.g = m->curg;
	if(newstackcall)
		runtime·gostartcallfn(&label, (FuncVal*)m->cret);
	else {
		runtime·gostartcall(&label, (void(*)(void))gp->sched.pc, gp->sched.ctxt);
		gp->sched.ctxt = nil;
	}
	gp->status = oldstatus;
	runtime·gogo(&label);

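	// gogo switched to the new stack and does not return here; the store
	// to an unmapped address below is a deliberate crash in case it ever does.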
	*(int32*)345 = 123;	// never return
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
void
runtime·gostartcallfn(Gobuf *gobuf, FuncVal *fv)
{
	runtime·gostartcall(gobuf, fv->fn, fv);
}

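// runtime∕debug·setMaxStack implements the Go function setMaxStack in
// package runtime/debug.  Under the old convention for Go functions
// written in C, in is the argument and out the result slot in the Go
// caller's frame: out receives the previous limit, and FLUSH forces the
// store to out so the caller sees it.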
void
runtime∕debug·setMaxStack(intgo in, intgo out)
{
	out = runtime·maxstacksize;
	runtime·maxstacksize = in;
	FLUSH(&out);
}