github.com/guyezi/gofrontend@v0.0.0-20200228202240-7a62a49e62c0/libgo/runtime/proc.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
#include <unistd.h>

#include "config.h"

#ifdef HAVE_DL_ITERATE_PHDR
#include <link.h>
#endif

#include "runtime.h"
#include "arch.h"
#include "defs.h"

#ifdef USING_SPLIT_STACK

/* FIXME: These are not declared anywhere.  */
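/* They appear to be the split-stack runtime support routines provided
   by libgcc (generic-morestack.c); the prototypes below are what this
   file assumes.  */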

extern void __splitstack_getcontext(void *context[10]);

extern void __splitstack_setcontext(void *context[10]);

extern void *__splitstack_makecontext(size_t, void *context[10], size_t *);

extern void *__splitstack_resetcontext(void *context[10], size_t *);

extern void __splitstack_releasecontext(void *context[10]);

extern void *__splitstack_find(void *, void *, size_t *, void **, void **,
			       void **);

extern void __splitstack_block_signals (int *, int *);

extern void __splitstack_block_signals_context (void *context[10], int *,
						int *);

#endif

#ifndef PTHREAD_STACK_MIN
# define PTHREAD_STACK_MIN 8192
#endif

#if defined(USING_SPLIT_STACK) && defined(LINKER_SUPPORTS_SPLIT_STACK)
# define StackMin PTHREAD_STACK_MIN
#else
# define StackMin ((sizeof(char *) < 8) ? 2 * 1024 * 1024 : 4 * 1024 * 1024)
#endif
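
// With split stacks the initial stack can be small, since it grows on
// demand; otherwise each goroutine gets a fixed stack up front: 2M on
// 32-bit targets, 4M on 64-bit ones.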

uintptr runtime_stacks_sys;

void gtraceback(G*)
  __asm__(GOSYM_PREFIX "runtime.gtraceback");

static void gscanstack(G*);

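// On RTEMS, define __thread away so that g below becomes an ordinary
// global, presumably because TLS is not available there.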
#ifdef __rtems__
#define __thread
#endif

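// g points to the goroutine currently running on this thread; the asm
// name binds it to the runtime.g variable used by the Go code.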
__thread G *g __asm__(GOSYM_PREFIX "runtime.g");

#ifndef SETCONTEXT_CLOBBERS_TLS

static inline void
initcontext(void)
{
}

static inline void
fixcontext(__go_context_t *c __attribute__ ((unused)))
{
}

#else

# if defined(__x86_64__) && defined(__sun__)

// x86_64 Solaris 10 and 11 have a bug: setcontext switches the %fs
// register to that of the thread which called getcontext.  The effect
// is that the address of all __thread variables changes.  This bug
// also affects pthread_self() and pthread_getspecific.  We work
// around it by clobbering the context field directly to keep %fs the
// same.

static __thread greg_t fs;

static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	fs = c.uc_mcontext.gregs[REG_FSBASE];
}

static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext.gregs[REG_FSBASE] = fs;
}

# elif defined(__NetBSD__)

// NetBSD has a bug: setcontext clobbers tlsbase, so we have to save
// and restore it ourselves.

static __thread __greg_t tlsbase;

static inline void
initcontext(void)
{
	ucontext_t c;

	getcontext(&c);
	tlsbase = c.uc_mcontext._mc_tlsbase;
}

static inline void
fixcontext(ucontext_t* c)
{
	c->uc_mcontext._mc_tlsbase = tlsbase;
}

# elif defined(__sparc__)

static inline void
initcontext(void)
{
}

static inline void
fixcontext(ucontext_t *c)
{
	/* ??? Using
	     register unsigned long thread __asm__("%g7");
	     c->uc_mcontext.gregs[REG_G7] = thread;
	   results in
	     error: variable ‘thread’ might be clobbered by \
		‘longjmp’ or ‘vfork’ [-Werror=clobbered]
	   which ought to be false, as %g7 is a fixed register.  */

	if (sizeof (c->uc_mcontext.gregs[REG_G7]) == 8)
		asm ("stx %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
	else
		asm ("st %%g7, %0" : "=m"(c->uc_mcontext.gregs[REG_G7]));
}

# elif defined(_AIX)

static inline void
initcontext(void)
{
}

static inline void
fixcontext(ucontext_t* c)
{
	// Thread pointer is in r13, per 64-bit ABI.
	if (sizeof (c->uc_mcontext.jmp_context.gpr[13]) == 8)
		asm ("std 13, %0" : "=m"(c->uc_mcontext.jmp_context.gpr[13]));
}

# else

#  error unknown case for SETCONTEXT_CLOBBERS_TLS

# endif

#endif

// ucontext_arg returns a properly aligned ucontext_t value.  On some
// systems a ucontext_t value must be aligned to a 16-byte boundary.
// The g structure that has fields of type ucontext_t is defined in
// Go, and Go has no simple way to align a field to such a boundary.
// So we make the field larger in runtime2.go and pick an appropriate
// offset within the field here.
static __go_context_t*
ucontext_arg(uintptr_t* go_ucontext)
{
	uintptr_t p = (uintptr_t)go_ucontext;
	size_t align = __alignof__(__go_context_t);
	if(align > 16) {
		// We only ensured space for up to a 16 byte alignment
		// in libgo/go/runtime/runtime2.go.
		runtime_throw("required alignment of __go_context_t too large");
	}
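	// Round p up to the next multiple of align; e.g. with align == 16,
	// a p ending in ...0x28 becomes ...0x30.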
	p = (p + align - 1) &~ (uintptr_t)(align - 1);
	return (__go_context_t*)p;
}

// We can not always refer to the TLS variables directly.  The
// compiler will call tls_get_addr to get the address of the variable,
// and it may hold it in a register across a call to schedule.  When
// we get back from the call we may be running in a different thread,
// in which case the register now points to the TLS variable for a
// different thread.  We use non-inlinable functions to avoid this
// when necessary.

G* runtime_g(void) __attribute__ ((noinline, no_split_stack));

G*
runtime_g(void)
{
	return g;
}

M* runtime_m(void) __attribute__ ((noinline, no_split_stack));

M*
runtime_m(void)
{
	if(g == nil)
		return nil;
	return g->m;
}

// Set g.
void
runtime_setg(G* gp)
{
	g = gp;
}

void runtime_newosproc(M *)
  __asm__(GOSYM_PREFIX "runtime.newosproc");

// Start a new thread.
void
runtime_newosproc(M *mp)
{
	pthread_attr_t attr;
	sigset_t clear, old;
	pthread_t tid;
	int tries;
	int ret;

	if(pthread_attr_init(&attr) != 0)
		runtime_throw("pthread_attr_init");
	if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
		runtime_throw("pthread_attr_setdetachstate");

	// Block signals during pthread_create so that the new thread
	// starts with signals disabled.  It will enable them in minit.
	sigfillset(&clear);

#ifdef SIGTRAP
	// Blocking SIGTRAP reportedly breaks gdb on Alpha GNU/Linux.
	sigdelset(&clear, SIGTRAP);
#endif

	sigemptyset(&old);
	pthread_sigmask(SIG_BLOCK, &clear, &old);

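	// pthread_create fails with EAGAIN when the system lacks the
	// resources for another thread; retry a bounded number of times
	// with increasing delays before giving up.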
	for (tries = 0; tries < 20; tries++) {
		ret = pthread_create(&tid, &attr, runtime_mstart, mp);
		if (ret != EAGAIN) {
			break;
		}
		runtime_usleep((tries + 1) * 1000); // Milliseconds.
	}

	pthread_sigmask(SIG_SETMASK, &old, nil);

	if (ret != 0) {
		runtime_printf("pthread_create failed: %d\n", ret);
		runtime_throw("pthread_create");
	}

	if(pthread_attr_destroy(&attr) != 0)
		runtime_throw("pthread_attr_destroy");
}

// Switch context to a different goroutine.  This is like longjmp.
void runtime_gogo(G*) __attribute__ ((noinline));
void
runtime_gogo(G* newg)
{
#ifdef USING_SPLIT_STACK
	__splitstack_setcontext((void*)(&newg->stackcontext[0]));
#endif
	g = newg;
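	// Mark how we arrived: after the getcontext call in runtime_mcall
	// returns, fromgogo tells it whether it came back via this
	// setcontext.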
	newg->fromgogo = true;
	fixcontext(ucontext_arg(&newg->context[0]));
	__go_setcontext(ucontext_arg(&newg->context[0]));
	runtime_throw("gogo setcontext returned");
}

// Save context and call fn passing g as a parameter.  This is like
// setjmp.  Because getcontext always returns 0, unlike setjmp, we use
// g->fromgogo as a flag.  It will be true if we got here via
// setcontext.  g is nil the first time this is called in a new m.
void runtime_mcall(FuncVal *) __attribute__ ((noinline));
void
runtime_mcall(FuncVal *fv)
{
	M *mp;
	G *gp;
#ifndef USING_SPLIT_STACK
	void *afterregs;
#endif

	// Ensure that all registers are on the stack for the garbage
	// collector.
	__builtin_unwind_init();
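	// Likewise spill anything held on the secondary stack, for targets
	// that keep register state there (e.g. the ia64 register backing
	// store).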
	flush_registers_to_secondary_stack();

	gp = g;
	mp = gp->m;
	if(gp == mp->g0)
		runtime_throw("runtime: mcall called on m->g0 stack");

	if(gp != nil) {

#ifdef USING_SPLIT_STACK
		__splitstack_getcontext((void*)(&gp->stackcontext[0]));
#else
		// We have to point to an address on the stack that is
		// below the saved registers.
		gp->gcnextsp = (uintptr)(&afterregs);
		gp->gcnextsp2 = (uintptr)(secondary_stack_pointer());
#endif
		gp->fromgogo = false;
		__go_getcontext(ucontext_arg(&gp->context[0]));

		// When we return from getcontext, we may be running
		// in a new thread.  That means that g may have
		// changed.  It is a global variable, so we will
		// reload it, but the address of g may be cached in
		// our local stack frame, and that address may be
		// wrong.  Call the function to reload the value for
		// this thread.
		gp = runtime_g();
		mp = gp->m;

		if(gp->traceback != 0)
			gtraceback(gp);
		if(gp->scang != 0)
			gscanstack(gp);
	}
	if (gp == nil || !gp->fromgogo) {
#ifdef USING_SPLIT_STACK
		__splitstack_setcontext((void*)(&mp->g0->stackcontext[0]));
#endif
		mp->g0->entry = fv;
		mp->g0->param = gp;

		// It's OK to set g directly here because this case
		// can not occur if we got here via a setcontext to
		// the getcontext call just above.
		g = mp->g0;

		fixcontext(ucontext_arg(&mp->g0->context[0]));
		__go_setcontext(ucontext_arg(&mp->g0->context[0]));
		runtime_throw("runtime: mcall function returned");
	}
}

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at http://golang.org/s/go11sched.

extern G* allocg(void)
  __asm__ (GOSYM_PREFIX "runtime.allocg");

bool	runtime_isarchive;

extern void kickoff(void)
  __asm__(GOSYM_PREFIX "runtime.kickoff");
extern void minit(void)
  __asm__(GOSYM_PREFIX "runtime.minit");
extern void mstart1()
  __asm__(GOSYM_PREFIX "runtime.mstart1");
extern void stopm(void)
  __asm__(GOSYM_PREFIX "runtime.stopm");
extern void mexit(bool)
  __asm__(GOSYM_PREFIX "runtime.mexit");
extern void handoffp(P*)
  __asm__(GOSYM_PREFIX "runtime.handoffp");
extern void wakep(void)
  __asm__(GOSYM_PREFIX "runtime.wakep");
extern void stoplockedm(void)
  __asm__(GOSYM_PREFIX "runtime.stoplockedm");
extern void schedule(void)
  __asm__(GOSYM_PREFIX "runtime.schedule");
extern void execute(G*, bool)
  __asm__(GOSYM_PREFIX "runtime.execute");
extern void reentersyscall(uintptr, uintptr)
  __asm__(GOSYM_PREFIX "runtime.reentersyscall");
extern void reentersyscallblock(uintptr, uintptr)
  __asm__(GOSYM_PREFIX "runtime.reentersyscallblock");
extern G* gfget(P*)
  __asm__(GOSYM_PREFIX "runtime.gfget");
extern void acquirep(P*)
  __asm__(GOSYM_PREFIX "runtime.acquirep");
extern P* releasep(void)
  __asm__(GOSYM_PREFIX "runtime.releasep");
extern void incidlelocked(int32)
  __asm__(GOSYM_PREFIX "runtime.incidlelocked");
extern void globrunqput(G*)
  __asm__(GOSYM_PREFIX "runtime.globrunqput");
extern P* pidleget(void)
  __asm__(GOSYM_PREFIX "runtime.pidleget");
extern struct mstats* getMemstats(void)
  __asm__(GOSYM_PREFIX "runtime.getMemstats");

bool runtime_isstarted;

// Used to determine the alignment of a pointer field (via offsetof).

struct field_align
{
  char c;
  Hchan *p;
};

void getTraceback(G*, G*) __asm__(GOSYM_PREFIX "runtime.getTraceback");

// getTraceback stores a traceback of gp in gp's traceback field
// and then returns to me.  We expect that gp's traceback is not nil.
// It works by saving me's current context, and checking gp's traceback field.
// If gp's traceback field is not nil, it starts running gp.
// In places where we call getcontext, we check the traceback field.
// If it is not nil, we collect a traceback, and then return to the
// goroutine stored in the traceback field, which is me.
void getTraceback(G* me, G* gp)
{
	M* holdm;

	holdm = gp->m;
	gp->m = me->m;

#ifdef USING_SPLIT_STACK
	__splitstack_getcontext((void*)(&me->stackcontext[0]));
#endif
	__go_getcontext(ucontext_arg(&me->context[0]));

	if (gp->traceback != 0) {
		runtime_gogo(gp);
	}

	gp->m = holdm;
}

// Do a stack trace of gp, and then restore the context to
// gp->traceback->gp.

void
gtraceback(G* gp)
{
	Traceback* traceback;

	traceback = (Traceback*)gp->traceback;
	gp->traceback = 0;
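	// Record the PCs into locbuf; the leading 1 skips this frame itself.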
	traceback->c = runtime_callers(1, traceback->locbuf,
		sizeof traceback->locbuf / sizeof traceback->locbuf[0], false);
	runtime_gogo(traceback->gp);
}

void doscanstackswitch(G*, G*) __asm__(GOSYM_PREFIX "runtime.doscanstackswitch");

// Switch to gp and let it scan its stack.
// The first time through here, gp->scang is set (to me).  The second
// time, gp has finished scanning and has cleared gp->scang, so we
// just return.
void
doscanstackswitch(G* me, G* gp)
{
	M* holdm;

	__go_assert(me->entry == nil);
	me->fromgogo = false;

	holdm = gp->m;
	gp->m = me->m;

#ifdef USING_SPLIT_STACK
	__splitstack_getcontext((void*)(&me->stackcontext[0]));
#endif
	__go_getcontext(ucontext_arg(&me->context[0]));

	if(me->entry != nil) {
		// Got here from mcall.
		// The stack scanning code may call systemstack, which calls
		// mcall, which calls setcontext.
		// Run the function, which at the end will switch back to gp.
		FuncVal *fv = me->entry;
		void (*pfn)(G*) = (void (*)(G*))fv->fn;
		G* gp1 = (G*)me->param;
		__go_assert(gp1 == gp);
		me->entry = nil;
		me->param = nil;
		__builtin_call_with_static_chain(pfn(gp1), fv);
		abort();
	}

	if (gp->scang != 0)
		runtime_gogo(gp);

	gp->m = holdm;
}

// Do a stack scan, then switch back to the g that triggers this scan.
// We come here from doscanstackswitch.
static void
gscanstack(G *gp)
{
	G *oldg, *oldcurg;

	oldg = (G*)gp->scang;
	oldcurg = oldg->m->curg;
	oldg->m->curg = gp;
	gp->scang = 0;

	doscanstack(gp, (void*)gp->scangcw);

	gp->scangcw = 0;
	oldg->m->curg = oldcurg;
	runtime_gogo(oldg);
}

// Called by pthread_create to start an M.
void*
runtime_mstart(void *arg)
{
	M* mp;
	G* gp;

	mp = (M*)(arg);
	gp = mp->g0;
	gp->m = mp;

	g = gp;

	gp->entry = nil;
	gp->param = nil;

	// We have to call minit before we call getcontext,
	// because getcontext will copy the signal mask.
	minit();

	initcontext();

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext((void*)(&gp->stackcontext[0]));
#else
	gp->gcinitialsp = &arg;
	// Setting gcstacksize to 0 is a marker meaning that gcinitialsp
	// is the top of the stack, not the bottom.
	gp->gcstacksize = 0;
	gp->gcnextsp = (uintptr)(&arg);
	gp->gcinitialsp2 = secondary_stack_pointer();
	gp->gcnextsp2 = (uintptr)(gp->gcinitialsp2);
#endif

	// Save the currently active context.  This will return
	// multiple times via the setcontext call in mcall.
	__go_getcontext(ucontext_arg(&gp->context[0]));

	if(gp->traceback != 0) {
		// Got here from getTraceback.
		// I'm not sure this ever actually happens--getTraceback
		// may always go to the getcontext call in mcall.
		gtraceback(gp);
	}
	if(gp->scang != 0)
		// Got here from doscanstackswitch.  Should not happen.
		runtime_throw("mstart with scang");

	if(gp->entry != nil) {
		// Got here from mcall.
		FuncVal *fv = gp->entry;
		void (*pfn)(G*) = (void (*)(G*))fv->fn;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		__builtin_call_with_static_chain(pfn(gp1), fv);
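		// pfn is expected never to return; if it somehow does,
		// fault at a recognizable address.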
		*(int*)0x21 = 0x21;
	}

	if(mp->exiting) {
		mexit(true);
		return nil;
	}

	// Initial call to getcontext--starting thread.

#ifdef USING_SPLIT_STACK
	{
		int dont_block_signals = 0;
		__splitstack_block_signals(&dont_block_signals, nil);
	}
#endif

	mstart1();

	// mstart1 does not return, but we need a return statement
	// here to avoid a compiler warning.
	return nil;
}

typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
	M *m;
	G *g;
	uintptr *tls;
	void (*fn)(void);
};

void setGContext(void) __asm__ (GOSYM_PREFIX "runtime.setGContext");

// setGContext sets up a new goroutine context for the current g.
void
setGContext(void)
{
	int val;
	G *gp;

	initcontext();
	gp = g;
	gp->entry = nil;
	gp->param = nil;
#ifdef USING_SPLIT_STACK
	__splitstack_getcontext((void*)(&gp->stackcontext[0]));
	val = 0;
	__splitstack_block_signals(&val, nil);
#else
	gp->gcinitialsp = &val;
	gp->gcstack = 0;
	gp->gcstacksize = 0;
	gp->gcnextsp = (uintptr)(&val);
	gp->gcinitialsp2 = secondary_stack_pointer();
	gp->gcnextsp2 = (uintptr)(gp->gcinitialsp2);
#endif
	__go_getcontext(ucontext_arg(&gp->context[0]));

	if(gp->entry != nil) {
		// Got here from mcall.
		FuncVal *fv = gp->entry;
		void (*pfn)(G*) = (void (*)(G*))fv->fn;
		G* gp1 = (G*)gp->param;
		gp->entry = nil;
		gp->param = nil;
		__builtin_call_with_static_chain(pfn(gp1), fv);
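		// As in runtime_mstart, pfn never returns; fault at a
		// distinct address if it does.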
		*(int*)0x22 = 0x22;
	}
}

void makeGContext(G*, byte*, uintptr)
	__asm__(GOSYM_PREFIX "runtime.makeGContext");

// makeGContext makes a new context for a g.
void
makeGContext(G* gp, byte* sp, uintptr spsize) {
	__go_context_t *uc;

	uc = ucontext_arg(&gp->context[0]);
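	// makecontext may only be applied to a context previously
	// initialized by getcontext, so fetch the current one first.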
	__go_getcontext(uc);
	__go_makecontext(uc, kickoff, sp, (size_t)spsize);
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime_gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.

void runtime_entersyscall() __attribute__ ((no_split_stack));
static void doentersyscall(uintptr, uintptr)
  __attribute__ ((no_split_stack, noinline));

void
runtime_entersyscall()
{
	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	if (!runtime_usestackmaps)
		__go_getcontext(ucontext_arg(&g->gcregs[0]));

	// Note that if this function does save any registers itself,
	// we might store the wrong value in the call to getcontext.
	// FIXME: This assumes that we do not need to save any
	// callee-saved registers to access the TLS variable g.  We
	// don't want to put the ucontext_t on the stack because it is
	// large and we can not split the stack here.
	doentersyscall((uintptr)runtime_getcallerpc(),
		       (uintptr)runtime_getcallersp());
}

static void
doentersyscall(uintptr pc, uintptr sp)
{
	// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
	{
	  size_t gcstacksize;
	  g->gcstack = (uintptr)(__splitstack_find(nil, nil, &gcstacksize,
						   (void**)(&g->gcnextsegment),
						   (void**)(&g->gcnextsp),
						   &g->gcinitialsp));
	  g->gcstacksize = (uintptr)gcstacksize;
	}
#else
	{
		void *v;

		g->gcnextsp = (uintptr)(&v);
		g->gcnextsp2 = (uintptr)(secondary_stack_pointer());
	}
#endif

	reentersyscall(pc, sp);
}

static void doentersyscallblock(uintptr, uintptr)
  __attribute__ ((no_split_stack, noinline));

// The same as runtime_entersyscall(), but with a hint that the syscall is blocking.
void
runtime_entersyscallblock()
{
	// Save the registers in the g structure so that any pointers
	// held in registers will be seen by the garbage collector.
	if (!runtime_usestackmaps)
		__go_getcontext(ucontext_arg(&g->gcregs[0]));

	// See comment in runtime_entersyscall.
	doentersyscallblock((uintptr)runtime_getcallerpc(),
			    (uintptr)runtime_getcallersp());
}

static void
doentersyscallblock(uintptr pc, uintptr sp)
{
	// Leave SP around for GC and traceback.
#ifdef USING_SPLIT_STACK
	{
	  size_t gcstacksize;
	  g->gcstack = (uintptr)(__splitstack_find(nil, nil, &gcstacksize,
						   (void**)(&g->gcnextsegment),
						   (void**)(&g->gcnextsp),
						   &g->gcinitialsp));
	  g->gcstacksize = (uintptr)gcstacksize;
	}
#else
	{
		void *v;

		g->gcnextsp = (uintptr)(&v);
		g->gcnextsp2 = (uintptr)(secondary_stack_pointer());
	}
#endif

	reentersyscallblock(pc, sp);
}

// Allocate a new g, with a stack big enough for stacksize bytes.
G*
runtime_malg(bool allocatestack, bool signalstack, byte** ret_stack, uintptr* ret_stacksize)
{
	uintptr stacksize;
	G *newg;
	byte* unused_stack;
	uintptr unused_stacksize;
#ifdef USING_SPLIT_STACK
	int dont_block_signals = 0;
	size_t ss_stacksize;
#endif

	if (ret_stack == nil) {
		ret_stack = &unused_stack;
	}
	if (ret_stacksize == nil) {
		ret_stacksize = &unused_stacksize;
	}
	newg = allocg();
	if(allocatestack) {
		stacksize = StackMin;
		if(signalstack) {
			stacksize = 32 * 1024; // OS X wants >= 8K, GNU/Linux >= 2K
#ifdef SIGSTKSZ
			if(stacksize < SIGSTKSZ)
				stacksize = SIGSTKSZ;
#endif
		}

#ifdef USING_SPLIT_STACK
		*ret_stack = __splitstack_makecontext(stacksize,
						      (void*)(&newg->stackcontext[0]),
						      &ss_stacksize);
		*ret_stacksize = (uintptr)ss_stacksize;
		__splitstack_block_signals_context((void*)(&newg->stackcontext[0]),
						   &dont_block_signals, nil);
#else
		// In 64-bit mode, the maximum Go allocation space is
		// 128G.  Our stack size is 4M, which only permits 32K
		// goroutines.  In order to not limit ourselves,
		// allocate the stacks out of separate memory.  In
		// 32-bit mode, the Go allocation space is all of
		// memory anyhow.
		if(sizeof(void*) == 8) {
			void *p = runtime_sysAlloc(stacksize, &getMemstats()->stacks_sys);
			if(p == nil)
				runtime_throw("runtime: cannot allocate memory for goroutine stack");
			*ret_stack = (byte*)p;
		} else {
			*ret_stack = runtime_mallocgc(stacksize, nil, false);
			runtime_xadd(&runtime_stacks_sys, stacksize);
		}
		*ret_stacksize = (uintptr)stacksize;
		newg->gcinitialsp = *ret_stack;
		newg->gcstacksize = (uintptr)stacksize;
		newg->gcinitialsp2 = initial_secondary_stack_pointer(*ret_stack);
#endif
	}
	return newg;
}

void stackfree(G*)
  __asm__(GOSYM_PREFIX "runtime.stackfree");

// stackfree frees the stack of a g.
void
stackfree(G* gp)
{
#ifdef USING_SPLIT_STACK
  __splitstack_releasecontext((void*)(&gp->stackcontext[0]));
#else
  // If gcstacksize is 0, the stack is allocated by libc and will be
  // released when the thread exits.  Otherwise, in 64-bit mode it was
  // allocated using sysAlloc and in 32-bit mode it was allocated
  // using garbage collected memory.
  if (gp->gcstacksize != 0) {
    if (sizeof(void*) == 8) {
      runtime_sysFree(gp->gcinitialsp, gp->gcstacksize, &getMemstats()->stacks_sys);
    }
    gp->gcinitialsp = nil;
    gp->gcstacksize = 0;
  }
#endif
}

void resetNewG(G*, void **, uintptr*)
  __asm__(GOSYM_PREFIX "runtime.resetNewG");

// Reset stack information for a g pulled out of the cache to start a
// new goroutine.
void
resetNewG(G *newg, void **sp, uintptr *spsize)
{
#ifdef USING_SPLIT_STACK
  int dont_block_signals = 0;
  size_t ss_spsize;

  *sp = __splitstack_resetcontext((void*)(&newg->stackcontext[0]), &ss_spsize);
  *spsize = ss_spsize;
  __splitstack_block_signals_context((void*)(&newg->stackcontext[0]),
				     &dont_block_signals, nil);
#else
  *sp = newg->gcinitialsp;
  *spsize = newg->gcstacksize;
  if(*spsize == 0)
    runtime_throw("bad spsize in resetNewG");
  newg->gcnextsp = (uintptr)(*sp);
  newg->gcnextsp2 = (uintptr)(newg->gcinitialsp2);
#endif
}